[clang][RISCV] Correct the SEW operand of indexed/fault only first segment intrinsics #111476
Conversation
Indexed segment load/store intrinsics don't have SEW information encoded in the name, so the SEW has to be derived from the pointer type argument instead.
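To make the rationale concrete, here is a minimal sketch of the idea, using a hypothetical helper rather than the actual clang/LLVM code touched by this patch: since the intrinsic name carries no SEW, the immediate passed to the llvm.riscv.* segment intrinsics can be computed from the width of the element type behind the pointer operand, and the values in the updated tests below are consistent with a log2(SEW) encoding.

```c++
#include <cstdint>

#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"

// Hypothetical helper (not the actual patch code): derive the SEW immediate
// for an indexed / fault-only-first segment intrinsic from the element type
// its pointer operand points at. For __bf16 the scalar size is 16 bits, so
// this returns 4, matching the "i64 4" in the updated CHECK lines where the
// old code emitted 0.
static uint64_t getSegmentSEWOperand(llvm::Type *ElementTy) {
  unsigned SEWInBits = ElementTy->getScalarSizeInBits(); // 8, 16, 32 or 64
  return llvm::Log2_32(SEWInBits);                       // 3, 4, 5 or 6
}
```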
@llvm/pr-subscribers-clang @llvm/pr-subscribers-backend-risc-v
Author: Brandon Wu (4vtomat)
Changes: Indexed segment load/store intrinsics don't have SEW information encoded …
Patch is 38.46 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/111476.diff
561 Files Affected:
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c
index 3c92b6865097df..e3a627bce6931d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg2ei16.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei16_v_bf16mf4x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1,
@@ -22,7 +22,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei16_v_bf16mf2x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1,
@@ -33,7 +33,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei16_v_bf16m1x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2,
@@ -44,7 +44,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei16_v_bf16m2x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2,
@@ -55,7 +55,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei16_v_bf16m4x2(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2,
@@ -66,7 +66,7 @@ vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei16_v_bf16mf4x2_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
//
vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm,
@@ -78,7 +78,7 @@ vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei16_v_bf16mf2x2_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
//
vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm,
@@ -90,7 +90,7 @@ vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei16_v_bf16m1x2_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
//
vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
@@ -101,7 +101,7 @@ vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei16_v_bf16m2x2_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
//
vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
@@ -112,7 +112,7 @@ vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei16_v_bf16m4x2_m(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i16> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
//
vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c
index ff506a20cc2630..feb9dcd24a51a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vloxseg3ei16.c
@@ -11,7 +11,7 @@
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei16_v_bf16mf4x3(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
//
vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1,
@@ -22,7 +22,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei16_v_bf16mf2x3(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
//
vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1,
@@ -33,7 +33,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei16_v_bf16m1x3(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
//
vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2,
@@ -44,7 +44,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei16_v_bf16m2x3(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i16> [[RS2]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
//
vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2,
@@ -55,7 +55,7 @@ vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei16_v_bf16mf4x3_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i16> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
//
vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm,
@@ -67,7 +67,7 @@ vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei16_v_bf16mf2x3_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i16> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
//
vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm,
@@ -79,7 +79,7 @@ vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei16_v_bf16m1x3_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i16> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i16> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
//
vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
@@ -90,7 +90,7 @@ vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei16_v_bf16m2x3_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i16> [[R...
[truncated]
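For readers skimming the truncated diff: the only change in every CHECK line is the trailing immediate of the intrinsic call, which goes from 0 to 4 for these 16-bit (bf16) element cases. A self-contained sketch of that apparent mapping follows; the log2(SEW) interpretation is inferred from the test updates, not quoted from the patch itself.

```c++
#include <cassert>

// Inferred mapping from RVV element width to the trailing SEW immediate seen
// in the updated tests: 8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6 (i.e. log2(SEW)).
static unsigned log2SEW(unsigned ElementBits) {
  assert((ElementBits == 8 || ElementBits == 16 || ElementBits == 32 ||
          ElementBits == 64) &&
         "unexpected RVV element width");
  unsigned Log2 = 0;
  while ((1u << Log2) < ElementBits)
    ++Log2;
  return Log2; // e.g. log2SEW(16) == 4, the value now emitted for __bf16
}
```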
✅ With the latest revision this PR passed the C/C++ code formatter.
Makes sense to me. Since when has it been wrong?
It's from the tuple interface change patches, #97992. The intrinsics probably don't have many users yet, which is likely why the bug went unnoticed.
This patch seems to introduce a new compile-time warning.
…gment intrinsics (llvm#111476)
Indexed segment load/store intrinsics don't have SEW information encoded in the name, so we need to get the information from its pointer type argument at runtime.
I'm seeing failures during …
This will be fixed by #111481.