diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 4d3ead20fb5cd..9e3257a62b12f 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -51,12 +51,14 @@ class AffineApplyExpander
         loc(loc) {}
 
   template <typename OpTy>
-  Value buildBinaryExpr(AffineBinaryOpExpr expr) {
+  Value buildBinaryExpr(AffineBinaryOpExpr expr,
+                        arith::IntegerOverflowFlags overflowFlags =
+                            arith::IntegerOverflowFlags::none) {
     auto lhs = visit(expr.getLHS());
     auto rhs = visit(expr.getRHS());
     if (!lhs || !rhs)
       return nullptr;
-    auto op = builder.create<OpTy>(loc, lhs, rhs);
+    auto op = builder.create<OpTy>(loc, lhs, rhs, overflowFlags);
     return op.getResult();
   }
 
@@ -65,7 +67,8 @@ class AffineApplyExpander
   }
 
   Value visitMulExpr(AffineBinaryOpExpr expr) {
-    return buildBinaryExpr<arith::MulIOp>(expr);
+    return buildBinaryExpr<arith::MulIOp>(expr,
+                                          arith::IntegerOverflowFlags::nsw);
   }
 
   /// Euclidean modulo operation: negative RHS is not allowed.
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
index 58580a194df0c..f2e0306073f27 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine-to-vector.mlir
@@ -26,7 +26,7 @@ func.func @affine_vector_store(%arg0 : index) {
 // CHECK: %[[buf:.*]] = memref.alloc
 // CHECK: %[[val:.*]] = arith.constant dense
 // CHECK: %[[c_1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[a:.*]] = arith.muli %arg0, %[[c_1]] : index
+// CHECK-NEXT: %[[a:.*]] = arith.muli %arg0, %[[c_1]] overflow<nsw> : index
 // CHECK-NEXT: %[[b:.*]] = arith.addi %{{.*}}, %[[a]] : index
 // CHECK-NEXT: %[[c7:.*]] = arith.constant 7 : index
 // CHECK-NEXT: %[[c:.*]] = arith.addi %[[b]], %[[c7]] : index
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index 00d7b6b8d65f6..550ea71882e14 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -156,7 +156,7 @@ func.func private @get_idx() -> (index)
 // CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] : index
+// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[c20:.*]] = arith.constant 20 : index
 // CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v1]], %[[c20]] : index
 // CHECK-NEXT: %[[v3:.*]] = arith.cmpi sge, %[[v2]], %[[c0]] : index
@@ -177,7 +177,7 @@ func.func @if_only() {
 // CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] : index
+// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[c20:.*]] = arith.constant 20 : index
 // CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v1]], %[[c20]] : index
 // CHECK-NEXT: %[[v3:.*]] = arith.cmpi sge, %[[v2]], %[[c0]] : index
@@ -202,7 +202,7 @@ func.func @if_else() {
 // CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] : index
+// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[c20:.*]] = arith.constant 20 : index
 // CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v1]], %[[c20]] : index
 // CHECK-NEXT: %[[v3:.*]] = arith.cmpi sge, %[[v2]], %[[c0]] : index
@@ -272,7 +272,7 @@ func.func @if_with_yield() -> (i64) {
 // CHECK-NEXT: %[[v0:.*]] = call @get_idx() : () -> index
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] : index
+// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v1]], %{{.*}} : index
 // CHECK-NEXT: %[[c1:.*]] = arith.constant 1 : index
 // CHECK-NEXT: %[[v3:.*]] = arith.addi %[[v2]], %[[c1]] : index
@@ -316,7 +316,7 @@ func.func @if_for() {
   %i = call @get_idx() : () -> (index)
 // CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
 // CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] : index
+// CHECK-NEXT: %[[v1:.*]] = arith.muli %[[v0]], %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[c20:.*]] = arith.constant 20 : index
 // CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v1]], %[[c20]] : index
 // CHECK-NEXT: %[[v3:.*]] = arith.cmpi sge, %[[v2]], %[[c0]] : index
@@ -371,7 +371,7 @@ func.func @if_for() {
 // CHECK-NEXT:   %[[c1:.*]] = arith.constant 1 : index
 // CHECK-NEXT:   for %{{.*}} = %[[c0]] to %[[c42]] step %[[c1]] {
 // CHECK-NEXT:     %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT:     %[[mul0:.*]] = arith.muli %{{.*}}, %[[cm1]] : index
+// CHECK-NEXT:     %[[mul0:.*]] = arith.muli %{{.*}}, %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT:     %[[add0:.*]] = arith.addi %[[mul0]], %{{.*}} : index
 // CHECK-NEXT:     %[[max:.*]] = arith.maxsi %{{.*}}, %[[add0]] : index
 // CHECK-NEXT:     %[[c10:.*]] = arith.constant 10 : index
@@ -448,22 +448,22 @@ func.func @affine_applies(%arg0 : index) {
   %one = affine.apply #map3(%symbZero)[%zero]
 
 // CHECK-NEXT: %[[c2:.*]] = arith.constant 2 : index
-// CHECK-NEXT: %[[v2:.*]] = arith.muli %arg0, %[[c2]] : index
+// CHECK-NEXT: %[[v2:.*]] = arith.muli %arg0, %[[c2]] overflow<nsw> : index
 // CHECK-NEXT: %[[v3:.*]] = arith.addi %arg0, %[[v2]] : index
 // CHECK-NEXT: %[[c3:.*]] = arith.constant 3 : index
-// CHECK-NEXT: %[[v4:.*]] = arith.muli %arg0, %[[c3]] : index
+// CHECK-NEXT: %[[v4:.*]] = arith.muli %arg0, %[[c3]] overflow<nsw> : index
 // CHECK-NEXT: %[[v5:.*]] = arith.addi %[[v3]], %[[v4]] : index
 // CHECK-NEXT: %[[c4:.*]] = arith.constant 4 : index
-// CHECK-NEXT: %[[v6:.*]] = arith.muli %arg0, %[[c4]] : index
+// CHECK-NEXT: %[[v6:.*]] = arith.muli %arg0, %[[c4]] overflow<nsw> : index
 // CHECK-NEXT: %[[v7:.*]] = arith.addi %[[v5]], %[[v6]] : index
 // CHECK-NEXT: %[[c5:.*]] = arith.constant 5 : index
-// CHECK-NEXT: %[[v8:.*]] = arith.muli %arg0, %[[c5]] : index
+// CHECK-NEXT: %[[v8:.*]] = arith.muli %arg0, %[[c5]] overflow<nsw> : index
 // CHECK-NEXT: %[[v9:.*]] = arith.addi %[[v7]], %[[v8]] : index
 // CHECK-NEXT: %[[c6:.*]] = arith.constant 6 : index
-// CHECK-NEXT: %[[v10:.*]] = arith.muli %arg0, %[[c6]] : index
+// CHECK-NEXT: %[[v10:.*]] = arith.muli %arg0, %[[c6]] overflow<nsw> : index
 // CHECK-NEXT: %[[v11:.*]] = arith.addi %[[v9]], %[[v10]] : index
 // CHECK-NEXT: %[[c7:.*]] = arith.constant 7 : index
-// CHECK-NEXT: %[[v12:.*]] = arith.muli %arg0, %[[c7]] : index
+// CHECK-NEXT: %[[v12:.*]] = arith.muli %arg0, %[[c7]] overflow<nsw> : index
 // CHECK-NEXT: %[[v13:.*]] = arith.addi %[[v11]], %[[v12]] : index
   %four = affine.apply #map4(%arg0, %arg0, %arg0, %arg0)[%arg0, %arg0, %arg0]
   return
@@ -610,7 +610,7 @@ func.func @affine_store(%arg0 : index) {
   affine.store %1, %0[%i0 - symbol(%arg0) + 7] : memref<10xf32>
 }
 // CHECK: %[[cm1:.*]] = arith.constant -1 : index
-// CHECK-NEXT: %[[a:.*]] = arith.muli %{{.*}}, %[[cm1]] : index
+// CHECK-NEXT: %[[a:.*]] = arith.muli %{{.*}}, %[[cm1]] overflow<nsw> : index
 // CHECK-NEXT: %[[b:.*]] = arith.addi %{{.*}}, %[[a]] : index
 // CHECK-NEXT: %[[c7:.*]] = arith.constant 7 : index
 // CHECK-NEXT: %[[c:.*]] = arith.addi %[[b]], %[[c7]] : index
diff --git a/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
index a78db9733b7ee..1fe4217cde982 100644
--- a/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
@@ -59,7 +59,7 @@ func.func @subview(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>, %arg0 : in
   // CHECK: %[[BASE:.*]] = llvm.extractvalue %[[MEMREF]][0] : !llvm.struct<(ptr, ptr, i64
   // CHECK: %[[BASE_ALIGNED:.*]] = llvm.extractvalue %[[MEMREF]][1] : !llvm.struct<(ptr, ptr, i64
   // CHECK: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[DESCSTRIDE0]] : i64 to index
   // CHECK: %[[DESCSTRIDE0_V2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
@@ -95,10 +95,10 @@ func.func @subview_non_zero_addrspace(%0 : memref<64x4xf32, strided<[4, 1], offs
   // CHECK: %[[BASE:.*]] = llvm.extractvalue %[[MEMREF]][0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_ALIGNED:.*]] = llvm.extractvalue %[[MEMREF]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[DESCSTRIDE0]] : i64 to index
   // CHECK: %[[DESCSTRIDE0_V2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
-  // CHECK: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
+  // CHECK: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[OFF2]] : i64 to index
   // CHECK: %[[OFF2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
@@ -131,10 +131,10 @@ func.func @subview_const_size(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>,
   // CHECK: %[[BASE:.*]] = llvm.extractvalue %[[MEMREF]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_ALIGNED:.*]] = llvm.extractvalue %[[MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[C4]] : i64
+  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[C4]] overflow<nsw> : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[DESCSTRIDE0]] : i64 to index
   // CHECK: %[[DESCSTRIDE0_V2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
-  // CHECK: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
+  // CHECK: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[OFF2]] : i64 to index
   // CHECK: %[[OFF2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -168,8 +168,8 @@ func.func @subview_const_stride(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>
   // CHECK: %[[BASE:.*]] = llvm.extractvalue %[[MEMREF]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_ALIGNED:.*]] = llvm.extractvalue %[[MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK: %[[OFF0:.*]] = llvm.mul %[[ARG0]], %[[C4]] : i64
-  // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF0]], %[[ARG1]] : i64
+  // CHECK: %[[OFF0:.*]] = llvm.mul %[[ARG0]], %[[C4]] overflow<nsw> : i64
+  // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF0]], %[[ARG1]] : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[OFF2]] : i64 to index
   // CHECK: %[[OFF2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -234,12 +234,12 @@ func.func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, strided<[4, 1], of
   // CHECK: %[[BASE:.*]] = llvm.extractvalue %[[MEMREF]][0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_ALIGNED:.*]] = llvm.extractvalue %[[MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+  // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[DESCSTRIDE0]] : i64 to index
   // CHECK: %[[DESCSTRIDE0_V2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
-  // CHECK: %[[OFF0:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : i64
+  // CHECK: %[[OFF0:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK: %[[BASE_OFF:.*]] = llvm.mlir.constant(8 : index) : i64
-  // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF0]], %[[BASE_OFF]] : i64
+  // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF0]], %[[BASE_OFF]] : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[OFF2]] : i64 to index
   // CHECK: %[[OFF2:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -301,7 +301,7 @@ func.func @subview_leading_operands_dynamic(%0 : memref<5x?xf32>) -> memref<3x?x
   // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // Compute and insert offset from 2 + dynamic value.
   // CHECK: %[[CST_OFF0:.*]] = llvm.mlir.constant(2 : index) : i64
-  // CHECK: %[[OFF0:.*]] = llvm.mul %[[STRIDE0]], %[[CST_OFF0]] : i64
+  // CHECK: %[[OFF0:.*]] = llvm.mul %[[STRIDE0]], %[[CST_OFF0]] overflow<nsw> : i64
   // CHECK: %[[TMP:.*]] = builtin.unrealized_conversion_cast %[[OFF0]] : i64 to index
   // CHECK: %[[OFF0:.*]] = builtin.unrealized_conversion_cast %[[TMP]] : index to i64
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -425,7 +425,7 @@ func.func @collapse_shape_dynamic_with_non_identity_layout(
 // CHECK: %[[SIZE1:.*]] = llvm.extractvalue %[[MEM]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: %[[SIZE2:.*]] = llvm.extractvalue %[[MEM]][3, 2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEM]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: %[[FINAL_SIZE1:.*]] = llvm.mul %[[SIZE1]], %[[SIZE2]] : i64
+// CHECK: %[[FINAL_SIZE1:.*]] = llvm.mul %[[SIZE1]], %[[SIZE2]] overflow<nsw> : i64
 // CHECK: %[[SIZE1_TO_IDX:.*]] = builtin.unrealized_conversion_cast %[[FINAL_SIZE1]] : i64 to index
 // CHECK: %[[FINAL_SIZE1:.*]] = builtin.unrealized_conversion_cast %[[SIZE1_TO_IDX]] : index to i64
 // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -547,7 +547,7 @@ func.func @collapse_shape_dynamic(%arg0 : memref<1x2x?xf32>) -> memref<1x?xf32>
 // CHECK: %[[SIZE2:.*]] = llvm.extractvalue %[[MEM]][3, 2] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEM]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64
-// CHECK: %[[FINAL_SIZE1:.*]] = llvm.mul %[[SIZE2]], %[[C2]] : i64
+// CHECK: %[[FINAL_SIZE1:.*]] = llvm.mul %[[SIZE2]], %[[C2]] overflow<nsw> : i64
 // CHECK: %[[SIZE1_TO_IDX:.*]] = builtin.unrealized_conversion_cast %[[FINAL_SIZE1]] : i64 to index
 // CHECK: %[[FINAL_SIZE1:.*]] = builtin.unrealized_conversion_cast %[[SIZE1_TO_IDX]] : index to i64
 // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
diff --git a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
index f6d3387d99b3c..2785b50886122 100644
--- a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
+++ b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-target-tag.mlir
@@ -28,7 +28,7 @@ func.func @subview(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>, %arg0 : in
   // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>
 
   // CHECK-DAG: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK-DAG: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+  // CHECK-DAG: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK-DAG: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
 
   // CHECK-DAG: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
diff --git a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-top-level-named-sequence.mlir b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-top-level-named-sequence.mlir
index a74553cc2268e..c1f30c7eaf643 100644
--- a/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-top-level-named-sequence.mlir
+++ b/mlir/test/Dialect/LLVM/lower-to-llvm-e2e-with-top-level-named-sequence.mlir
@@ -27,7 +27,7 @@ func.func @subview(%0 : memref<64x4xf32, strided<[4, 1], offset: 0>>, %arg0 : in
   // CHECK-SAME: -> !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>
 
   // CHECK-DAG: %[[STRIDE0:.*]] = llvm.mlir.constant(4 : index) : i64
-  // CHECK-DAG: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
+  // CHECK-DAG: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] overflow<nsw> : i64
   // CHECK-DAG: %[[OFF2:.*]] = llvm.add %[[DESCSTRIDE0]], %[[ARG1]] : i64
 
   // CHECK-DAG: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>