Skip to content

Commit 1935747

Browse files
aartbik
authored and zahiraam committed
[mlir][sparse][tensor] replace bufferization with empty tensor (llvm#66450)
Rationale: A bufferization.alloc_tensor can be directly replaced with tensor.empty since these are more or less semantically equivalent. The latter is considered a bit more "pure" with respect to SSA semantics.
1 parent 0a1886f commit 1935747

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

48 files changed

+121
-121
lines changed

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -71,7 +71,7 @@ module {
7171
%c2 = arith.constant 2.0 : f64
7272
%d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #SparseMatrix>
7373
%d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #SparseMatrix>
74-
%init = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DenseMatrix>
74+
%init = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DenseMatrix>
7575
%0 = linalg.generic #trait_assign
7676
ins(%arga: tensor<?x?xf64, #SparseMatrix>)
7777
outs(%init: tensor<?x?xf64, #DenseMatrix>) {

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -48,7 +48,7 @@ module {
4848
%argb: tensor<?xbf16, #SparseVector>) -> tensor<?xbf16, #DenseVector> {
4949
%c = arith.constant 0 : index
5050
%d = tensor.dim %arga, %c : tensor<?xbf16, #SparseVector>
51-
%xv = bufferization.alloc_tensor (%d) : tensor<?xbf16, #DenseVector>
51+
%xv = tensor.empty (%d) : tensor<?xbf16, #DenseVector>
5252
%0 = linalg.generic #trait_vec_op
5353
ins(%arga, %argb: tensor<?xbf16, #SparseVector>, tensor<?xbf16, #SparseVector>)
5454
outs(%xv: tensor<?xbf16, #DenseVector>) {

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -49,7 +49,7 @@ module {
4949
%argb: tensor<?xf16, #SparseVector>) -> tensor<?xf16, #DenseVector> {
5050
%c = arith.constant 0 : index
5151
%d = tensor.dim %arga, %c : tensor<?xf16, #SparseVector>
52-
%xv = bufferization.alloc_tensor (%d) : tensor<?xf16, #DenseVector>
52+
%xv = tensor.empty (%d) : tensor<?xf16, #DenseVector>
5353
%0 = linalg.generic #trait_vec_op
5454
ins(%arga, %argb: tensor<?xf16, #SparseVector>, tensor<?xf16, #SparseVector>)
5555
outs(%xv: tensor<?xf16, #DenseVector>) {

mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir

Lines changed: 4 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -51,7 +51,7 @@ module {
5151

5252
func.func @conv2d_all_sparse_DCSR(%input: tensor<8x8xi32, #DCSR>,
5353
%filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
54-
%s = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
54+
%s = tensor.empty() : tensor<6x6xi32, #DCSR>
5555
%0 = linalg.conv_2d
5656
ins (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
5757
outs (%s: tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>
@@ -60,7 +60,7 @@ module {
6060

6161
func.func @conv2d_all_sparse_CSR(%input: tensor<8x8xi32, #CSR>,
6262
%filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
63-
%s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSR>
63+
%s = tensor.empty() : tensor<6x6xi32, #CSR>
6464
%0 = linalg.conv_2d
6565
ins (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32, #CSR>)
6666
outs (%s: tensor<6x6xi32, #CSR>) -> tensor<6x6xi32, #CSR>
@@ -69,7 +69,7 @@ module {
6969

7070
func.func @conv2d_all_sparse_CD(%input: tensor<8x8xi32, #CDR>,
7171
%filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
72-
%s = bufferization.alloc_tensor() : tensor<6x6xi32, #CDR>
72+
%s = tensor.empty() : tensor<6x6xi32, #CDR>
7373
%0 = linalg.conv_2d
7474
ins (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32, #CDR>)
7575
outs (%s: tensor<6x6xi32, #CDR>) -> tensor<6x6xi32, #CDR>
@@ -78,7 +78,7 @@ module {
7878

7979
func.func @conv2d_all_sparse_CSC(%input: tensor<8x8xi32, #CSC>,
8080
%filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
81-
%s = bufferization.alloc_tensor() : tensor<6x6xi32, #CSC>
81+
%s = tensor.empty() : tensor<6x6xi32, #CSC>
8282
%0 = linalg.conv_2d
8383
ins (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32, #CSC>)
8484
outs (%s: tensor<6x6xi32, #CSC>) -> tensor<6x6xi32, #CSC>

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -46,7 +46,7 @@ module {
4646
-> tensor<?xf64, #SparseVector> {
4747
%c0 = arith.constant 0 : index
4848
%d = tensor.dim %arg0, %c0 : tensor<?xf64, #SparseVector>
49-
%xin = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
49+
%xin = tensor.empty(%d) : tensor<?xf64, #SparseVector>
5050
%0 = linalg.generic #trait_op
5151
ins(%arg0: tensor<?xf64, #SparseVector>)
5252
outs(%xin: tensor<?xf64, #SparseVector>) {
@@ -61,7 +61,7 @@ module {
6161
-> tensor<?xi32, #SparseVector> {
6262
%c0 = arith.constant 0 : index
6363
%d = tensor.dim %arg0, %c0 : tensor<?xi32, #SparseVector>
64-
%xin = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
64+
%xin = tensor.empty(%d) : tensor<?xi32, #SparseVector>
6565
%0 = linalg.generic #trait_op
6666
ins(%arg0: tensor<?xi32, #SparseVector>)
6767
outs(%xin: tensor<?xi32, #SparseVector>) {

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir

Lines changed: 11 additions & 11 deletions
Original file line number · Diff line number · Diff line change
@@ -73,7 +73,7 @@ module {
7373
%argb: tensor<?xi32, #SparseVector>) -> tensor<?xi32, #SparseVector> {
7474
%c = arith.constant 0 : index
7575
%d = tensor.dim %arga, %c : tensor<?xi32, #SparseVector>
76-
%xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
76+
%xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
7777
%0 = linalg.generic #trait_vec_op
7878
ins(%arga, %argb: tensor<?xi32, #SparseVector>, tensor<?xi32, #SparseVector>)
7979
outs(%xv: tensor<?xi32, #SparseVector>) {
@@ -97,7 +97,7 @@ module {
9797
%argb: tensor<?xf64>) -> tensor<?xf64, #SparseVector> {
9898
%c = arith.constant 0 : index
9999
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
100-
%xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
100+
%xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
101101
%0 = linalg.generic #trait_vec_op
102102
ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64>)
103103
outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -121,7 +121,7 @@ module {
121121
%argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
122122
%c = arith.constant 0 : index
123123
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
124-
%xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
124+
%xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
125125
%0 = linalg.generic #trait_vec_op
126126
ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
127127
outs(%xv: tensor<?xf64, #SparseVector>) {
@@ -139,7 +139,7 @@ module {
139139
func.func @vector_index(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> {
140140
%c = arith.constant 0 : index
141141
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
142-
%xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector>
142+
%xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
143143
%0 = linalg.generic #trait_vec_scale
144144
ins(%arga: tensor<?xf64, #SparseVector>)
145145
outs(%xv: tensor<?xi32, #SparseVector>) {
@@ -166,7 +166,7 @@ module {
166166
%c1 = arith.constant 1 : index
167167
%d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
168168
%d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
169-
%xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
169+
%xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
170170
%0 = linalg.generic #trait_mat_op
171171
ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
172172
outs(%xv: tensor<?x?xf64, #DCSR>) {
@@ -191,7 +191,7 @@ module {
191191
// Tensor addition (use semi-ring binary operation).
192192
func.func @add_tensor_1(%A: tensor<4x4xf64, #DCSR>,
193193
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
194-
%C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
194+
%C = tensor.empty() : tensor<4x4xf64, #DCSR>
195195
%0 = linalg.generic #trait_mat_op
196196
ins(%A, %B: tensor<4x4xf64, #DCSR>,
197197
tensor<4x4xf64, #DCSR>)
@@ -213,7 +213,7 @@ module {
213213
// Same as @add_tensor_1, but use sparse_tensor.yield instead of identity to yield value.
214214
func.func @add_tensor_2(%A: tensor<4x4xf64, #DCSR>,
215215
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
216-
%C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
216+
%C = tensor.empty() : tensor<4x4xf64, #DCSR>
217217
%0 = linalg.generic #trait_mat_op
218218
ins(%A, %B: tensor<4x4xf64, #DCSR>,
219219
tensor<4x4xf64, #DCSR>)
@@ -241,7 +241,7 @@ module {
241241
// Performs triangular add/sub operation (using semi-ring binary op).
242242
func.func @triangular(%A: tensor<4x4xf64, #DCSR>,
243243
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
244-
%C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
244+
%C = tensor.empty() : tensor<4x4xf64, #DCSR>
245245
%0 = linalg.generic #trait_mat_op
246246
ins(%A, %B: tensor<4x4xf64, #DCSR>,
247247
tensor<4x4xf64, #DCSR>)
@@ -274,7 +274,7 @@ module {
274274
// Perform sub operation (using semi-ring binary op) with a constant threshold.
275275
func.func @sub_with_thres(%A: tensor<4x4xf64, #DCSR>,
276276
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
277-
%C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
277+
%C = tensor.empty() : tensor<4x4xf64, #DCSR>
278278
// Defines out-block constant bounds.
279279
%thres_out_up = arith.constant 2.0 : f64
280280
%thres_out_lo = arith.constant -2.0 : f64
@@ -323,7 +323,7 @@ module {
323323
// Performs isEqual only on intersecting elements.
324324
func.func @intersect_equal(%A: tensor<4x4xf64, #DCSR>,
325325
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> {
326-
%C = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
326+
%C = tensor.empty() : tensor<4x4xi8, #DCSR>
327327
%0 = linalg.generic #trait_mat_op
328328
ins(%A, %B: tensor<4x4xf64, #DCSR>,
329329
tensor<4x4xf64, #DCSR>)
@@ -346,7 +346,7 @@ module {
346346
// Keeps values on left, negate value on right, ignore value when overlapping.
347347
func.func @only_left_right(%A: tensor<4x4xf64, #DCSR>,
348348
%B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
349-
%C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
349+
%C = tensor.empty() : tensor<4x4xf64, #DCSR>
350350
%0 = linalg.generic #trait_mat_op
351351
ins(%A, %B: tensor<4x4xf64, #DCSR>,
352352
tensor<4x4xf64, #DCSR>)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -66,7 +66,7 @@ module {
6666

6767
func.func @cmp_lhs_sparse(%arga: tensor<4x4xf64, #DCSR>,
6868
%argb: tensor<4x4xf64>) -> tensor<4x4xi8, #DCSR> {
69-
%argx = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
69+
%argx = tensor.empty() : tensor<4x4xi8, #DCSR>
7070
%0 = linalg.generic #trait
7171
ins(%arga, %argb: tensor<4x4xf64, #DCSR>, tensor<4x4xf64>)
7272
outs(%argx: tensor<4x4xi8, #DCSR>) {
@@ -80,7 +80,7 @@ module {
8080

8181
func.func @cmp_all_sparse(%arga: tensor<4x4xf64, #DCSR>,
8282
%argb: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> {
83-
%argx = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR>
83+
%argx = tensor.empty() : tensor<4x4xi8, #DCSR>
8484
%0 = linalg.generic #trait
8585
ins(%arga, %argb: tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>)
8686
outs(%argx: tensor<4x4xi8, #DCSR>) {

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -43,8 +43,8 @@ module {
4343
%c1 = arith.constant 1 : index
4444
%c2 = arith.constant 2 : index
4545
%c3 = arith.constant 3 : index
46-
%t1 = bufferization.alloc_tensor() : tensor<4x5xf64, #DCSR>
47-
%t2 = bufferization.alloc_tensor(%c2, %c3) : tensor<?x?xf64, #DCSR>
46+
%t1 = tensor.empty() : tensor<4x5xf64, #DCSR>
47+
%t2 = tensor.empty(%c2, %c3) : tensor<?x?xf64, #DCSR>
4848

4949
%d1_0 = tensor.dim %t1, %c0 : tensor<4x5xf64, #DCSR>
5050
%d2_0 = tensor.dim %t2, %c0 : tensor<?x?xf64, #DCSR>

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -48,7 +48,7 @@ module {
4848
-> tensor<?xcomplex<f32>, #SparseVector> {
4949
%c = arith.constant 0 : index
5050
%d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
51-
%xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f32>, #SparseVector>
51+
%xv = tensor.empty(%d) : tensor<?xcomplex<f32>, #SparseVector>
5252
%0 = linalg.generic #trait_op
5353
ins(%arga, %argb: tensor<?xcomplex<f32>, #SparseVector>,
5454
tensor<?xcomplex<f32>, #SparseVector>)
@@ -65,7 +65,7 @@ module {
6565
-> tensor<?xcomplex<f32>, #SparseVector> {
6666
%c = arith.constant 0 : index
6767
%d = tensor.dim %arga, %c : tensor<?xcomplex<f32>, #SparseVector>
68-
%xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f32>, #SparseVector>
68+
%xv = tensor.empty(%d) : tensor<?xcomplex<f32>, #SparseVector>
6969
%0 = linalg.generic #trait_op
7070
ins(%arga, %argb: tensor<?xcomplex<f32>, #SparseVector>,
7171
tensor<?xcomplex<f32>, #SparseVector>)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -45,7 +45,7 @@ module {
4545
-> tensor<?xcomplex<f64>, #SparseVector> {
4646
%c = arith.constant 0 : index
4747
%d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
48-
%xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
48+
%xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
4949
%0 = linalg.generic #trait_op
5050
ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
5151
tensor<?xcomplex<f64>, #SparseVector>)
@@ -62,7 +62,7 @@ module {
6262
-> tensor<?xcomplex<f64>, #SparseVector> {
6363
%c = arith.constant 0 : index
6464
%d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
65-
%xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
65+
%xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
6666
%0 = linalg.generic #trait_op
6767
ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
6868
tensor<?xcomplex<f64>, #SparseVector>)

0 commit comments

Comments (0)