diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td index 85fd07e3883ea..8b79fbf726495 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -206,8 +206,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", // Doubly compressed sparse column storage with specific bitwidths. #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i, j) -> (j, i)>, + map = (d0, d1) -> (d1 : compressed, d0 : compressed), posWidth = 32, crdWidth = 8 }> diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp index 770349d6d1db0..fee32a5717f62 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -351,8 +351,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl, /// Get the total number of compound affine expressions in the /// `getMatchingIndexingMap` for the given tensor. 
For the following inputs: /// -/// map = (d0, d1, d2) => (d0 + d1, d2) -/// lvlTypes = ["compressed", "compressed"] +/// map = (d0, d1, d2) => (d0 + d1 : compressed, d2 : compressed) /// /// Returns 1 (because the first level is compressed and its corresponding /// indexing-expression is `d0 + d1`) diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir index 7c92193ab068d..ad3e657cd37e3 100644 --- a/mlir/test/Dialect/Bufferization/invalid.mlir +++ b/mlir/test/Dialect/Bufferization/invalid.mlir @@ -58,7 +58,7 @@ func.func @escape_attr_non_bufferizable(%m0: memref) { // ----- -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> { // expected-error @+1{{sparse tensor allocation should not escape function}} @@ -68,7 +68,7 @@ func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> { // ----- -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> func.func private @foo(tensor<20x40xf32, #DCSR>) -> () diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir index 43d86a9f158f0..c5061c40eb0b1 100644 --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -9,13 +9,13 @@ }> #Dense2D = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ], + map = (d0, d1) -> (d0 : dense, d1 : dense), crdWidth = 64, posWidth = 32 }> #Row = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], + map = (d0, d1) -> (d0 : compressed, d1 : dense), crdWidth = 64, posWidth = 32 }> @@ -35,7 +35,7 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], + map = (d0, d1) -> (d0 : compressed, d1 : compressed), 
crdWidth = 64, posWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir index 8d37a8d7b6625..485a5cbb178af 100644 --- a/mlir/test/Dialect/SparseTensor/dense.mlir +++ b/mlir/test/Dialect/SparseTensor/dense.mlir @@ -7,7 +7,7 @@ // latter class is linearized into one-dimensional buffers that are backed // by the runtime support library. -#DenseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }> +#DenseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : dense) }> #trait_2d = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir index 3091b0b8505d2..8e25cf06bcb62 100644 --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -371,7 +371,7 @@ func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor { // expected-error@+1 {{unexpected conversion mismatch in rank}} @@ -714,7 +714,7 @@ func.func @invalid_concat_size_mismatch(%arg0: tensor<2x4xf64, #DC>, // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched number of arguments in the block}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -725,7 +725,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func 
@sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Expecting Index type for argument at index 1}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -736,7 +736,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched element type between input tensor and block argument}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -747,7 +747,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched element type between input tensor and block argument}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -758,7 +758,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in number of init arguments and results}} sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 do { @@ -769,7 +769,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> ( // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, 
#DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in types of init arguments and results}} %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> i32 do { @@ -780,7 +780,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> ( // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in types of yield values and results}} %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> f32 do { diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir index 0ccce5121ce1a..fc9695f8c3c98 100644 --- a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir +++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir @@ -2,8 +2,7 @@ // RUN: mlir-opt %s -test-tensor-copy-insertion="bufferize-function-boundaries allow-return-allocs" | FileCheck %s --check-prefix=CHECK-FUNC #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (i,j)> + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func @bufferization_alloc_tensor diff --git a/mlir/test/Dialect/SparseTensor/one_trip.mlir b/mlir/test/Dialect/SparseTensor/one_trip.mlir index ad6816616c8bc..5a15be651c892 100644 --- a/mlir/test/Dialect/SparseTensor/one_trip.mlir +++ b/mlir/test/Dialect/SparseTensor/one_trip.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification -cse | FileCheck %s #Dense = #sparse_tensor.encoding<{ - lvlTypes = [ "dense" , "dense" ] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #trait_scale = { diff --git a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir 
b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir index ab334496aaad5..93fc610b64b33 100644 --- a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir @@ -5,7 +5,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func.func @expand_dense( diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir index 0c5f32b0b5510..1245cb0eeed3c 100644 --- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir @@ -9,7 +9,7 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #Slice = #sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir index cb178e4257b1c..d3f07fd298d72 100644 --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -283,7 +283,7 @@ func.func @sparse_noe(%arg0: tensor<128xf64, #SparseVector>) -> index { // ----- -#DenseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","dense"]}> +#DenseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : dense)}> // CHECK-LABEL: func @sparse_load( // CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>) @@ -296,7 +296,7 @@ func.func @sparse_load(%arg0: tensor<16x32xf64, #DenseMatrix>) -> tensor<16x32xf // ----- -#DenseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","dense"]}> +#DenseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : dense)}> // CHECK-LABEL: func @sparse_load_ins( // CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>) @@ -364,7 +364,7 @@ func.func @sparse_push_back_n(%arg0: index, %arg1: memref, %arg2: f64, %a // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = 
["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_expansion( // CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>) @@ -378,7 +378,7 @@ func.func @sparse_expansion(%tensor: tensor<8x8xf64, #SparseMatrix>) -> index { // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_compression( // CHECK-SAME: %[[A0:.*0]]: memref, @@ -402,7 +402,7 @@ func.func @sparse_compression(%values: memref, // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_out( // CHECK-SAME: %[[A:.*]]: tensor>, @@ -416,7 +416,7 @@ func.func @sparse_out(%arg0: tensor, %arg1: !llvm.ptr +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_binary( // CHECK-SAME: %[[A:.*]]: f64, %[[B:.*]]: i64) -> f64 { @@ -450,7 +450,7 @@ func.func @sparse_binary(%arg0: f64, %arg1: i64) -> f64 { // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_unary( // CHECK-SAME: %[[A:.*]]: f64) -> f64 { @@ -480,7 +480,7 @@ func.func @sparse_unary(%arg0: f64) -> f64 { // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_unary( // CHECK-SAME: %[[A:.*]]: f64) -> i64 { @@ -507,7 +507,7 @@ func.func @sparse_unary(%arg0: f64) -> i64 { // ----- -#SparseMatrix = 
#sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_reduce_2d_to_1d( // CHECK-SAME: %[[A:.*]]: f64, %[[B:.*]]: f64) -> f64 { @@ -529,7 +529,7 @@ func.func @sparse_reduce_2d_to_1d(%arg0: f64, %arg1: f64) -> f64 { // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_select( // CHECK-SAME: %[[A:.*]]: f64) -> f64 { @@ -553,7 +553,7 @@ func.func @sparse_select(%arg0: f64) -> f64 { // ----- -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @concat_sparse_sparse( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64 @@ -577,7 +577,7 @@ func.func @concat_sparse_sparse(%arg0: tensor<2x4xf64, #SparseMatrix>, // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_tensor_foreach( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64 @@ -592,7 +592,7 @@ func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // ----- -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // CHECK-LABEL: func @sparse_tensor_foreach( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{{{.*}}}>>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir index 57e7459b302a4..44c731e930274 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -1,10 +1,10 @@ // NOTE: Assertions have 
been autogenerated by utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#Tdd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }> +#Tdd = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : dense) }> #Tds = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }> -#Tsd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }> -#Tss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#Tsd = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : dense) }> +#Tss = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait2 = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir index 856c4d8a2e7d0..b3f6ae9f12ee4 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir @@ -3,7 +3,7 @@ #SpVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }> #CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }> -#Row = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }> +#Row = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : dense) }> #EncDenseVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }> #trait1 = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir index e79e1856df918..3af4614dfe93e 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --sparsification --canonicalize --cse | FileCheck %s -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", 
"compressed" ] }> #trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir index 4aecea4e0c2b4..5f412e59dba9f 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir @@ -1,16 +1,10 @@ // RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s -#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> -#SparseMatrix_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> -}> +#SparseMatrix_P = #sparse_tensor.encoding<{map = (d0, d1) -> (d1 : compressed, d0 : compressed)}> -#SparseMatrix_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> -}> +#SparseMatrix_D_P = #sparse_tensor.encoding<{map = (d0, d1) -> (d1 : dense, d0 : dense)}> // CHECK-LABEL: func.func @concat_mix_dense( // CHECK-SAME: %[[TMP_arg0:.*]]: tensor<2x4xf64>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir index af6780396c386..6213128c9782c 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir @@ -1,12 +1,9 @@ // RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \ // RUN: | FileCheck %s -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> -#DENSE = #sparse_tensor.encoding<{lvlTypes = ["dense", "dense"]}> -#DENSE_P = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"], - dimToLvl = affine_map<(i,j) -> (j,i)> -}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> +#DENSE = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : dense)}> 
+#DENSE_P = #sparse_tensor.encoding<{map = (d0, d1) -> (d1 : dense, d0 : dense)}> // CHECK-LABEL: @concat_sparse_sparse( // CHECK-SAME: %[[TMP_arg0:.*]]: tensor<2x4xf64, #sparse_tensor // CHECK-SAME: %[[TMP_arg1:.*]]: tensor<3x4xf64, #sparse_tensor diff --git a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir index 866b228917ad3..23360dac26a49 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir @@ -4,7 +4,7 @@ #map1 = affine_map<(d0, d1, d2, d3) -> (d2, d3)> #map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1)> -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func.func @conv2d_all_sparse_CSR( // CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir index 13b0cd43c775f..ee3613a268def 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir @@ -16,8 +16,7 @@ }> #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #SV = #sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir index 50a9ba64aac62..db7bef05e11bd 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --linalg-generalize-named-ops --pre-sparsification-rewrite --sparsification --sparse-tensor-conversion --canonicalize --cse | FileCheck %s -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ 
"compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func.func @fill_zero_after_alloc( // CHECK-SAME: %[[Arg0:.*]]: !llvm.ptr, diff --git a/mlir/test/Dialect/SparseTensor/sparse_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_index.mlir index 11ea4f1a470f9..bdd0225dc41c3 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_index.mlir @@ -1,11 +1,11 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #DenseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir index 8fd19c53249ec..7f14934a4ef20 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir @@ -4,7 +4,7 @@ #SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }> -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func.func @matmul1( // CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir index 04eccc7fb6d18..128c290e966b9 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir @@ -5,8 +5,7 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (i,j)> + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #SparseTensor = 
#sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir index 4dabb92aa0f47..dab70e6a3e6f1 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir @@ -10,11 +10,11 @@ // RUN: FileCheck %s --check-prefix=CHECK-PAR4 #DenseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #CSR = #sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir index f0e16189a94d0..488be3dee5838 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir @@ -4,7 +4,7 @@ // RUN: --cse --canonicalize | FileCheck %s --check-prefix=CHECK-RWT #SparseVector = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }> -#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // // roundtrip: diff --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir index 5ad1dadcfb4f7..85ab2a654098a 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // A contrived example that demonstrates the many different ways // in which scalar 
values can be involved in a sparse kernel diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir index 873181e1774c4..610ff30a48c4a 100755 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --test-tensor-copy-insertion --pre-sparsification-rewrite --sparsification --cse | FileCheck %s -#SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_matmul = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir index e1dfc17e49f11..e6bbc565b01e0 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --pre-sparsification-rewrite --sparsification --cse | FileCheck %s -#SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_matmul = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir index f026f1384b9f8..10972fdb8b84c 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \ // RUN: --cse --canonicalize | FileCheck %s -#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // CHECK: func.func @sparse_reshape( // CHECK-SAME: %[[S:.*]]: diff --git 
a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir index 71c4319aa797a..e43d77dfd8479 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #transpose_trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir index 13aaf37dcb55b..e5521228c433a 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir @@ -5,13 +5,11 @@ }> #MAT_C_C_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #MAT_C_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> // diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir index 91489b99b42bd..0170efeb33f56 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparse-compiler="vl=8" | FileCheck %s #Dense = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #matvec = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir index 17ce38edd9086..0523ce8ed9efa 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir +++ 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir @@ -30,22 +30,19 @@ // Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_C_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> #MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#MAT_C_D = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #MAT_D_D = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : dense, d0 : dense) }> #MAT_C_C_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #MAT_C_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> #MAT_D_C_P = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir index e3ec5241c9733..ba92efc6257c3 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir @@ -30,22 +30,19 @@ // Do the same run, but now with direct IR generation and VLA vectorization. 
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_C_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> #MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#MAT_C_D = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #MAT_D_D = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : dense, d0 : dense) }> #MAT_C_C_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #MAT_C_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> #MAT_D_C_P = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir index f9455dffa3ad1..e02bafe720fc7 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir @@ -27,22 +27,19 @@ // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true // RUN: %{compile} | %{run} | FileCheck %s -#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_C_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> #MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#MAT_C_D = 
#sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #MAT_D_D = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : dense, d0 : dense) }> #MAT_C_C_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #MAT_C_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> #MAT_D_C_P = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir index 9bb5f95e9c929..e0988f044454f 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir @@ -30,22 +30,19 @@ // Do the same run, but now with direct IR generation and VLA vectorization. 
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_C_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> #MAT_D_C = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#MAT_C_D = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #MAT_D_D = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : dense, d0 : dense) }> #MAT_C_C_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #MAT_C_D_P = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> #MAT_D_C_P = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir index 140c237d3e79c..89bf215a2c778 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -34,8 +34,7 @@ !Filename = !llvm.ptr #DenseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ], - dimToLvl = affine_map<(i,j) -> (i,j)> + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #SparseMatrix = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir index 77f550d35237a..0488f5186a4a7 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir @@ -30,9 +30,9 @@ 
// Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#CDR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #CSC = #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir index ed34d3f71832e..917f8a4838f4d 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}> -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // // Traits for tensor operations. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir index 82d0580e0ac06..f6c72581153bf 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir index 754a4ad0f2ba2..3203473f68b32 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir index 37cda138f6756..af2c296459d5e 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Row = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ] + map = (d0, d1) -> (d0 : compressed, d1 : dense) }> #CSR = #sparse_tensor.encoding<{ @@ -39,8 +39,7 @@ }> #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #SortedCOO = 
#sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir index ada14f4696a92..cbb4a81694602 100755 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir @@ -35,7 +35,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #Sparse3dTensor = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir index 8d5829f2c5b58..37aae871a0a94 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Tensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir index e00d74d108c71..015b7153cdefe 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir @@ -30,9 +30,9 @@ // Do the same run, but now with direct IR generation and VLA vectorization. 
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }> -#CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> +#CDR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : dense)}> #CSC = #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir index e6742d649ed00..256965c8c591c 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -31,12 +31,11 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir index e364125e3fc67..1567d1f51d8e3 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -31,14 +31,13 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], + map = (d0, d1) -> (d0 : compressed, d1 : compressed), posWidth = 8, crdWidth = 8 }> #DCSC = 
#sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)>, + map = (d0, d1) -> (d1 : compressed, d0 : compressed), posWidth = 64, crdWidth = 64 }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir index 9d47b3ab1a7ec..ae93f71e99a4a 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir @@ -35,7 +35,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #Sparse3dTensor = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir index 883ddee85a3be..ddffd9498060f 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -30,7 +30,7 @@ // Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // An example of a 2D convolution with a sparse filter. 
module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir index 1633194408ec0..8ade31d4eb048 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir @@ -36,7 +36,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_1d = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir index 5a030ac93110e..30a9c68ed2348 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir @@ -35,7 +35,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_1d = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir index a683c063e64a1..656fae6b7084d 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir @@ -28,7 +28,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Dense = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "dense"] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #SortedCOO = #sparse_tensor.encoding<{ @@ -40,11 +40,11 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #Row = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ] + map = (d0, d1) -> (d0 : compressed, d1 : dense) }> module { diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir index 35fcaeec1c616..0ae03e5c4c2c6 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir @@ -45,8 +45,7 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (i,j)> + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir index 2de3ca82dcdee..ac2a9c58220c9 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir @@ -23,7 +23,7 @@ // TODO: support lib path. #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #DCSR_SLICE = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir index 201c5020f8703..af53f66a4c2ea 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir @@ -30,7 +30,7 @@ // Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // // Traits for 2-d tensor (aka matrix) operations. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir index b42fb9fa22e10..6f70264135234 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_mult_elt = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir index 6058ad5dabe8a..7749f2845e115 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #SparseTensor = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir index 91b6b1517d8db..b466cf242da52 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -31,8 +31,7 @@ !Filename = !llvm.ptr #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (i,j)> + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #eltwise_mult = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir index c5b53c5580ca3..36e0ba888bd81 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -30,7 +30,7 @@ // Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> // An example of a quantized sparse matmul. With the zero offset for the // sparse input, the sparse compiler generates very efficient code for the diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir index 64d27dfb91bf8..74803ec0e5584 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir @@ -35,7 +35,7 @@ }> #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #Sparse3dTensor = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir index 491fadf4616c0..b1249c73806b1 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -35,7 +35,7 @@ !Filename = !llvm.ptr #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], + map = (d0, d1) -> (d0 : compressed, d1 : compressed), posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir index 8a680c30097c9..15684be1bcc1c 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir @@ -30,7 +30,7 @@ // Do the same run, but now with direct IR generation and VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_sampled_dense_dense = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir index ec06f414ce011..7f72d0659eeb0 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed"] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #sel_trait = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir index fe8043bf10630..aeb6c8cabb7d0 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -35,7 +35,7 @@ // #Dense = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense" ] + map = (d0, d1) -> (d0 : dense, d1 : dense) }> #CSR = #sparse_tensor.encoding<{ @@ -43,7 +43,7 @@ }> #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #CSC = #sparse_tensor.encoding<{ @@ 
-51,17 +51,15 @@ }> #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #BlockRow = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ] + map = (d0, d1) -> (d0 : compressed, d1 : dense) }> #BlockCol = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : dense) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir index a00f4d0018a20..99b596f869ec0 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -37,7 +37,7 @@ !Filename = !llvm.ptr #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_sum_reduce = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir index 2c3b01f100159..cbedd2300b0ee 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir @@ -34,7 +34,7 @@ !Filename = !llvm.ptr #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_sum_reduce = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir index 10a05b6a7ba01..b3a6c8a229e00 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir @@ -37,7 +37,7 @@ !Filename = !llvm.ptr #SparseMatrix = #sparse_tensor.encoding<{ - 
lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_sum_reduce = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir index cb797753cf972..dfb23d6afc64b 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir @@ -33,7 +33,7 @@ !Filename = !llvm.ptr #SparseMatrix = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #trait_sum_reduce = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir index ce9bec9df3305..b750bdf5559e2 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir @@ -31,12 +31,11 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #DCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ] + map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> #DCSC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed, d0 : compressed) }> #transpose_trait = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir index cf9113f81ef53..2df3cfb703a2b 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}> -#DCSR = #sparse_tensor.encoding<{lvlTypes = 
["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}> // // Traits for tensor operations.