diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td index 8b79fbf726495..e671c5e323a09 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -200,7 +200,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", // Sorted Coordinate Scheme. #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> ... tensor ... @@ -214,8 +214,12 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding", // Block sparse row storage (2x3 blocks). #BCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "dense", "dense" ], - dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> + map = ( i, j ) -> + ( i floordiv 2 : compressed, + j floordiv 3 : compressed, + i mod 2 : dense, + j mod 3 : dense + ) }> ... tensor<20x30xf32, #BCSR> ... 
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir index 4f25322df2c90..50ff81cb6ecd0 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification="enable-gpu-libgen" | FileCheck %s #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> module { diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir index c5061c40eb0b1..f1317f23d6568 100644 --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -27,7 +27,7 @@ }> #UCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed_no" ] + map = (d0, d1) -> (d0 : dense, d1 : compressed(nonordered)) }> #CSC = #sparse_tensor.encoding<{ @@ -41,21 +41,19 @@ }> #Dense3D = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "dense" ], - dimToLvl = affine_map<(i, j, k) -> (k, i, j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense) }> #Coo = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CooPNo = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton_no" ], - dimToLvl = affine_map<(i, j) -> (j, i)> + map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton(nonordered)) }> #ccoo = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed(nonunique), d2 : singleton) }> // CHECK-LABEL: func @sparse_nop( diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir index 479642e5db4ed..50ad4a0a1ea24 100644 --- a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir +++ 
b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s #CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}> +#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}> // CHECK-LABEL: func.func @sparse_alloc_copy_CSR( // CHECK-SAME: %[[VAL_0:.*0]]: memref, diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir index f8e30872a0756..64f66c5cb65eb 100644 --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -25,8 +25,7 @@ }> #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "compressed", "compressed"], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func @sparse_nop( diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir index 4707b199222ad..9fb1946d56263 100644 --- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir @@ -15,8 +15,7 @@ }> #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "compressed", "compressed"], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func @sparse_convert_1d( diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir index 363a63eb8ed1e..621235182c9a8 100644 --- a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir @@ -12,8 +12,7 @@ }> #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = ["dense", "compressed", 
"compressed"], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed) }> // CHECK-LABEL: func @sparse_convert_1d( diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir index 296e1bf9030c6..b3eb50f1755da 100644 --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir @@ -26,17 +26,16 @@ }> #SortedCOO2D = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ], + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), }> #SortedCOO3D = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton) }> #TsssPermuted = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed) }> #COOSlice = #sparse_tensor.encoding<{ @@ -115,13 +114,13 @@ func.func @sparse_convert(%arg0: tensor) -> tensor (d0 : singleton), posWidth = 64, crdWidth = 64 }> #SparseSingleton32 = #sparse_tensor.encoding<{ - lvlTypes = ["singleton"], + map = (d0) -> (d0 : singleton), posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir index 8e25cf06bcb62..71e6eebb30261 100644 --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -32,7 +32,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coord // ----- -#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}> func.func 
@invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) -> tensor<100x2xf64, #SparseVector> { @@ -68,7 +68,7 @@ func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: ten // ----- -#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth=32, crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) { // expected-error@+1 {{input/output trailing COO level-ranks don't match}} @@ -270,7 +270,7 @@ func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) // ----- -#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}> +#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index { // expected-error@+1 {{requested position memory size on a singleton level}} @@ -658,7 +658,7 @@ func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>, #C = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}> #DC = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}> -#DCC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed", "compressed"]}> +#DCC = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed)}> func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>, %arg1: tensor<3x4xf64, #DC>, %arg2: tensor<4x4x4xf64, #DCC>) -> tensor<9x4xf64, #DC> { diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir index 1245cb0eeed3c..5aa4acaf86393 100644 --- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir @@ -5,7 
+5,7 @@ }> #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #DCSR = #sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir index 0312758722bea..913059e2197f1 100644 --- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir @@ -10,7 +10,7 @@ }> #COO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> // CHECK-LABEL: func.func @sparse_new( diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir index d3f07fd298d72..d1262cb7aea02 100644 --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -77,7 +77,7 @@ func.func @sparse_convert_1d_to_sparse(%arg0: tensor<64xf32>) -> tensor<64xf32, // ----- -#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }> +#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }> // CHECK-LABEL: func @sparse_convert_3d_from_sparse( // CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>) @@ -103,7 +103,7 @@ func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref +#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}> // CHECK-LABEL: func @sparse_indices_buffer( // CHECK-SAME: %[[A:.*]]: tensor) diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir index 1cc5c0e3f6152..60367b43a6ee0 100644 --- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir @@ -85,8 +85,12 @@ func.func private 
@sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>) // ----- #BCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "dense", "dense" ], - dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> + map = ( i, j ) -> + ( i floordiv 2 : compressed, + j floordiv 3 : compressed, + i mod 2 : dense, + j mod 3 : dense + ) }> // CHECK-LABEL: func private @sparse_bcsr( diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir index e4bc55fcdb062..95c410a08b0e4 100644 --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #trait_scale = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir index 44c731e930274..00bf4ea440628 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -1050,7 +1050,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T } #BatchedVector = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed_hi" ], + map = (d0, d1) -> (d0 : dense, d1 : compressed(high)) }> // CHECK-LABEL: func.func @sub_ss_batched( // CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir index 9019b9984d8f6..3513bc48ffc0e 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir @@ -3,14 +3,14 @@ #Td = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }> -#Tddd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }> -#Tdds = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", 
"compressed" ] }> -#Tdsd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }> -#Tdss = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }> -#Tsdd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }> -#Tsds = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }> -#Tssd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }> -#Tsss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }> +#Tddd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : dense) }> +#Tdds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) }> +#Tdsd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense) }> +#Tdss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed) }> +#Tsdd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : dense) }> +#Tsds = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) }> +#Tssd = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense) }> +#Tsss = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #trait3 = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir index 3af4614dfe93e..ae5b941259f65 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --sparsification --canonicalize --cse | FileCheck %s #DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }> -#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }> +#SparseTensor = 
#sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #trait = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir index baa30c9457d2d..822cfb0148f24 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir @@ -141,7 +141,7 @@ func.func @foreach_print_slice(%A: tensor<4x4xf64, #CSR_SLICE>) { } #BCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ], + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton) }> // CHECK-LABEL: func.func @foreach_bcoo( diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir index 742d42be3f8c5..d05cebd55ce31 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir @@ -5,8 +5,10 @@ // but an acyclic iteration graph using sparse constraints only. 
#SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "dense", "compressed", - "compressed", "dense", "dense", "dense" ] + map = (d0, d1, d2, d3, + d4, d5, d6, d7) -> (d0 : dense, d1 : dense, d2 : dense, + d3 : compressed, d4 : compressed, d5 : dense, + d6 : dense, d7 : dense) }> #trait_mul = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir index 128c290e966b9..97d8da213423d 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir @@ -9,7 +9,7 @@ }> #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #trait_scale_inpl = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir index 45906f4983567..8caf756362777 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse --canonicalize | FileCheck %s #COO = #sparse_tensor.encoding<{ - lvlTypes = ["compressed_nu", "singleton"], + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), crdWidth=32 }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir index 438f2c496d891..26712f2c7b001 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir @@ -2,8 +2,7 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #X = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "dense" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense) }> #trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir 
b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir index 2e3d723889cdd..fd8aaf28697e4 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir @@ -4,8 +4,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-MIR #X = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "dense" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : dense, d1 : dense) }> #trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir index 43cf36fc0ac72..395a915f32806 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir @@ -4,8 +4,8 @@ // // RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s -#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }> // CHECK-LABEL: func.func @sparse_reshape_fused( diff --git a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir index 2cef1afad2b89..7112d7dc8ca90 100644 --- a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir +++ b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir @@ -15,8 +15,8 @@ } #VEC = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 32, crdWidth = 32 }> -#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, 
crdWidth = 32 }> -#CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }> +#COO = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }> +#CCC = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed), posWidth = 32, crdWidth = 32 }> // // This kernel can be sparsified as all unsparsifiable operations' diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir index a530f61959c20..27c2793a7688d 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir @@ -33,8 +33,8 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} -#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton), posWidth = 32, crdWidth = 32 }> module { func.func private @printMemref3dF32(%ptr : tensor) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir index af2c296459d5e..801c041eef1e3 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -43,17 +43,15 @@ }> #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ 
"compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #SortedCOOPerm = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton) }> #CCCPerm = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed"], - dimToLvl = affine_map<(d0, d1, d2) -> (d1, d2, d0)> + map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir index cbb4a81694602..bb0b69286d2b6 100755 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir @@ -39,11 +39,11 @@ }> #Sparse3dTensor = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed", "compressed"] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #Sparse4dTensor = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed", "compressed", "compressed"] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir index a2eafb8cc134c..8ed81cef82802 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir @@ -31,10 +31,11 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #CCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ] }> + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #CDC = #sparse_tensor.encoding<{ - 
lvlTypes = [ "compressed", "dense", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) + // FIXME: Still inadmissible might need investigation // dimToLvl = affine_map<(i,j,k) -> (j,k,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir index 0d2e2582bd371..18a32ab8b503a 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir @@ -35,12 +35,12 @@ #CDCD = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed", "dense" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : dense, d2 : compressed, d3 : dense) }> #CCCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed) }> // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir index c5ce85593d845..3f9db4e144e14 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir @@ -31,15 +31,15 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #CCCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed) }> #CDCD = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed", "dense" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : dense, d2 : compressed, d3 : dense) }> #DCCD = 
#sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "compressed", "dense" ] + map = (d0, d1, d2, d3) -> (d0 : dense, d1 : compressed, d2 : compressed, d3 : dense) }> // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir index 3695bf3ffa3d6..cde0d4a750cdc 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir @@ -31,15 +31,15 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #CCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #CDC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) }> #DDC = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "compressed" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed) }> // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir index 97b73175dc5f5..aa153f98339ba 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir @@ -31,11 +31,11 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #CCCCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed", "compressed", "compressed" ] + map = (d0, d1, d2, d3, d4) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed, d4 : compressed) 
}> #CDCDC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed", "dense", "compressed"] + map = (d0, d1, d2, d3, d4) -> (d0 : compressed, d1 : dense, d2 : compressed, d3 : dense, d4 : compressed) }> // Creates and returns 5-D buffer of size (%s1, %s2, %s3, %s4, %s5) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir index 783117954bc68..fe237588fff22 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -31,18 +31,15 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Tensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (i,j,k)> + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #Tensor2 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (j,k,i)> + map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed) }> #Tensor3 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir index 2ba3e57940bef..16eaca7663aaf 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir @@ -29,16 +29,15 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Tensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton_nu", 
"singleton" ] + map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton) }> #Tensor2 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "dense" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense) }> #Tensor3 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (i,k,j)> + map = (d0, d1, d2) -> (d0 : dense, d2 : dense, d1 : compressed) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir index 0e5de2e5ec9e8..04299101e40e4 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -31,33 +31,27 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Tensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (i,j,k)> + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #Tensor2 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (j,k,i)> + map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed), }> #Tensor3 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed), }> #Tensor4 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (i,j,k)> + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : compressed) }> #Tensor5 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (j,k,i)> + map = (d0, d1, d2) 
-> (d1 : dense, d2 : compressed, d0 : compressed) }> #Tensor6 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir index 0f953a3c4e3c0..1c74b6827d980 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir @@ -32,32 +32,37 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #Tensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "compressed" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) + }> // NOTE: dense after compressed is not currently supported for the target // of direct-sparse2sparse conversion. (It's fine for the source though.) #Tensor2 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "dense" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense) + }> #Tensor3 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "compressed" ], - dimToLvl = affine_map<(i,j,k) -> (i,k,j)> + map = (d0, d1, d2) -> (d0 : dense, d2 : dense, d1 : compressed) + }> #SingletonTensor1 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed", "singleton" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : singleton) + }> // This also checks the compressed->dense conversion (when there are zeros). #SingletonTensor2 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "singleton" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : singleton) + }> // This also checks the singleton->compressed conversion. 
#SingletonTensor3 = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "dense", "compressed" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed) + }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir index fdbe84b9d332c..a1d8ab53fcba1 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CSR = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir index ae93f71e99a4a..c642f537411f4 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir @@ -39,11 +39,11 @@ }> #Sparse3dTensor = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed", "compressed"] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #Sparse4dTensor = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed", "compressed", "compressed"] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed) }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir index 56a26ec08b610..60f2e22ab4a82 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -34,12 +34,14 @@ !Filename = !llvm.ptr #SparseTensor = #sparse_tensor.encoding<{ - 
lvlTypes = [ "compressed", "compressed", "compressed", "compressed", - "compressed", "compressed", "compressed", "compressed" ], // Note that any dimToLvl permutation should give the same results // since, even though it impacts the sparse storage scheme layout, // it should not change the semantics. - dimToLvl = affine_map<(i,j,k,l,m,n,o,p) -> (p,o,j,k,i,l,m,n)> + map = (d0, d1, d2, d3, + d4, d5, d6, d7) -> (d7 : compressed, d6 : compressed, + d1 : compressed, d2 : compressed, + d0 : compressed, d3 : compressed, + d4 : compressed, d5 : compressed) }> #trait_flatten = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir index cc472fc2ed848..d10ae8aee8141 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir @@ -38,7 +38,7 @@ }> #COO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #COO_SLICE = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir index 656fae6b7084d..a96e1a93cc56b 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir @@ -32,7 +32,7 @@ }> #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CSR = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir index 6c39796ac6648..d71ebf98cc943 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir +++ 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir @@ -28,19 +28,19 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #TensorCSR = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : dense, d2 : compressed) }> #TensorRow = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "dense" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense) }> #CCoo = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed(nonunique), d2 : singleton) }> #DCoo = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique), d2 : singleton) }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir index ac2a9c58220c9..6be70c3e41856 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir @@ -41,7 +41,7 @@ }> #COO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CSR_SLICE_1 = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir index e91fa97586b8c..306b88149e736 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir @@ -35,7 +35,7 @@ !Filename = !llvm.ptr #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #mttkrp = { diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir index 7749f2845e115..f58094c8303e8 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir @@ -35,7 +35,7 @@ }> #SparseTensor = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed" ] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> #redsum = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir index 171c4afb3ea70..c464d01bf2ab3 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir @@ -27,11 +27,11 @@ // TODO: support sparse_tensor.unpack on libgen path. #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #SortedCOOI32 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ], + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }> @@ -43,7 +43,7 @@ }> #BCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir index 9a48e7d466f92..b24dd8304b883 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir @@ -27,11 +27,11 @@ // after sparse_tensor.unpack is supported on libgen path. 
#SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #SortedCOOI32 = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ], + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton), posWidth = 32, crdWidth = 32 }> @@ -44,7 +44,7 @@ // TODO: "compressed_hi" is not supported by libgen path. // #BCOO = #sparse_tensor.encoding<{ -// lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ] +// map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton) //}> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir index fb1f766495529..adea21aa8bdd0 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir @@ -24,7 +24,7 @@ // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true // RUN: %{compile} | %{run} | FileCheck %s -#CCCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }> +#CCCC = #sparse_tensor.encoding<{ map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed), posWidth = 32, crdWidth = 32 }> func.func @pooling_nhwc_sum_CCCC(%input: tensor<1x4x4x1xf32, #CCCC>, %filter: tensor<2x2xf32>) -> tensor<1x3x3x1xf32, #CCCC> { %init = bufferization.alloc_tensor() : tensor<1x3x3x1xf32, #CCCC> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir index 74803ec0e5584..cf6cee2f3b733 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir @@ 
-39,7 +39,7 @@ }> #Sparse3dTensor = #sparse_tensor.encoding<{ - lvlTypes = ["compressed", "compressed", "compressed"] + map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir index f451ae43cdef8..615c1d7bc4798 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir @@ -31,7 +31,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)> -#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }> module @func_sparse.2 { // Do elementwise x+1 when true, x-1 when false diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir index 177272e45a10c..b789450b4f88b 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir @@ -35,21 +35,20 @@ !Filename = !llvm.ptr #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #SortedCOOPermuted = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ], - dimToLvl = affine_map<(i,j) -> (j,i)> + map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton) }> #SortedCOO3D = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ] + map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton) }> #SortedCOO3DPermuted = #sparse_tensor.encoding<{ -
lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], - dimToLvl = affine_map<(i,j,k) -> (k,i,j)> + map = (d0, d1, d2) -> (d2 : compressed(nonunique), d0 : singleton(nonunique), d1 : singleton) + }> #trait_scale = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir index 748b740d99abd..4c2af8cfa4327 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_strided_conv_2d_nhwc_hwcf.mlir @@ -31,11 +31,11 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #CCCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : compressed, d2 : compressed, d3 : compressed) }> #CDCC = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed", "dense", "compressed", "compressed" ] + map = (d0, d1, d2, d3) -> (d0 : compressed, d1 : dense, d2 : compressed, d3 : compressed) }> // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir index 9da1405163cb0..b37dc9e3d6316 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir @@ -30,7 +30,7 @@ // Do the same run, but now with VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#ST = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}> +#ST = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)}> // // Trait for 3-d tensor element wise multiplication. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir index b0a3691adae34..4d4ae1f19f963 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir @@ -30,8 +30,8 @@ // Do the same run, but now with VLA vectorization. // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} -#ST1 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}> -#ST2 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "dense"]}> +#ST1 = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)}> +#ST2 = #sparse_tensor.encoding<{map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : dense)}> // // Trait for 3-d tensor operation. diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir index 5cb3e5e6cd45d..e2e474a5492ed 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir @@ -32,7 +32,7 @@ // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir index bb7efc8c3c2ae..6782f2d0e2014 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir @@ -25,7 +25,7 @@ // RUNNOT: %{compile} enable-runtime-library=false 
gpu-data-transfer-strategy=zero-copy" | %{run} #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CSR = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir index 409eb96972971..eadc408fcc441 100644 --- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir @@ -25,7 +25,7 @@ // #SortedCOO = #sparse_tensor.encoding<{ - lvlTypes = [ "compressed_nu", "singleton" ] + map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }> #CSR = #sparse_tensor.encoding<{