Skip to content

Commit 13802d4

Browse files
update function name
1 parent fcc828d commit 13802d4

File tree

2 files changed

+22
-6
lines changed

2 files changed

+22
-6
lines changed

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -472,8 +472,11 @@ class SparseInsertGenerator
472472
llvm::raw_svector_ostream nameOstream(nameBuffer);
473473
nameOstream << kInsertFuncNamePrefix;
474474
const Level lvlRank = stt.getLvlRank();
475-
for (Level l = 0; l < lvlRank; l++)
476-
nameOstream << toMLIRString(stt.getLvlType(l)) << "_";
475+
for (Level l = 0; l < lvlRank; l++) {
476+
std::string lvlType = toMLIRString(stt.getLvlType(l));
477+
replaceWithUnderscore(lvlType);
478+
nameOstream << lvlType << "_";
479+
}
477480
// Static dim sizes are used in the generated code while dynamic sizes are
478481
// loaded from the dimSizes buffer. This is the reason for adding the shape
479482
// to the function name.
@@ -489,6 +492,19 @@ class SparseInsertGenerator
489492

490493
private:
491494
TensorType rtp;
495+
// Sanitizes a level-type string (e.g. "compressed(nonunique, nonordered)")
// so it is legal inside a generated function name: '(' and ',' are turned
// into '_', while ')' and spaces are dropped entirely, yielding e.g.
// "compressed_nonunique_nonordered". Modifies `lvlType` in place.
//
// Rebuilding the string is linear and avoids the previous pattern of
// calling erase() inside the iteration loop, which is quadratic and
// depends on delicate manual iterator bookkeeping after invalidation.
void replaceWithUnderscore(std::string &lvlType) {
  std::string sanitized;
  sanitized.reserve(lvlType.size()); // at most the original length survives
  for (const char c : lvlType) {
    if (c == '(' || c == ',')
      sanitized.push_back('_'); // separators become underscores
    else if (c != ')' && c != ' ')
      sanitized.push_back(c); // ')' and spaces are dropped
  }
  lvlType = std::move(sanitized);
}
492508
};
493509

494510
/// Generates insertion finalization code.

mlir/test/Dialect/SparseTensor/codegen.mlir

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -507,7 +507,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
507507
return %1 : tensor<8x8xf64, #CSR>
508508
}
509509

510-
// CHECK-LABEL: func.func private @"_insert_dense_compressed(nonordered)_8_8_f64_0_0"(
510+
// CHECK-LABEL: func.func private @_insert_dense_compressed_nonordered_8_8_f64_0_0(
511511
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
512512
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
513513
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
533533
// CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
534534
// CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
535535
// CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
536-
// CHECK: %[[A21:.*]]:4 = func.call @"_insert_dense_compressed(nonordered)_8_8_f64_0_0"(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
536+
// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_nonordered_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
537537
// CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
538538
// CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
539539
// CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -611,7 +611,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
611611
return %1 : tensor<128xf64, #SparseVector>
612612
}
613613

614-
// CHECK-LABEL: func.func private @"_insert_compressed(nonunique)_singleton_5_6_f64_0_0"(
614+
// CHECK-LABEL: func.func private @_insert_compressed_nonunique_singleton_5_6_f64_0_0(
615615
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
616616
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
617617
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
627627
// CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
628628
// CHECK-SAME: %[[A4:.*4]]: index,
629629
// CHECK-SAME: %[[A5:.*5]]: f64)
630-
// CHECK: %[[R:.*]]:4 = call @"_insert_compressed(nonunique)_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
630+
// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nonunique_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
631631
// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
632632
func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
633633
%0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>

0 commit comments

Comments
 (0)