[mlir][sparse] deallocate tmp coo buffer generated during stage-spars… #82017

Merged: 2 commits, Feb 17, 2024

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorInterfaces.h

@@ -1,5 +1,4 @@
-//===- SparseTensorInterfaces.h - sparse tensor operations
-//interfaces-------===//
+//===- SparseTensorInterfaces.h - sparse tensor operations interfaces------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -20,7 +19,7 @@ class StageWithSortSparseOp;

 namespace detail {
 LogicalResult stageWithSortImpl(sparse_tensor::StageWithSortSparseOp op,
-                                PatternRewriter &rewriter);
+                                PatternRewriter &rewriter, Value &tmpBufs);
 } // namespace detail
 } // namespace sparse_tensor
 } // namespace mlir

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorInterfaces.td

@@ -34,9 +34,10 @@ def StageWithSortSparseOpInterface : OpInterface<"StageWithSortSparseOp"> {
       /*desc=*/"Stage the operation, return the final result value after staging.",
       /*retTy=*/"::mlir::LogicalResult",
       /*methodName=*/"stageWithSort",
-      /*args=*/(ins "::mlir::PatternRewriter &":$rewriter),
+      /*args=*/(ins "::mlir::PatternRewriter &":$rewriter,
+                    "Value &":$tmpBuf),
       /*methodBody=*/[{
-        return detail::stageWithSortImpl($_op, rewriter);
+        return detail::stageWithSortImpl($_op, rewriter, tmpBuf);
       }]>,
   ];
 }

23 changes: 17 additions & 6 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp

@@ -16,9 +16,14 @@ using namespace mlir::sparse_tensor;

 #include "mlir/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp.inc"

-LogicalResult
-sparse_tensor::detail::stageWithSortImpl(StageWithSortSparseOp op,
-                                         PatternRewriter &rewriter) {
+/// Stages the operation into a sequence of simpler operations, as follows:
+///   op -> unsorted_coo +
+///   unsorted_coo -> sorted_coo +
+///   sorted_coo -> dstTp.
+///
+/// Returns the intermediate buffer in `tmpBufs` if one is allocated.
+LogicalResult sparse_tensor::detail::stageWithSortImpl(
+    StageWithSortSparseOp op, PatternRewriter &rewriter, Value &tmpBufs) {
   if (!op.needsExtraSort())
     return failure();

@@ -44,9 +49,15 @@ sparse_tensor::detail::stageWithSortImpl(StageWithSortSparseOp op,
     rewriter.replaceOp(op, dstCOO);
   } else {
     // Need an extra conversion if the target type is not COO.
-    rewriter.replaceOpWithNewOp<ConvertOp>(op, finalTp, dstCOO);
+    auto c = rewriter.replaceOpWithNewOp<ConvertOp>(op, finalTp, dstCOO);
+    rewriter.setInsertionPointAfter(c);
+    // Inform the caller about the intermediate buffer we allocated. We cannot
+    // create a bufferization::DeallocTensorOp here because it would introduce
+    // a cyclic dependency between the SparseTensorDialect and the
+    // BufferizationDialect. Besides, whether the buffer needs to be
+    // deallocated by the SparseTensorDialect or by BufferDeallocationPass is
+    // still TBD.
+    tmpBufs = dstCOO;
   }
-  // TODO: deallocate extra COOs, we should probably delegate it to buffer
-  // deallocation pass.

   return success();
 }
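
For orientation, here is a minimal sketch (not part of the patch) of the staged IR this produces when the target type is not COO. The `#UnorderedCOO`, `#OrderedCOO`, and `#CSR` encoding aliases are assumed to be defined elsewhere, and operand details are abbreviated:

  // Hypothetical input: %dst = sparse_tensor.convert %src
  //                             : tensor<?x?xf64> to tensor<?x?xf64, #CSR>
  // Stage 1: op -> unsorted_coo.
  %coo = sparse_tensor.convert %src
           : tensor<?x?xf64> to tensor<?x?xf64, #UnorderedCOO>
  // Stage 2: unsorted_coo -> sorted_coo; this result is the temporary buffer
  // handed back through `tmpBufs`.
  %tmp = sparse_tensor.reorder_coo quick_sort %coo
           : tensor<?x?xf64, #UnorderedCOO> to tensor<?x?xf64, #OrderedCOO>
  // Stage 3: sorted_coo -> dstTp (the extra ConvertOp created above).
  %dst = sparse_tensor.convert %tmp
           : tensor<?x?xf64, #OrderedCOO> to tensor<?x?xf64, #CSR>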

mlir/lib/Dialect/SparseTensor/Transforms/StageSparseOperations.cpp

@@ -6,6 +6,7 @@
 //
 //===----------------------------------------------------------------------===//

+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"

@@ -21,8 +22,16 @@ struct StageUnorderedSparseOps : public OpRewritePattern<StageWithSortOp> {

   LogicalResult matchAndRewrite(StageWithSortOp op,
                                 PatternRewriter &rewriter) const override {
-    return llvm::cast<StageWithSortSparseOp>(op.getOperation())
-        .stageWithSort(rewriter);
+    Location loc = op.getLoc();
+    Value tmpBuf = nullptr;
+    auto itOp = llvm::cast<StageWithSortSparseOp>(op.getOperation());
+    LogicalResult stageResult = itOp.stageWithSort(rewriter, tmpBuf);
+    // Deallocate tmpBuf.
+    // TODO: Delegate this to the buffer deallocation pass in the future.
+    if (succeeded(stageResult) && tmpBuf)
+      rewriter.create<bufferization::DeallocTensorOp>(loc, tmpBuf);
+
+    return stageResult;
   }
 };
 } // namespace
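
Continuing the same assumed example, when staging reports a temporary buffer through `tmpBuf`, the pattern appends the deallocation right after the final conversion (at the insertion point set in stageWithSortImpl):

  %dst = sparse_tensor.convert %tmp
           : tensor<?x?xf64, #OrderedCOO> to tensor<?x?xf64, #CSR>
  // Emitted by StageUnorderedSparseOps when `tmpBuf` is non-null:
  bufferization.dealloc_tensor %tmp : tensor<?x?xf64, #OrderedCOO>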

3 changes: 2 additions & 1 deletion mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir

@@ -82,10 +82,11 @@ func.func @sparse_constant_csc() -> tensor<8x7xf32, #CSC>{
 // CHECK: scf.if
 // CHECK: tensor.insert
 // CHECK: sparse_tensor.load
-// CHECK: sparse_tensor.reorder_coo
+// CHECK: %[[TMP:.*]] = sparse_tensor.reorder_coo
 // CHECK: sparse_tensor.foreach
 // CHECK: tensor.insert
 // CHECK: sparse_tensor.load
+// CHECK: bufferization.dealloc_tensor %[[TMP]]
 func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
   return %0 : tensor<?x?x?xf64, #SparseTensor>