-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[mlir][sparse] minor cleanup of transform/utils #75396
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
+50
−35
Conversation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Consistent include macro naming Modified and added comments
@llvm/pr-subscribers-mlir Author: Aart Bik (aartbik) ChangesConsistent include macro naming Full diff: https://github.com/llvm/llvm-project/pull/75396.diff 6 Files Affected:
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
index cd626041834b12..728af841cc7b17 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
#include "CodegenUtils.h"
#include "LoopEmitter.h"
@@ -206,4 +206,4 @@ class CodegenEnv {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index 57de437e2311c0..8dc57e1b5479bf 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
@@ -434,4 +434,4 @@ inline bool isZeroRankedTensorOrScalar(Type type) {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
index b6011727f4127c..588400f3dbaf09 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
@@ -1,4 +1,4 @@
-//===- LoopScheduler.cpp -------------------------------------------------===//
+//===- IterationGraphSorter.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -20,11 +20,10 @@ using namespace mlir::sparse_tensor;
namespace {
-/// A helper class that visits an affine expression and tries to find an
-/// AffineDimExpr to which the corresponding iterator from a GenericOp matches
-/// the desired iterator type.
-/// If there is no matched iterator type, returns the first DimExpr in the
-/// expression.
+/// A helper class that visits an affine expression and tries to find
+/// an AffineDimExpr to which the corresponding iterator from a GenericOp
+/// matches the desired iterator type. If there is no matched iterator
+/// type, the method returns the first DimExpr in the expression.
class AffineDimFinder : public AffineExprVisitor<AffineDimFinder> {
public:
explicit AffineDimFinder(ArrayRef<utils::IteratorType> itTypes)
@@ -81,11 +80,9 @@ inline static bool includesDenseOutput(SortMask mask) {
return includesAny(mask, SortMask::kIncludeDenseOutput);
}
-/// A helper to compute a topological sort. O(n^2) time complexity
-/// as we use adj matrix for the graph.
-/// The sorted result will put the first Reduction iterator to the
-/// latest possible position.
AffineMap IterationGraphSorter::topoSort() {
+ // The sorted result will put the first Reduction iterator to the
+ // latest possible position.
std::vector<unsigned> redIt; // reduce iterator with 0 degree
std::vector<unsigned> parIt; // parallel iterator with 0 degree
const unsigned numLoops = getNumLoops();
@@ -170,6 +167,7 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
// Reset the iteration graph.
for (auto &row : itGraph)
std::fill(row.begin(), row.end(), false);
+
// Reset cached in-degree.
std::fill(inDegree.begin(), inDegree.end(), 0);
@@ -179,7 +177,6 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
// Skip dense inputs when not requested.
if ((!enc && !includesDenseInput(mask)) || in == ignored)
continue;
-
addConstraints(in, map);
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
index 52ee1170293009..be94bb5dffde63 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
@@ -1,10 +1,17 @@
-//===- LoopScheduler.h -----------------------------------------*- C++ -*-===//
+//===- IterationGraphSorter.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+//
+// This header file defines the iteration graph sorter (top-sort scheduling).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
#include "mlir/IR/AffineMap.h"
@@ -21,7 +28,7 @@ class GenericOp;
namespace sparse_tensor {
-/// Iteration graph sorting.
+/// Iteration graph sorting mask.
enum class SortMask : unsigned {
// The individual mask bits.
kIncludeDenseOutput = 0x1, // b001
@@ -34,40 +41,52 @@ enum class SortMask : unsigned {
class IterationGraphSorter {
public:
- // Constructs a scheduler from linalg.generic
- // Maybe reuses the class to schedule foreach as well (to address
- // non-permutation, e.g, traverse CSR in BSR order).
+  /// Factory method that constructs an iteration graph sorter
+ /// for the given linalg.generic operation.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);
- // Returns a permutation that represents the scheduled loop order.
- // Note that the returned AffineMap could be null if the kernel can not be
- // schedule due to cycles in the iteration graph.
+ /// Returns a permutation that represents the scheduled loop order.
+ /// Note that the returned AffineMap could be null if the kernel
+  /// cannot be scheduled due to a cyclic iteration graph.
[[nodiscard]] AffineMap sort(SortMask mask, Value ignored = nullptr);
+
+ /// Returns the number of loops in the iteration graph.
unsigned getNumLoops() const { return loop2OutLvl.getNumDims(); }
private:
+ // Private constructor.
IterationGraphSorter(SmallVector<Value> &&ins,
SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl,
SmallVector<utils::IteratorType> &&iterTypes);
+ // Adds all the constraints in the given loop to level map.
void addConstraints(Value t, AffineMap loop2LvlMap);
+
+ /// A helper to compute a topological sort. The method has an
+ /// O(n^2) time complexity since we use an adjacency matrix
+ /// representation for the iteration graph.
AffineMap topoSort();
// Input tensors and associated loop to level maps.
SmallVector<Value> ins;
SmallVector<AffineMap> loop2InsLvl;
+
// Output tensor and associated loop to level map.
Value out;
AffineMap loop2OutLvl;
- // Loop type;
+
+  // Loop iteration types.
SmallVector<utils::IteratorType> iterTypes;
// Adjacency matrix that represents the iteration graph.
std::vector<std::vector<bool>> itGraph;
+
// InDegree used for topo sort.
std::vector<unsigned> inDegree;
};
} // namespace sparse_tensor
} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
index fa8b0076f733b4..78bb53e4483f60 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
#include <vector>
@@ -22,7 +22,7 @@ namespace sparse_tensor {
// A compressed <tensor id, level> pair.
using TensorLevel = unsigned;
-//===----------------------------------------------------------------------===//
+//
// SparseTensorLoopEmitter class, manages sparse tensors and helps to
// generate loop structure to (co)-iterate sparse tensors.
//
@@ -48,8 +48,7 @@ using TensorLevel = unsigned;
// loopEmiter.exitCurrentLoop(); // exit k
// loopEmiter.exitCurrentLoop(); // exit j
// loopEmiter.exitCurrentLoop(); // exit i
-//===----------------------------------------------------------------------===//
-
+//
class LoopEmitter {
public:
/// Optional callback function to setup dense output tensors when
@@ -705,4 +704,4 @@ class LoopEmitter {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
index 5c7d8aa4c9d967..3a61ec7a2236f3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
@@ -262,4 +262,4 @@ getMutDescriptorFromTensorTuple(Value tensor, SmallVectorImpl<Value> &fields) {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSODESCRIPTOR_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
|
@llvm/pr-subscribers-mlir-sparse Author: Aart Bik (aartbik) ChangesConsistent include macro naming Full diff: https://github.com/llvm/llvm-project/pull/75396.diff 6 Files Affected:
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
index cd626041834b12..728af841cc7b17 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
#include "CodegenUtils.h"
#include "LoopEmitter.h"
@@ -206,4 +206,4 @@ class CodegenEnv {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index 57de437e2311c0..8dc57e1b5479bf 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
@@ -434,4 +434,4 @@ inline bool isZeroRankedTensorOrScalar(Type type) {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
index b6011727f4127c..588400f3dbaf09 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
@@ -1,4 +1,4 @@
-//===- LoopScheduler.cpp -------------------------------------------------===//
+//===- IterationGraphSorter.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -20,11 +20,10 @@ using namespace mlir::sparse_tensor;
namespace {
-/// A helper class that visits an affine expression and tries to find an
-/// AffineDimExpr to which the corresponding iterator from a GenericOp matches
-/// the desired iterator type.
-/// If there is no matched iterator type, returns the first DimExpr in the
-/// expression.
+/// A helper class that visits an affine expression and tries to find
+/// an AffineDimExpr to which the corresponding iterator from a GenericOp
+/// matches the desired iterator type. If there is no matched iterator
+/// type, the method returns the first DimExpr in the expression.
class AffineDimFinder : public AffineExprVisitor<AffineDimFinder> {
public:
explicit AffineDimFinder(ArrayRef<utils::IteratorType> itTypes)
@@ -81,11 +80,9 @@ inline static bool includesDenseOutput(SortMask mask) {
return includesAny(mask, SortMask::kIncludeDenseOutput);
}
-/// A helper to compute a topological sort. O(n^2) time complexity
-/// as we use adj matrix for the graph.
-/// The sorted result will put the first Reduction iterator to the
-/// latest possible position.
AffineMap IterationGraphSorter::topoSort() {
+ // The sorted result will put the first Reduction iterator to the
+ // latest possible position.
std::vector<unsigned> redIt; // reduce iterator with 0 degree
std::vector<unsigned> parIt; // parallel iterator with 0 degree
const unsigned numLoops = getNumLoops();
@@ -170,6 +167,7 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
// Reset the iteration graph.
for (auto &row : itGraph)
std::fill(row.begin(), row.end(), false);
+
// Reset cached in-degree.
std::fill(inDegree.begin(), inDegree.end(), 0);
@@ -179,7 +177,6 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
// Skip dense inputs when not requested.
if ((!enc && !includesDenseInput(mask)) || in == ignored)
continue;
-
addConstraints(in, map);
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
index 52ee1170293009..be94bb5dffde63 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
@@ -1,10 +1,17 @@
-//===- LoopScheduler.h -----------------------------------------*- C++ -*-===//
+//===- IterationGraphSorter.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+//
+// This header file defines the iteration graph sorter (top-sort scheduling).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
#include "mlir/IR/AffineMap.h"
@@ -21,7 +28,7 @@ class GenericOp;
namespace sparse_tensor {
-/// Iteration graph sorting.
+/// Iteration graph sorting mask.
enum class SortMask : unsigned {
// The individual mask bits.
kIncludeDenseOutput = 0x1, // b001
@@ -34,40 +41,52 @@ enum class SortMask : unsigned {
class IterationGraphSorter {
public:
- // Constructs a scheduler from linalg.generic
- // Maybe reuses the class to schedule foreach as well (to address
- // non-permutation, e.g, traverse CSR in BSR order).
+  /// Factory method that constructs an iteration graph sorter
+ /// for the given linalg.generic operation.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);
- // Returns a permutation that represents the scheduled loop order.
- // Note that the returned AffineMap could be null if the kernel can not be
- // schedule due to cycles in the iteration graph.
+ /// Returns a permutation that represents the scheduled loop order.
+ /// Note that the returned AffineMap could be null if the kernel
+  /// cannot be scheduled due to a cyclic iteration graph.
[[nodiscard]] AffineMap sort(SortMask mask, Value ignored = nullptr);
+
+ /// Returns the number of loops in the iteration graph.
unsigned getNumLoops() const { return loop2OutLvl.getNumDims(); }
private:
+ // Private constructor.
IterationGraphSorter(SmallVector<Value> &&ins,
SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl,
SmallVector<utils::IteratorType> &&iterTypes);
+ // Adds all the constraints in the given loop to level map.
void addConstraints(Value t, AffineMap loop2LvlMap);
+
+ /// A helper to compute a topological sort. The method has an
+ /// O(n^2) time complexity since we use an adjacency matrix
+ /// representation for the iteration graph.
AffineMap topoSort();
// Input tensors and associated loop to level maps.
SmallVector<Value> ins;
SmallVector<AffineMap> loop2InsLvl;
+
// Output tensor and associated loop to level map.
Value out;
AffineMap loop2OutLvl;
- // Loop type;
+
+  // Loop iteration types.
SmallVector<utils::IteratorType> iterTypes;
// Adjacency matrix that represents the iteration graph.
std::vector<std::vector<bool>> itGraph;
+
// InDegree used for topo sort.
std::vector<unsigned> inDegree;
};
} // namespace sparse_tensor
} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
index fa8b0076f733b4..78bb53e4483f60 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
#include <vector>
@@ -22,7 +22,7 @@ namespace sparse_tensor {
// A compressed <tensor id, level> pair.
using TensorLevel = unsigned;
-//===----------------------------------------------------------------------===//
+//
// SparseTensorLoopEmitter class, manages sparse tensors and helps to
// generate loop structure to (co)-iterate sparse tensors.
//
@@ -48,8 +48,7 @@ using TensorLevel = unsigned;
// loopEmiter.exitCurrentLoop(); // exit k
// loopEmiter.exitCurrentLoop(); // exit j
// loopEmiter.exitCurrentLoop(); // exit i
-//===----------------------------------------------------------------------===//
-
+//
class LoopEmitter {
public:
/// Optional callback function to setup dense output tensors when
@@ -705,4 +704,4 @@ class LoopEmitter {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
index 5c7d8aa4c9d967..3a61ec7a2236f3 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
@@ -262,4 +262,4 @@ getMutDescriptorFromTensorTuple(Value tensor, SmallVectorImpl<Value> &fields) {
} // namespace sparse_tensor
} // namespace mlir
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSODESCRIPTOR_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
|
yinying-lisa-li
approved these changes
Dec 13, 2023
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Consistent include macro naming
Modified and added comments