10 changes: 4 additions & 6 deletions mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -791,9 +791,8 @@ struct FoldFillWithPad final : public OpRewritePattern<tensor::PadOp> {
         tensor::EmptyOp::create(rewriter, padOp.getLoc(), reifiedShape.front(),
                                 padOp.getResultType().getElementType());
     Value replacement =
-        rewriter
-            .create<FillOp>(fillOp.getLoc(), ValueRange{padValue},
-                            ValueRange{emptyTensor})
+        FillOp::create(rewriter, fillOp.getLoc(), ValueRange{padValue},
+                       ValueRange{emptyTensor})
             .getResult(0);
     if (replacement.getType() != padOp.getResultType()) {
       replacement = tensor::CastOp::create(rewriter, fillOp.getLoc(),
@@ -2154,9 +2153,8 @@ struct SwapTransposeWithBroadcast : OpRewritePattern<linalg::TransposeOp> {
 
     // Create broadcast(transpose(input)).
     Value transposeResult =
-        rewriter
-            .create<TransposeOp>(loc, broadcastOp.getInput(), transposeInit,
-                                 resultPerms)
+        TransposeOp::create(rewriter, loc, broadcastOp.getInput(),
+                            transposeInit, resultPerms)
             ->getResult(0);
     rewriter.replaceOpWithNewOp<BroadcastOp>(
         transposeOp, transposeResult, transposeOp.getInit(), resultDimensions);
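Taken together, these hunks apply one mechanical rewrite: the member-template builder call `rewriter.create<OpTy>(...)` becomes the static `OpTy::create(rewriter, ...)`, with the builder passed as the first argument and all remaining arguments unchanged. A minimal sketch of the two spellings, modeled on the FillOp hunk above (assumes `rewriter`, `loc`, `padValue`, and `emptyTensor` are in scope; not a verbatim excerpt of the sources):

```cpp
// Old spelling: the rewriter's member template names the op type.
Value oldStyle = rewriter
                     .create<linalg::FillOp>(loc, ValueRange{padValue},
                                             ValueRange{emptyTensor})
                     .getResult(0);

// New spelling: a static create on the op class takes the builder first.
// The trailing arguments and the produced op are identical.
Value newStyle = linalg::FillOp::create(rewriter, loc, ValueRange{padValue},
                                        ValueRange{emptyTensor})
                     .getResult(0);
```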
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -4133,9 +4133,8 @@ DiagnosedSilenceableFailure doit(RewriterBase &rewriter, OpTy target,
   Value extracted = tensor::ExtractSliceOp::create(
       rewriter, target.getLoc(), target.getDest(), target.getMixedOffsets(),
       target.getMixedSizes(), target.getMixedStrides());
-  Value copied = rewriter
-                     .create<linalg::CopyOp>(target.getLoc(),
-                                             target.getSource(), extracted)
+  Value copied = linalg::CopyOp::create(rewriter, target.getLoc(),
+                                        target.getSource(), extracted)
                      .getResult(0);
   // Reset the insertion point.
   rewriter.setInsertionPoint(target);
@@ -1143,10 +1143,9 @@ pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp,
 
   // Insert an unPackOp right after the packed generic.
   Value unPackOpRes =
-      rewriter
-          .create<linalg::UnPackOp>(genericOp.getLoc(), newResult,
-                                    destPack.getSource(), innerDimsPos,
-                                    mixedTiles, outerDimsPerm)
+      linalg::UnPackOp::create(rewriter, genericOp.getLoc(), newResult,
+                               destPack.getSource(), innerDimsPos, mixedTiles,
+                               outerDimsPerm)
           .getResult();
 
   return std::make_tuple(newGenericOp, unPackOpRes);
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -267,8 +267,8 @@ expandValue(RewriterBase &rewriter, Location loc, Value result, Value origDest,
   assert(rankReductionStrategy ==
              ControlDropUnitDims::RankReductionStrategy::ReassociativeReshape &&
          "unknown rank reduction strategy");
-  return rewriter
-      .create<tensor::ExpandShapeOp>(loc, origResultType, result, reassociation)
+  return tensor::ExpandShapeOp::create(rewriter, loc, origResultType, result,
+                                       reassociation)
       .getResult();
 }
 
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1572,12 +1572,12 @@ static Value getCollapsedOpOperand(Location loc, LinalgOp op,
 
   // Insert a reshape to collapse the dimensions.
   if (isa<MemRefType>(operand.getType())) {
-    return builder
-        .create<memref::CollapseShapeOp>(loc, operand, operandReassociation)
+    return memref::CollapseShapeOp::create(builder, loc, operand,
+                                           operandReassociation)
         .getResult();
   }
-  return builder
-      .create<tensor::CollapseShapeOp>(loc, operand, operandReassociation)
+  return tensor::CollapseShapeOp::create(builder, loc, operand,
+                                         operandReassociation)
       .getResult();
 }
 
@@ -81,9 +81,8 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
                  ArrayRef<ReassociationIndices> reassociation) const {
     if (operand.getType() == newOperandType)
       return operand;
-    return rewriter
-        .create<tensor::ExpandShapeOp>(loc, newOperandType, operand,
-                                       reassociation)
+    return tensor::ExpandShapeOp::create(rewriter, loc, newOperandType, operand,
+                                         reassociation)
         .getResult();
   }
 
11 changes: 5 additions & 6 deletions mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -333,17 +333,16 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
   for (auto it :
        llvm::zip(paddedSubtensorResults, opToPad.getDpsInitsMutable())) {
     if (options.copyBackOp == LinalgPaddingOptions::CopyBackOp::LinalgCopy) {
-      replacements.push_back(rewriter
-                                 .create<linalg::CopyOp>(loc, std::get<0>(it),
-                                                         std::get<1>(it).get())
+      replacements.push_back(linalg::CopyOp::create(rewriter, loc,
+                                                    std::get<0>(it),
+                                                    std::get<1>(it).get())
                                  .getResult(0));
     } else if (options.copyBackOp ==
                LinalgPaddingOptions::CopyBackOp::
                    BufferizationMaterializeInDestination) {
       replacements.push_back(
-          rewriter
-              .create<bufferization::MaterializeInDestinationOp>(
-                  loc, std::get<0>(it), std::get<1>(it).get())
+          bufferization::MaterializeInDestinationOp::create(
+              rewriter, loc, std::get<0>(it), std::get<1>(it).get())
               ->getResult(0));
     } else {
       llvm_unreachable("unsupported copy back op");
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -947,9 +947,9 @@ DecomposePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   auto getIdxValue = [&](OpFoldResult ofr) {
     if (auto val = llvm::dyn_cast_if_present<Value>(ofr))
       return val;
-    return rewriter
-        .create<arith::ConstantIndexOp>(
-            padOp.getLoc(), cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
+    return arith::ConstantIndexOp::create(
+               rewriter, padOp.getLoc(),
+               cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
         .getResult();
   };
 
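Both the hunk above and the VectorOps.cpp hunk further down touch the same idiom: materializing an `OpFoldResult` that may hold either a `Value` or an `IntegerAttr` as an index `Value`. A minimal sketch of that idiom in the new static-create spelling (assumes `rewriter` and `loc` are in scope; an illustration, not a copy of either call site):

```cpp
// Materialize an OpFoldResult as an index Value: reuse the Value if one is
// already present, otherwise emit an arith.constant from the attribute.
auto materializeIndex = [&](OpFoldResult ofr) -> Value {
  if (auto val = llvm::dyn_cast_if_present<Value>(ofr))
    return val;
  return arith::ConstantIndexOp::create(
             rewriter, loc, cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
      .getResult();
};
```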
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
@@ -70,9 +70,8 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
     input = tensor::EmptyOp::create(rewriter, loc, newFilterShape, elementTy)
                 .getResult();
   } else {
-    input = rewriter
-                .create<memref::AllocOp>(
-                    loc, MemRefType::get(newFilterShape, elementTy))
+    input = memref::AllocOp::create(rewriter, loc,
+                                    MemRefType::get(newFilterShape, elementTy))
                 .getResult();
   }
 
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -3714,8 +3714,8 @@ struct Conv1DGenerator
       }
     }
 
-    return rewriter
-        .create<vector::TransferWriteOp>(loc, res, resShaped, resPadding)
+    return vector::TransferWriteOp::create(rewriter, loc, res, resShaped,
+                                           resPadding)
         .getOperation();
   }
 
75 changes: 33 additions & 42 deletions mlir/lib/Dialect/Linalg/Transforms/WinogradConv2D.cpp
@@ -398,10 +398,9 @@ Value filterTransform(RewriterBase &rewriter, Location loc, Value filter,
 
       retRows = GMatrix.rows;
       auto matmulType = RankedTensorType::get({retRows, filterW}, elementType);
-      auto empty =
-          builder
-              .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -422,10 +421,9 @@
 
     auto matmulType =
         RankedTensorType::get({retRows, GTMatrix.cols}, elementType);
-    auto empty =
-        builder
-            .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
-            .getResult();
+    auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                         elementType)
+                     .getResult();
     auto init =
         linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -547,10 +545,9 @@ Value inputTransform(RewriterBase &rewriter, Location loc, Value input,
 
       retRows = BTMatrix.rows;
       auto matmulType = RankedTensorType::get({retRows, alphaW}, elementType);
-      auto empty =
-          builder
-              .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
-              .getResult();
+      auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                           elementType)
+                       .getResult();
       auto init =
           linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
 
@@ -572,10 +569,9 @@
 
     retCols = BMatrix.cols;
     auto matmulType = RankedTensorType::get({retRows, retCols}, elementType);
-    auto empty =
-        builder
-            .create<tensor::EmptyOp>(loc, matmulType.getShape(), elementType)
-            .getResult();
+    auto empty = tensor::EmptyOp::create(builder, loc, matmulType.getShape(),
+                                         elementType)
+                     .getResult();
     auto init =
         linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
     Value B =
@@ -661,9 +657,8 @@ static Value matrixMultiply(RewriterBase &rewriter, Location loc,
       {inputShape[0] * inputShape[1],
        inputShape[2] * inputShape[3] * inputShape[4], filterShape[3]},
       outputElementType);
-  Value empty = rewriter
-                    .create<tensor::EmptyOp>(loc, matmulType.getShape(),
-                                             outputElementType)
+  Value empty = tensor::EmptyOp::create(rewriter, loc, matmulType.getShape(),
+                                        outputElementType)
                     .getResult();
   Value zero = arith::ConstantOp::create(
       rewriter, loc, rewriter.getZeroAttr(outputElementType));
@@ -782,9 +777,8 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
     auto matmulType = RankedTensorType::get({retRows, valueW}, elementType);
     Value init = outInitVal;
     if (rightTransform || scalarFactor != 1) {
-      auto empty = builder
-                       .create<tensor::EmptyOp>(loc, matmulType.getShape(),
-                                                elementType)
+      auto empty = tensor::EmptyOp::create(builder, loc,
+                                           matmulType.getShape(), elementType)
                        .getResult();
       init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
     }
@@ -802,9 +796,8 @@
         RankedTensorType::get({retRows, AMatrix.cols}, elementType);
     Value init = outInitVal;
     if (scalarFactor != 1) {
-      auto empty = builder
-                       .create<tensor::EmptyOp>(loc, matmulType.getShape(),
-                                                elementType)
+      auto empty = tensor::EmptyOp::create(builder, loc,
+                                           matmulType.getShape(), elementType)
                        .getResult();
       init = linalg::FillOp::create(builder, loc, zero, empty).getResult(0);
     }
@@ -827,23 +820,21 @@ Value outputTransform(RewriterBase &rewriter, Location loc, Value value,
           AffineMap::get(2, 0, context), identityAffineMap, identityAffineMap};
 
       matmulRetValue =
-          rewriter
-              .create<linalg::GenericOp>(
-                  loc, matmulType,
-                  ValueRange{scalarFactorValue, matmulRetValue},
-                  ValueRange{outInitVal}, affineMaps,
-                  llvm::ArrayRef<utils::IteratorType>{
-                      utils::IteratorType::parallel,
-                      utils::IteratorType::parallel},
-                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
-                      ValueRange args) {
-                    auto mulf = arith::MulFOp::create(nestedBuilder, nestedLoc,
-                                                      args[0], args[1]);
-                    auto addf = arith::AddFOp::create(
-                        nestedBuilder, nestedLoc, mulf.getResult(), args[2]);
-                    linalg::YieldOp::create(nestedBuilder, nestedLoc,
-                                            addf.getResult());
-                  })
+          linalg::GenericOp::create(
+              rewriter, loc, matmulType,
+              ValueRange{scalarFactorValue, matmulRetValue},
+              ValueRange{outInitVal}, affineMaps,
+              llvm::ArrayRef<utils::IteratorType>{
+                  utils::IteratorType::parallel, utils::IteratorType::parallel},
+              [&](OpBuilder &nestedBuilder, Location nestedLoc,
+                  ValueRange args) {
+                auto mulf = arith::MulFOp::create(nestedBuilder, nestedLoc,
+                                                  args[0], args[1]);
+                auto addf = arith::AddFOp::create(nestedBuilder, nestedLoc,
+                                                  mulf.getResult(), args[2]);
+                linalg::YieldOp::create(nestedBuilder, nestedLoc,
+                                        addf.getResult());
+              })
           .getResult(0);
     }
 
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -372,9 +372,8 @@ SmallVector<Value> vector::getAsValues(OpBuilder &builder, Location loc,
   llvm::transform(foldResults, std::back_inserter(values),
                   [&](OpFoldResult foldResult) {
                     if (auto attr = dyn_cast<Attribute>(foldResult))
-                      return builder
-                          .create<arith::ConstantIndexOp>(
-                              loc, cast<IntegerAttr>(attr).getInt())
+                      return arith::ConstantIndexOp::create(
+                                 builder, loc, cast<IntegerAttr>(attr).getInt())
                           .getResult();
 
                     return cast<Value>(foldResult);
7 changes: 3 additions & 4 deletions mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp
@@ -248,11 +248,10 @@ struct Gather1DToConditionalLoads : OpRewritePattern<vector::GatherOp> {
       scf::YieldOp::create(b, loc, result);
     };
 
-    result =
-        rewriter
-            .create<scf::IfOp>(loc, condition, /*thenBuilder=*/loadBuilder,
+    result = scf::IfOp::create(rewriter, loc, condition,
+                               /*thenBuilder=*/loadBuilder,
                                /*elseBuilder=*/passThruBuilder)
-            .getResult(0);
+                 .getResult(0);
   }
 
   rewriter.replaceOp(op, result);
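The `scf::IfOp` call sites are worth noting because the region-builder callbacks carry over unchanged: only the op type and the builder argument change position. A minimal sketch of the new spelling with then/else callbacks, loosely following the gather lowering above (`condition`, `loadedValue`, and `passThruValue` are hypothetical placeholders, not taken from the sources):

```cpp
// Hypothetical yielded values; only the shape of the create call matters here.
auto thenBuilder = [&](OpBuilder &b, Location l) {
  scf::YieldOp::create(b, l, loadedValue);
};
auto elseBuilder = [&](OpBuilder &b, Location l) {
  scf::YieldOp::create(b, l, passThruValue);
};
// The if's result type is inferred from the values yielded by the two regions.
Value result = scf::IfOp::create(rewriter, loc, condition,
                                 /*thenBuilder=*/thenBuilder,
                                 /*elseBuilder=*/elseBuilder)
                   .getResult(0);
```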
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -142,8 +142,8 @@ struct TransferReadPermutationLowering
 
     // Transpose result of transfer_read.
    SmallVector<int64_t> transposePerm(permutation.begin(), permutation.end());
-    return rewriter
-        .create<vector::TransposeOp>(op.getLoc(), newRead, transposePerm)
+    return vector::TransposeOp::create(rewriter, op.getLoc(), newRead,
+                                       transposePerm)
         .getResult();
   }
 };
@@ -371,8 +371,8 @@ struct TransferOpReduceRank
         rewriter, op.getLoc(), newReadType, op.getBase(), op.getIndices(),
         AffineMapAttr::get(newMap), op.getPadding(), op.getMask(),
         newInBoundsAttr);
-    return rewriter
-        .create<vector::BroadcastOp>(op.getLoc(), originalVecType, newRead)
+    return vector::BroadcastOp::create(rewriter, op.getLoc(), originalVecType,
+                                       newRead)
         .getVector();
   }
 };
39 changes: 18 additions & 21 deletions mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp
@@ -451,10 +451,9 @@ struct WarpOpTransferWrite : public WarpDistributionPattern {
     }
     SmallVector<Value> delinearized;
     if (map.getNumResults() > 1) {
-      delinearized = rewriter
-                         .create<mlir::affine::AffineDelinearizeIndexOp>(
-                             newWarpOp.getLoc(), newWarpOp.getLaneid(),
-                             delinearizedIdSizes)
+      delinearized = mlir::affine::AffineDelinearizeIndexOp::create(
+                         rewriter, newWarpOp.getLoc(), newWarpOp.getLaneid(),
+                         delinearizedIdSizes)
                          .getResults();
     } else {
       // If there is only one map result, we can elide the delinearization
@@ -1538,19 +1537,18 @@ struct WarpOpInsertScalar : public WarpDistributionPattern {
         arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::eq,
                               newWarpOp.getLaneid(), insertingLane);
     Value newResult =
-        rewriter
-            .create<scf::IfOp>(
-                loc, isInsertingLane,
-                /*thenBuilder=*/
-                [&](OpBuilder &builder, Location loc) {
-                  Value newInsert = vector::InsertOp::create(
-                      builder, loc, newSource, distributedVec, newPos);
-                  scf::YieldOp::create(builder, loc, newInsert);
-                },
-                /*elseBuilder=*/
-                [&](OpBuilder &builder, Location loc) {
-                  scf::YieldOp::create(builder, loc, distributedVec);
-                })
+        scf::IfOp::create(
+            rewriter, loc, isInsertingLane,
+            /*thenBuilder=*/
+            [&](OpBuilder &builder, Location loc) {
+              Value newInsert = vector::InsertOp::create(
+                  builder, loc, newSource, distributedVec, newPos);
+              scf::YieldOp::create(builder, loc, newInsert);
+            },
+            /*elseBuilder=*/
+            [&](OpBuilder &builder, Location loc) {
+              scf::YieldOp::create(builder, loc, distributedVec);
+            })
             .getResult(0);
     rewriter.replaceAllUsesWith(newWarpOp->getResult(operandNumber), newResult);
     return success();
@@ -1661,10 +1659,9 @@ struct WarpOpInsert : public WarpDistributionPattern {
     auto nonInsertingBuilder = [&](OpBuilder &builder, Location loc) {
       scf::YieldOp::create(builder, loc, distributedDest);
     };
-    newResult = rewriter
-                    .create<scf::IfOp>(loc, isInsertingLane,
-                                       /*thenBuilder=*/insertingBuilder,
-                                       /*elseBuilder=*/nonInsertingBuilder)
+    newResult = scf::IfOp::create(rewriter, loc, isInsertingLane,
+                                  /*thenBuilder=*/insertingBuilder,
+                                  /*elseBuilder=*/nonInsertingBuilder)
                     .getResult(0);
   }
 