diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td index d7518943229ea..3a5ea64476b1f 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -1273,7 +1273,7 @@ def Vector_TransferReadOp : AttrSizedOperandSegments, DestinationStyleOpInterface ]>, - Arguments<(ins AnyShaped:$source, + Arguments<(ins AnyShaped:$base, Variadic:$indices, AffineMapAttr:$permutation_map, AnyType:$padding, @@ -1522,7 +1522,7 @@ def Vector_TransferWriteOp : DestinationStyleOpInterface ]>, Arguments<(ins AnyVectorOfAnyRank:$valueToStore, - AnyShaped:$source, + AnyShaped:$base, Variadic:$indices, AffineMapAttr:$permutation_map, Optional>:$mask, @@ -1663,7 +1663,7 @@ def Vector_TransferWriteOp : /// ops of other dialects. Value getValue() { return getVector(); } - MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); } + MutableOperandRange getDpsInitsMutable() { return getBaseMutable(); } }]; let hasFolder = 1; diff --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td index 8ea9d925b3790..c72ca58cedafd 100644 --- a/mlir/include/mlir/Interfaces/VectorInterfaces.td +++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td @@ -111,7 +111,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> { TODO: Change name of operand, which is not accurate for xfer_write. }], /*retTy=*/"::mlir::Value", - /*methodName=*/"getSource", + /*methodName=*/"getBase", /*args=*/(ins) >, InterfaceMethod< @@ -187,6 +187,12 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> { return inBounds; } + /// Wrapper for getBase, which replaced getSource. + [[deprecated("Use getBase instead!")]] + ::mlir::Value getSource() { + return $_op.getBase(); + } + /// Return the number of leading shaped dimensions (of the "source" operand) /// that do not participate in the permutation map. 
unsigned getLeadingShapedRank() { @@ -203,7 +209,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> { /// Return the shaped type of the "source" operand value. ::mlir::ShapedType getShapedType() { - return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType()); + return ::llvm::cast<::mlir::ShapedType>($_op.getBase().getType()); } /// Return the number of dimensions that participate in the permutation map. diff --git a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp index 58b85bc0ea6ac..d6f9495b2567c 100644 --- a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp +++ b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp @@ -58,7 +58,7 @@ struct TransferReadToArmSMELowering return rewriter.notifyMatchFailure(transferReadOp, "not a valid vector type for SME"); - if (!llvm::isa(transferReadOp.getSource().getType())) + if (!llvm::isa(transferReadOp.getBase().getType())) return rewriter.notifyMatchFailure(transferReadOp, "not a memref source"); // Out-of-bounds dims are not supported. @@ -84,7 +84,7 @@ struct TransferReadToArmSMELowering auto mask = transferReadOp.getMask(); auto padding = mask ? transferReadOp.getPadding() : nullptr; rewriter.replaceOpWithNewOp( - transferReadOp, vectorType, transferReadOp.getSource(), + transferReadOp, vectorType, transferReadOp.getBase(), transferReadOp.getIndices(), padding, mask, layout); return success(); @@ -128,7 +128,7 @@ struct TransferWriteToArmSMELowering if (!arm_sme::isValidSMETileVectorType(vType)) return failure(); - if (!llvm::isa(writeOp.getSource().getType())) + if (!llvm::isa(writeOp.getBase().getType())) return failure(); // Out-of-bounds dims are not supported. 
@@ -149,7 +149,7 @@ struct TransferWriteToArmSMELowering : arm_sme::TileSliceLayout::Horizontal; rewriter.replaceOpWithNewOp( - writeOp, writeOp.getVector(), writeOp.getSource(), writeOp.getIndices(), + writeOp, writeOp.getVector(), writeOp.getBase(), writeOp.getIndices(), writeOp.getMask(), layout); return success(); } @@ -686,7 +686,7 @@ struct FoldTransferWriteOfExtractTileSlice LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp, PatternRewriter &rewriter) const final { - if (!isa(writeOp.getSource().getType())) + if (!isa(writeOp.getBase().getType())) return rewriter.notifyMatchFailure(writeOp, "destination not a memref"); if (writeOp.hasOutOfBoundsDim()) @@ -713,7 +713,7 @@ struct FoldTransferWriteOfExtractTileSlice rewriter.replaceOpWithNewOp( writeOp, extractTileSlice.getTile(), - extractTileSlice.getTileSliceIndex(), mask, writeOp.getSource(), + extractTileSlice.getTileSliceIndex(), mask, writeOp.getBase(), writeOp.getIndices(), extractTileSlice.getLayout()); return success(); } diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp index ba05a5a000cb9..0b9ebdc0d66bb 100644 --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -486,7 +486,7 @@ struct CombineTransferReadOpTranspose final Value result = rewriter .create( - loc, resultType, transferReadOp.getSource(), + loc, resultType, transferReadOp.getBase(), transferReadOp.getIndices(), AffineMapAttr::get(newMap), transferReadOp.getPadding(), transferReadOp.getMask(), transferReadOp.getInBoundsAttr()) @@ -581,7 +581,7 @@ convertTransferReadOp(RewriterBase &rewriter, vector::TransferReadOp op, gpu::MMAMatrixType type = gpu::MMAMatrixType::get(op.getVectorType().getShape(), elType, fragType); Value load = rewriter.create( - op.getLoc(), type, op.getSource(), op.getIndices(), + op.getLoc(), type, op.getBase(), op.getIndices(), rewriter.getIndexAttr(*stride), isTranspose ? 
rewriter.getUnitAttr() : UnitAttr()); valueMapping[mappingResult] = load; @@ -612,7 +612,7 @@ convertTransferWriteOp(RewriterBase &rewriter, vector::TransferWriteOp op, Value matrix = it->second; auto store = rewriter.create( - op.getLoc(), matrix, op.getSource(), op.getIndices(), + op.getLoc(), matrix, op.getBase(), op.getIndices(), rewriter.getIndexAttr(*stride), /*transpose=*/UnitAttr()); (void)store; @@ -759,7 +759,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op, indices); nvgpu::LdMatrixOp newOp = rewriter.create( - loc, vectorType, op.getSource(), indices, *transpose, params->numTiles); + loc, vectorType, op.getBase(), indices, *transpose, params->numTiles); valueMapping[op] = newOp->getResult(0); return success(); } @@ -819,7 +819,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op, rewriter, op, *coords, {laneId, logicalValueId}, newIndices); Value el = rewriter.create(loc, loadedElType, - op.getSource(), newIndices); + op.getBase(), newIndices); result = rewriter.create(loc, el, result, i); } } else { @@ -842,7 +842,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op, getXferIndices( rewriter, op, *coords, {laneId, logicalValueId}, newIndices); Value el = rewriter.create(op.getLoc(), loadedElType, - op.getSource(), newIndices); + op.getBase(), newIndices); result = rewriter.create( op.getLoc(), el, result, ArrayRef{i, innerIdx}); } @@ -876,7 +876,7 @@ convertTransferReadToLoads(RewriterBase &rewriter, vector::TransferReadOp op, return rewriter.notifyMatchFailure(op, "no warpMatrixInfo"); bool isLdMatrixCompatible = - isSharedMemory(cast(op.getSource().getType())) && + isSharedMemory(cast(op.getBase().getType())) && nvgpu::inferTileWidthInBits(*warpMatrixInfo) == 128; VectorType vecTy = op.getVectorType(); @@ -934,7 +934,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op, SmallVector newIndices; getXferIndices( rewriter, op, *coords, 
{laneId, logicalValueId}, newIndices); - rewriter.create(loc, el, op.getSource(), newIndices); + rewriter.create(loc, el, op.getBase(), newIndices); } LLVM_DEBUG(DBGS() << "erase: " << op << "\n"); diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp index b9b598c02b4a2..cc5623068ab10 100644 --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -198,8 +198,7 @@ static Value generateInBoundsCheck( Location loc = xferOp.getLoc(); ImplicitLocOpBuilder lb(xferOp.getLoc(), b); if (!xferOp.isDimInBounds(0) && !isBroadcast) { - Value memrefDim = - vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim); + Value memrefDim = vector::createOrFoldDimOp(b, loc, xferOp.getBase(), *dim); AffineExpr d0, d1; bindDims(xferOp.getContext(), d0, d1); Value base = xferOp.getIndices()[*dim]; @@ -426,7 +425,7 @@ struct Strategy { auto vecType = dyn_cast(bufferType.getElementType()); auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( - loc, vecType, xferOp.getSource(), xferIndices, + loc, vecType, xferOp.getBase(), xferIndices, AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), xferOp.getPadding(), Value(), inBoundsAttr); @@ -512,7 +511,7 @@ struct Strategy { Location loc = xferOp.getLoc(); auto vec = b.create(loc, buffer, loadIndices); auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); - auto source = loopState.empty() ? xferOp.getSource() : loopState[0]; + auto source = loopState.empty() ? xferOp.getBase() : loopState[0]; Type type = isTensorOp(xferOp) ? xferOp.getShapedType() : Type(); auto newXferOp = b.create( loc, type, vec, source, xferIndices, @@ -544,7 +543,7 @@ struct Strategy { /// Return the initial loop state for the generated scf.for loop. static Value initialLoopState(TransferWriteOp xferOp) { - return isTensorOp(xferOp) ? xferOp.getSource() : Value(); + return isTensorOp(xferOp) ? 
xferOp.getBase() : Value(); } }; @@ -1145,7 +1144,7 @@ struct ScalableTransposeTransferWriteConversion ArrayRef(*maskDims).drop_front()); } - Value initDest = isTensorOp(writeOp) ? writeOp.getSource() : Value{}; + Value initDest = isTensorOp(writeOp) ? writeOp.getBase() : Value{}; ValueRange initLoopArgs = initDest ? initDest : ValueRange{}; auto result = rewriter.create( loc, lb, ub, step, initLoopArgs, @@ -1165,7 +1164,7 @@ struct ScalableTransposeTransferWriteConversion // Create the transfer_write for the slice. Value dest = - loopIterArgs.empty() ? writeOp.getSource() : loopIterArgs.front(); + loopIterArgs.empty() ? writeOp.getBase() : loopIterArgs.front(); auto newWriteOp = b.create( loc, sliceVec, dest, xferIndices, ArrayRef(writeOp.getInBoundsValues()).drop_front()); @@ -1340,7 +1339,7 @@ struct UnrollTransferReadConversion auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr()); auto newXferOp = b.create( - loc, newXferVecType, xferOp.getSource(), xferIndices, + loc, newXferVecType, xferOp.getBase(), xferIndices, AffineMapAttr::get(unpackedPermutationMap(b, xferOp)), xferOp.getPadding(), Value(), inBoundsAttr); maybeAssignMask(b, xferOp, newXferOp, i); @@ -1449,7 +1448,7 @@ struct UnrollTransferWriteConversion } int64_t dimSize = inputVectorTy.getShape()[0]; - Value source = xferOp.getSource(); // memref or tensor to be written to. + Value source = xferOp.getBase(); // memref or tensor to be written to. auto sourceType = isTensorOp(xferOp) ? xferOp.getShapedType() : Type(); // Generate fully unrolled loop of transfer ops. 
@@ -1567,8 +1566,7 @@ struct Strategy1d { b, xferOp, iv, dim, TypeRange(xferOp.getVectorType()), /*inBoundsCase=*/ [&](OpBuilder &b, Location loc) { - Value val = - b.create(loc, xferOp.getSource(), indices); + Value val = b.create(loc, xferOp.getBase(), indices); return b.create(loc, val, vec, iv); }, /*outOfBoundsCase=*/ @@ -1599,7 +1597,7 @@ struct Strategy1d { /*inBoundsCase=*/[&](OpBuilder &b, Location loc) { auto val = b.create(loc, xferOp.getVector(), iv); - b.create(loc, val, xferOp.getSource(), indices); + b.create(loc, val, xferOp.getBase(), indices); }); b.create(loc); } diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp index 0bc0f2fca2c3b..adcee1933e8a7 100644 --- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp +++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp @@ -192,7 +192,7 @@ struct TransferReadLowering : public OpRewritePattern { xegpu::CreateNdDescOp ndDesc = createNdDescriptor(rewriter, loc, descType, - dyn_cast>(readOp.getSource()), + dyn_cast>(readOp.getBase()), readOp.getIndices()); DenseI64ArrayAttr transposeAttr = @@ -231,10 +231,10 @@ struct TransferWriteLowering vecTy.getShape(), vecTy.getElementType(), /*array_length=*/1, /*boundary_check=*/writeOp.hasOutOfBoundsDim(), xegpu::MemorySpace::Global); - xegpu::CreateNdDescOp ndDesc = createNdDescriptor( - rewriter, loc, descType, - dyn_cast>(writeOp.getSource()), - writeOp.getIndices()); + xegpu::CreateNdDescOp ndDesc = + createNdDescriptor(rewriter, loc, descType, + dyn_cast>(writeOp.getBase()), + writeOp.getIndices()); // By default, no specific caching policy is assigned. 
xegpu::CachePolicyAttr hint = nullptr; diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp index 9f64abb5a8860..cd41765dec2a2 100644 --- a/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp +++ b/mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp @@ -118,7 +118,7 @@ static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc, Value fill = builder.create(loc, unbroadcastedVectorType, readOp.getPadding()); Value load = builder.create( - loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices()); + loc, unbroadcastedVectorType, readOp.getBase(), readOp.getIndices()); Value res = builder.create(loc, unbroadcastedVectorType, readOp.getMask(), load, fill); // Insert a broadcasting op if required. @@ -149,7 +149,7 @@ struct TransferReadLowering final : OpRewritePattern { } Location loc = readOp.getLoc(); - Value src = readOp.getSource(); + Value src = readOp.getBase(); VectorType vectorType = readOp.getVectorType(); int64_t vectorSize = vectorType.getNumElements(); diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp index 62a148d2b7e62..95965872f4098 100644 --- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp +++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp @@ -315,7 +315,7 @@ struct LegalizeTransferReadOpsByDecomposition decomposeToSMETiles(rewriter, vectorType, smeTileType, transposed)) { auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile); auto smeRead = rewriter.create( - loc, smeTileType, readOp.getSource(), + loc, smeTileType, readOp.getBase(), getSMESubTileIndices(rewriter, loc, readOp.getIndices(), smeTile), readOp.getPermutationMapAttr(), readOp.getPadding(), smeMask, readOp.getInBoundsAttr()); @@ -359,7 +359,7 @@ struct LegalizeTransferWriteOpsByDecomposition auto smeTileType = getSMETileTypeForElement(vectorType.getElementType()); 
auto inputSMETiles = adaptor.getValueToStore(); - Value destTensorOrMemref = writeOp.getSource(); + Value destTensorOrMemref = writeOp.getBase(); for (auto [index, smeTile] : llvm::enumerate(decomposeToSMETiles( rewriter, vectorType, smeTileType, transposed))) { auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile); @@ -497,7 +497,7 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop auto slice = rewriter.create(loc, tile, tileSliceIndex); rewriter.create( - loc, slice, writeOp.getSource(), ValueRange{storeRow, storeCol}, + loc, slice, writeOp.getBase(), ValueRange{storeRow, storeCol}, AffineMapAttr::get(writeOp.getPermutationMap().dropResult(0)), sliceMask, rewriter.getBoolArrayAttr( @@ -677,7 +677,7 @@ struct LiftIllegalVectorTransposeToMemory }); SmallVector strides(readType.getRank(), Value(one)); auto readSubview = rewriter.create( - loc, illegalRead.getSource(), illegalRead.getIndices(), readSizes, + loc, illegalRead.getBase(), illegalRead.getIndices(), readSizes, strides); // Apply the transpose to all values/attributes of the transfer_read: @@ -851,7 +851,7 @@ struct LowerIllegalTransposeStoreViaZA // Note: We need to use `get_tile` as there's no vector-level `undef`. 
Value undefTile = rewriter.create(loc, smeTileType); - Value destTensorOrMemref = writeOp.getSource(); + Value destTensorOrMemref = writeOp.getBase(); auto numSlicesPerTile = std::min(sourceType.getDimSize(0), smeTileType.getDimSize(0)); auto numSlices = diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp index 20e4e3cee7ed4..707b63ff9335b 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp @@ -171,7 +171,7 @@ void mlir::linalg::hoistRedundantVectorBroadcasts(RewriterBase &rewriter, static bool noAliasingUseInLoop(vector::TransferReadOp transferRead, LoopLikeOpInterface loop) { - Value source = transferRead.getSource(); + Value source = transferRead.getBase(); // Skip view-like Ops and retrive the actual soruce Operation while (auto srcOp = @@ -276,7 +276,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root, for (auto *sliceOp : llvm::reverse(forwardSlice)) { auto candidateWrite = dyn_cast(sliceOp); if (!candidateWrite || - candidateWrite.getSource() != transferRead.getSource()) + candidateWrite.getBase() != transferRead.getBase()) continue; transferWrite = candidateWrite; } @@ -312,11 +312,11 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root, transferRead.getPermutationMap() != transferWrite.getPermutationMap()) return WalkResult::advance(); - auto *source = transferRead.getSource().getDefiningOp(); + auto *source = transferRead.getBase().getDefiningOp(); if (source && isa_and_nonnull(source)) return WalkResult::advance(); - source = transferWrite.getSource().getDefiningOp(); + source = transferWrite.getBase().getDefiningOp(); if (source && isa_and_nonnull(source)) return WalkResult::advance(); @@ -325,7 +325,7 @@ void mlir::linalg::hoistRedundantVectorTransfers(Operation *root, DominanceInfo dom(loop); if (!dom.properlyDominates(transferRead.getOperation(), transferWrite)) return WalkResult::advance(); - for 
(auto &use : transferRead.getSource().getUses()) { + for (auto &use : transferRead.getBase().getUses()) { if (!loop->isAncestor(use.getOwner())) continue; if (use.getOwner() == transferRead.getOperation() || diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index a477c2fb3f8cb..63f88d02ff3a0 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -2627,7 +2627,7 @@ struct PadOpVectorizationWithTransferReadPattern SmallVector inBounds(xferOp.getVectorType().getRank(), false); xferOp->setAttr(xferOp.getInBoundsAttrName(), rewriter.getBoolArrayAttr(inBounds)); - xferOp.getSourceMutable().assign(padOp.getSource()); + xferOp.getBaseMutable().assign(padOp.getSource()); xferOp.getPaddingMutable().assign(padValue); }); @@ -3114,7 +3114,7 @@ LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite( return rewriter.notifyMatchFailure(xferOp, "unsupported mask"); // Transfer into `view`. - Value viewOrAlloc = xferOp.getSource(); + Value viewOrAlloc = xferOp.getBase(); if (!viewOrAlloc.getDefiningOp() && !viewOrAlloc.getDefiningOp()) return rewriter.notifyMatchFailure(xferOp, "source not a view or alloc"); @@ -3191,7 +3191,7 @@ LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite( return rewriter.notifyMatchFailure(xferOp, "unsupported mask"); // Transfer into `viewOrAlloc`. 
- Value viewOrAlloc = xferOp.getSource(); + Value viewOrAlloc = xferOp.getBase(); if (!viewOrAlloc.getDefiningOp() && !viewOrAlloc.getDefiningOp()) return rewriter.notifyMatchFailure(xferOp, "source not a view or alloc"); diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp index 05ba6a3f38708..b906c727604dc 100644 --- a/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/ExtractAddressComputations.cpp @@ -119,7 +119,7 @@ static nvgpu::LdMatrixOp rebuildLdMatrixOp(RewriterBase &rewriter, template static FailureOr getTransferLikeOpSrcMemRef(TransferLikeOp transferLikeOp) { - Value src = transferLikeOp.getSource(); + Value src = transferLikeOp.getBase(); if (isa(src.getType())) return src; return failure(); diff --git a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp index ec55b9e561914..a0d6f4c32627c 100644 --- a/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp +++ b/mlir/lib/Dialect/MemRef/Transforms/FoldMemRefAliasOps.cpp @@ -225,7 +225,7 @@ static Value getMemRefOperand(LoadOrStoreOpTy op) { } static Value getMemRefOperand(vector::TransferReadOp op) { - return op.getSource(); + return op.getBase(); } static Value getMemRefOperand(nvgpu::LdMatrixOp op) { @@ -241,7 +241,7 @@ static Value getMemRefOperand(vector::MaskedLoadOp op) { return op.getBase(); } static Value getMemRefOperand(vector::MaskedStoreOp op) { return op.getBase(); } static Value getMemRefOperand(vector::TransferWriteOp op) { - return op.getSource(); + return op.getBase(); } static Value getMemRefOperand(gpu::SubgroupMmaLoadMatrixOp op) { diff --git a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp index 556922a64b093..75dbe0becf80d 100644 --- a/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp +++ 
b/mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp @@ -172,7 +172,7 @@ static Value getValueLoadedFromGlobal(Operation *op) { if (!load) return nullptr; - auto loadType = dyn_cast(load.getSource().getType()); + auto loadType = dyn_cast(load.getBase().getType()); if (!loadType || !hasDefaultMemorySpace(loadType)) return nullptr; return load; @@ -185,7 +185,7 @@ static bool isStoreToShared(Operation *op, Value v) { if (!store || store.getVector() != v) return false; - auto storeType = dyn_cast(store.getSource().getType()); + auto storeType = dyn_cast(store.getBase().getType()); return storeType || hasSharedMemorySpace(storeType); } diff --git a/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp b/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp index a782ed5ddd85e..5904e42be0905 100644 --- a/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp +++ b/mlir/lib/Dialect/NVGPU/Transforms/Utils.cpp @@ -71,9 +71,9 @@ Value nvgpu::getMemrefOperand(Operation *op) { if (auto storeOp = dyn_cast(op)) return storeOp.getMemref(); if (auto transferWrite = dyn_cast(op)) - return transferWrite.getSource(); + return transferWrite.getBase(); if (auto transferRead = dyn_cast(op)) - return transferRead.getSource(); + return transferRead.getBase(); if (auto storeOp = dyn_cast(op)) return storeOp.getBase(); if (auto loadOp = dyn_cast(op)) diff --git a/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp b/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp index e80360aa08ed5..c5a9938519a2a 100644 --- a/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp +++ b/mlir/lib/Dialect/NVGPU/Utils/MMAUtils.cpp @@ -285,7 +285,7 @@ bool nvgpu::canLowerToWarpMatrixOperation(vector::TransferReadOp op) { // information to ensure correctness of downstream assumptions. It is possible // to enable this if caller can assert that tensor will be lowered in a // particular manner. 
- auto sourceType = dyn_cast(op.getSource().getType()); + auto sourceType = dyn_cast(op.getBase().getType()); if (!sourceType) return false; @@ -309,7 +309,7 @@ bool nvgpu::canLowerToWarpMatrixOperation(vector::TransferWriteOp op) { return false; // Currently we can't support reads on tensor types because we need stride // information to ensure correctness of downstream assumptions. - auto sourceType = dyn_cast(op.getSource().getType()); + auto sourceType = dyn_cast(op.getBase().getType()); if (!sourceType) return false; diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp index 998b0fb6eb4b7..a19bfb278f060 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/FoldTensorSubsetOps.cpp @@ -36,7 +36,7 @@ namespace tensor { using namespace mlir; static Value getTensorOperand(vector::TransferReadOp op) { - return op.getSource(); + return op.getBase(); } static Value getTensorOperand(tensor::InsertSliceOp op) { diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 67e3aa564a184..c49df88f3f55b 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -314,7 +314,7 @@ bool mlir::vector::isDisjointTransferIndices( bool mlir::vector::isDisjointTransferSet(VectorTransferOpInterface transferA, VectorTransferOpInterface transferB, bool testDynamicValueUsingBounds) { - if (transferA.getSource() != transferB.getSource()) + if (transferA.getBase() != transferB.getBase()) return false; return isDisjointTransferIndices(transferA, transferB, testDynamicValueUsingBounds); @@ -4206,7 +4206,7 @@ static void printTransferAttrs(OpAsmPrinter &p, VectorTransferOpInterface op) { } void TransferReadOp::print(OpAsmPrinter &p) { - p << " " << getSource() << "[" << getIndices() << "], " << getPadding(); + p << " " << getBase() << "[" << getIndices() << "], " << 
getPadding(); if (getMask()) p << ", " << getMask(); printTransferAttrs(p, *this); @@ -4465,7 +4465,7 @@ static LogicalResult foldTransferFullMask(TransferOp op) { static Value foldRAW(TransferReadOp readOp) { if (!llvm::isa(readOp.getShapedType())) return {}; - auto defWrite = readOp.getSource().getDefiningOp(); + auto defWrite = readOp.getBase().getDefiningOp(); while (defWrite) { if (checkSameValueRAW(defWrite, readOp)) return defWrite.getVector(); @@ -4473,7 +4473,7 @@ static Value foldRAW(TransferReadOp readOp) { cast(defWrite.getOperation()), cast(readOp.getOperation()))) break; - defWrite = defWrite.getSource().getDefiningOp(); + defWrite = defWrite.getBase().getDefiningOp(); } return {}; } @@ -4501,7 +4501,7 @@ void TransferReadOp::getEffects( SmallVectorImpl> &effects) { if (llvm::isa(getShapedType())) - effects.emplace_back(MemoryEffects::Read::get(), &getSourceMutable(), + effects.emplace_back(MemoryEffects::Read::get(), &getBaseMutable(), SideEffects::DefaultResource::get()); } @@ -4543,7 +4543,7 @@ struct TransferReadAfterWriteToBroadcast if (readOp.hasOutOfBoundsDim() || !llvm::isa(readOp.getShapedType())) return failure(); - auto defWrite = readOp.getSource().getDefiningOp(); + auto defWrite = readOp.getBase().getDefiningOp(); if (!defWrite) return failure(); // TODO: If the written transfer chunk is a superset of the read transfer @@ -4728,7 +4728,7 @@ ParseResult TransferWriteOp::parse(OpAsmParser &parser, } void TransferWriteOp::print(OpAsmPrinter &p) { - p << " " << getVector() << ", " << getSource() << "[" << getIndices() << "]"; + p << " " << getVector() << ", " << getBase() << "[" << getIndices() << "]"; if (getMask()) p << ", " << getMask(); printTransferAttrs(p, *this); @@ -4807,7 +4807,7 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write, if (write.getTransferRank() == 0) return failure(); auto rankedTensorType = - llvm::dyn_cast(write.getSource().getType()); + llvm::dyn_cast(write.getBase().getType()); // If not operating on 
tensors, bail. if (!rankedTensorType) return failure(); @@ -4829,7 +4829,7 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write, if (read.hasOutOfBoundsDim() || write.hasOutOfBoundsDim()) return failure(); // Tensor types must be the same. - if (read.getSource().getType() != rankedTensorType) + if (read.getBase().getType() != rankedTensorType) return failure(); // Vector types must be the same. if (read.getVectorType() != write.getVectorType()) @@ -4846,13 +4846,13 @@ static LogicalResult foldReadInitWrite(TransferWriteOp write, llvm::any_of(write.getIndices(), isNotConstantZero)) return failure(); // Success. - results.push_back(read.getSource()); + results.push_back(read.getBase()); return success(); } static bool checkSameValueWAR(vector::TransferReadOp read, vector::TransferWriteOp write) { - return read.getSource() == write.getSource() && + return read.getBase() == write.getBase() && read.getIndices() == write.getIndices() && read.getPermutationMap() == write.getPermutationMap() && read.getVectorType() == write.getVectorType() && !read.getMask() && @@ -4874,7 +4874,7 @@ static bool checkSameValueWAR(vector::TransferReadOp read, /// ``` static LogicalResult foldWAR(TransferWriteOp write, SmallVectorImpl &results) { - if (!llvm::isa(write.getSource().getType())) + if (!llvm::isa(write.getBase().getType())) return failure(); auto read = write.getVector().getDefiningOp(); if (!read) @@ -4882,7 +4882,7 @@ static LogicalResult foldWAR(TransferWriteOp write, if (!checkSameValueWAR(read, write)) return failure(); - results.push_back(read.getSource()); + results.push_back(read.getBase()); return success(); } @@ -4954,12 +4954,11 @@ class FoldWaw final : public OpRewritePattern { return failure(); vector::TransferWriteOp writeToModify = writeOp; - auto defWrite = - writeOp.getSource().getDefiningOp(); + auto defWrite = writeOp.getBase().getDefiningOp(); while (defWrite) { if (checkSameValueWAW(writeOp, defWrite)) { rewriter.modifyOpInPlace(writeToModify, 
[&]() { - writeToModify.getSourceMutable().assign(defWrite.getSource()); + writeToModify.getBaseMutable().assign(defWrite.getBase()); }); return success(); } @@ -4972,7 +4971,7 @@ class FoldWaw final : public OpRewritePattern { if (!defWrite->hasOneUse()) break; writeToModify = defWrite; - defWrite = defWrite.getSource().getDefiningOp(); + defWrite = defWrite.getBase().getDefiningOp(); } return failure(); } diff --git a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp index 1caec5bb8644f..b2272c5fda876 100644 --- a/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.cpp @@ -52,7 +52,7 @@ struct TransferReadOpInterface auto readOp = cast(op); assert(isa(readOp.getShapedType()) && "only tensor types expected"); - FailureOr buffer = getBuffer(rewriter, readOp.getSource(), options); + FailureOr buffer = getBuffer(rewriter, readOp.getBase(), options); if (failed(buffer)) return failure(); replaceOpWithNewBufferizedOp( @@ -110,7 +110,7 @@ struct TransferWriteOpInterface // Create a new transfer_write on buffer that doesn't have a return value. FailureOr resultBuffer = - getBuffer(rewriter, writeOp.getSource(), options); + getBuffer(rewriter, writeOp.getBase(), options); if (failed(resultBuffer)) return failure(); rewriter.create( diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp index 1f6cac2aa6f96..ba21092d2af3c 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp @@ -222,7 +222,7 @@ struct MaskedTransferReadOpPattern // Replace the `vector.mask` operation. 
rewriter.replaceOpWithNewOp( - maskingOp.getOperation(), readOp.getVectorType(), readOp.getSource(), + maskingOp.getOperation(), readOp.getVectorType(), readOp.getBase(), readOp.getIndices(), readOp.getPermutationMap(), readOp.getPadding(), maskingOp.getMask(), readOp.getInBounds()); return success(); @@ -245,7 +245,7 @@ struct MaskedTransferWriteOpPattern // Replace the `vector.mask` operation. rewriter.replaceOpWithNewOp( maskingOp.getOperation(), resultType, writeOp.getVector(), - writeOp.getSource(), writeOp.getIndices(), writeOp.getPermutationMap(), + writeOp.getBase(), writeOp.getIndices(), writeOp.getPermutationMap(), maskingOp.getMask(), writeOp.getInBounds()); return success(); } diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp index c5d29c09b39b3..5b81d0d33d484 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp @@ -139,7 +139,7 @@ struct TransferReadPermutationLowering VectorType newReadType = VectorType::get( newVectorShape, op.getVectorType().getElementType(), newScalableDims); Value newRead = rewriter.create( - op.getLoc(), newReadType, op.getSource(), op.getIndices(), + op.getLoc(), newReadType, op.getBase(), op.getIndices(), AffineMapAttr::get(newMap), op.getPadding(), op.getMask(), newInBoundsAttr); @@ -214,7 +214,7 @@ struct TransferWritePermutationLowering auto newMap = AffineMap::getMinorIdentityMap( map.getNumDims(), map.getNumResults(), rewriter.getContext()); auto newWrite = rewriter.create( - op.getLoc(), newVec, op.getSource(), op.getIndices(), + op.getLoc(), newVec, op.getBase(), op.getIndices(), AffineMapAttr::get(newMap), op.getMask(), newInBoundsAttr); if (newWrite.hasPureTensorSemantics()) return newWrite.getResult(); @@ -300,7 +300,7 @@ struct TransferWriteNonPermutationLowering } ArrayAttr newInBoundsAttr = rewriter.getBoolArrayAttr(newInBoundsValues); auto newWrite = 
rewriter.create( - op.getLoc(), newVec, op.getSource(), op.getIndices(), + op.getLoc(), newVec, op.getBase(), op.getIndices(), AffineMapAttr::get(newMap), newMask, newInBoundsAttr); if (newWrite.hasPureTensorSemantics()) return newWrite.getResult(); @@ -371,7 +371,7 @@ struct TransferOpReduceRank op.getInBoundsAttr().getValue().take_back(reducedShapeRank)) : ArrayAttr(); Value newRead = rewriter.create( - op.getLoc(), newReadType, op.getSource(), op.getIndices(), + op.getLoc(), newReadType, op.getBase(), op.getIndices(), AffineMapAttr::get(newMap), op.getPadding(), op.getMask(), newInBoundsAttr); return rewriter @@ -474,12 +474,12 @@ struct TransferReadToVectorLoadLowering Value fill = rewriter.create( read.getLoc(), unbroadcastedVectorType, read.getPadding()); res = rewriter.create( - read.getLoc(), unbroadcastedVectorType, read.getSource(), + read.getLoc(), unbroadcastedVectorType, read.getBase(), read.getIndices(), read.getMask(), fill); } else { - res = rewriter.create( - read.getLoc(), unbroadcastedVectorType, read.getSource(), - read.getIndices()); + res = rewriter.create(read.getLoc(), + unbroadcastedVectorType, + read.getBase(), read.getIndices()); } // Insert a broadcasting op if required. @@ -570,11 +570,11 @@ struct TransferWriteToVectorStoreLowering }); rewriter.create( - write.getLoc(), write.getSource(), write.getIndices(), - write.getMask(), write.getVector()); + write.getLoc(), write.getBase(), write.getIndices(), write.getMask(), + write.getVector()); } else { rewriter.create(write.getLoc(), write.getVector(), - write.getSource(), write.getIndices()); + write.getBase(), write.getIndices()); } // There's no return value for StoreOps. Use Value() to signal success to // matchAndRewrite. 
diff --git a/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp index e8e178fe75962..392bbb1c42201 100644 --- a/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.cpp @@ -37,7 +37,7 @@ struct TransferReadOpSubsetExtractionOpInterface : public SubsetExtractionOpInterface::ExternalModel< TransferReadOpSubsetExtractionOpInterface, vector::TransferReadOp> { OpOperand &getSourceOperand(Operation *op) const { - return cast(op).getSourceMutable(); + return cast(op).getBaseMutable(); } }; @@ -49,7 +49,7 @@ struct TransferWriteOpSubsetInsertionOpInterface } OpOperand &getDestinationOperand(Operation *op) const { - return cast(op).getSourceMutable(); + return cast(op).getBaseMutable(); } Value buildSubsetExtraction(Operation *op, OpBuilder &builder, diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp index 19f408ad1b570..c1f5517aa82da 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp @@ -718,7 +718,7 @@ struct WarpOpTransferRead : public WarpDistributionPattern { auto read = operand->get().getDefiningOp(); // Source must be defined outside of the region. - if (!warpOp.isDefinedOutsideOfRegion(read.getSource())) + if (!warpOp.isDefinedOutsideOfRegion(read.getBase())) return rewriter.notifyMatchFailure( read, "source must be defined outside of the region"); @@ -802,7 +802,7 @@ struct WarpOpTransferRead : public WarpDistributionPattern { hasMask ? 
newWarpOp.getResult(newRetIndices[newRetIndices.size() - 1]) : Value(); auto newRead = rewriter.create( - read.getLoc(), distributedVal.getType(), read.getSource(), newIndices, + read.getLoc(), distributedVal.getType(), read.getBase(), newIndices, read.getPermutationMapAttr(), newPadding, newMask, read.getInBoundsAttr()); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp index 68a44ea889470..067d4e3491391 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp @@ -230,7 +230,7 @@ struct CastAwayTransferReadLeadingOneDim if (read.getTransferRank() == 0) return failure(); - auto shapedType = cast(read.getSource().getType()); + auto shapedType = cast(read.getBase().getType()); if (shapedType.getElementType() != read.getVectorType().getElementType()) return failure(); @@ -260,7 +260,7 @@ struct CastAwayTransferReadLeadingOneDim } auto newRead = rewriter.create( - read.getLoc(), newType, read.getSource(), read.getIndices(), + read.getLoc(), newType, read.getBase(), read.getIndices(), AffineMapAttr::get(newMap), read.getPadding(), mask, inBoundsAttr); rewriter.replaceOpWithNewOp(read, oldType, newRead); @@ -284,7 +284,7 @@ struct CastAwayTransferWriteLeadingOneDim if (write.getTransferRank() == 0) return failure(); - auto shapedType = dyn_cast(write.getSource().getType()); + auto shapedType = dyn_cast(write.getBase().getType()); if (shapedType.getElementType() != write.getVectorType().getElementType()) return failure(); @@ -314,13 +314,13 @@ struct CastAwayTransferWriteLeadingOneDim Value newMask = dropUnitDimsFromMask( rewriter, write.getLoc(), write.getMask(), newType, newMap, maskType); rewriter.replaceOpWithNewOp( - write, newVector, write.getSource(), write.getIndices(), + write, newVector, write.getBase(), write.getIndices(), AffineMapAttr::get(newMap), newMask, inBoundsAttr); return success(); } 
rewriter.replaceOpWithNewOp( - write, newVector, write.getSource(), write.getIndices(), + write, newVector, write.getBase(), write.getIndices(), AffineMapAttr::get(newMap), inBoundsAttr); return success(); } diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp index a560aa1b1e680..004beadc9ec7d 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @@ -1249,7 +1249,7 @@ struct ConvertVectorTransferRead final auto loc = op.getLoc(); auto containerElemTy = - cast(adaptor.getSource().getType()).getElementType(); + cast(adaptor.getBase().getType()).getElementType(); Type emulatedElemTy = op.getType().getElementType(); int emulatedBits = emulatedElemTy.getIntOrFloatBitWidth(); int containerBits = containerElemTy.getIntOrFloatBitWidth(); @@ -1272,7 +1272,7 @@ struct ConvertVectorTransferRead final adaptor.getPadding()); auto stridedMetadata = - rewriter.create(loc, op.getSource()); + rewriter.create(loc, op.getBase()); OpFoldResult linearizedIndices; memref::LinearizedMemRefInfo linearizedInfo; @@ -1294,7 +1294,7 @@ struct ConvertVectorTransferRead final emulatedPerContainerElem); auto newRead = rewriter.create( - loc, VectorType::get(numElements, containerElemTy), adaptor.getSource(), + loc, VectorType::get(numElements, containerElemTy), adaptor.getBase(), getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices), newPadding); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp index 999fb9c415886..d4d07c7eadc77 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp @@ -92,7 +92,7 @@ void TransferOptimization::deadStoreOp(vector::TransferWriteOp write) { << "\n"); llvm::SmallVector blockingAccesses; 
Operation *firstOverwriteCandidate = nullptr; - Value source = memref::skipViewLikeOps(cast(write.getSource())); + Value source = memref::skipViewLikeOps(cast(write.getBase())); llvm::SmallVector users(source.getUsers().begin(), source.getUsers().end()); llvm::SmallDenseSet processed; @@ -112,8 +112,8 @@ void TransferOptimization::deadStoreOp(vector::TransferWriteOp write) { if (auto nextWrite = dyn_cast(user)) { // Check candidate that can override the store. if (memref::isSameViewOrTrivialAlias( - cast(nextWrite.getSource()), - cast(write.getSource())) && + cast(nextWrite.getBase()), + cast(write.getBase())) && checkSameValueWAW(nextWrite, write) && postDominators.postDominates(nextWrite, write)) { if (firstOverwriteCandidate == nullptr || @@ -178,7 +178,7 @@ void TransferOptimization::storeToLoadForwarding(vector::TransferReadOp read) { << "\n"); SmallVector blockingWrites; vector::TransferWriteOp lastwrite = nullptr; - Value source = memref::skipViewLikeOps(cast(read.getSource())); + Value source = memref::skipViewLikeOps(cast(read.getBase())); llvm::SmallVector users(source.getUsers().begin(), source.getUsers().end()); llvm::SmallDenseSet processed; @@ -202,8 +202,8 @@ void TransferOptimization::storeToLoadForwarding(vector::TransferReadOp read) { /*testDynamicValueUsingBounds=*/true)) continue; if (memref::isSameViewOrTrivialAlias( - cast(read.getSource()), - cast(write.getSource())) && + cast(read.getBase()), + cast(write.getBase())) && dominators.dominates(write, read) && checkSameValueRAW(write, read)) { if (lastwrite == nullptr || dominators.dominates(lastwrite, write)) lastwrite = write; @@ -351,7 +351,7 @@ class TransferReadDropUnitDimsPattern auto loc = transferReadOp.getLoc(); Value vector = transferReadOp.getVector(); VectorType vectorType = cast(vector.getType()); - Value source = transferReadOp.getSource(); + Value source = transferReadOp.getBase(); MemRefType sourceType = dyn_cast(source.getType()); // TODO: support tensor types. 
if (!sourceType) @@ -433,7 +433,7 @@ class TransferWriteDropUnitDimsPattern auto loc = transferWriteOp.getLoc(); Value vector = transferWriteOp.getVector(); VectorType vectorType = cast(vector.getType()); - Value source = transferWriteOp.getSource(); + Value source = transferWriteOp.getBase(); MemRefType sourceType = dyn_cast(source.getType()); // TODO: support tensor type. if (!sourceType) @@ -604,7 +604,7 @@ class FlattenContiguousRowMajorTransferReadPattern auto loc = transferReadOp.getLoc(); Value vector = transferReadOp.getVector(); VectorType vectorType = cast(vector.getType()); - auto source = transferReadOp.getSource(); + auto source = transferReadOp.getBase(); MemRefType sourceType = dyn_cast(source.getType()); // 0. Check pre-conditions @@ -695,7 +695,7 @@ class FlattenContiguousRowMajorTransferWritePattern auto loc = transferWriteOp.getLoc(); Value vector = transferWriteOp.getVector(); VectorType vectorType = cast(vector.getType()); - Value source = transferWriteOp.getSource(); + Value source = transferWriteOp.getBase(); MemRefType sourceType = dyn_cast(source.getType()); // 0. 
Check pre-conditions @@ -851,12 +851,12 @@ class RewriteScalarExtractElementOfTransferRead *getConstantIntValue(ofr)); } } - if (isa(xferOp.getSource().getType())) { - rewriter.replaceOpWithNewOp(extractOp, xferOp.getSource(), + if (isa(xferOp.getBase().getType())) { + rewriter.replaceOpWithNewOp(extractOp, xferOp.getBase(), newIndices); } else { rewriter.replaceOpWithNewOp( - extractOp, xferOp.getSource(), newIndices); + extractOp, xferOp.getBase(), newIndices); } return success(); @@ -899,12 +899,12 @@ class RewriteScalarExtractOfTransferRead extractOp.getLoc(), *getConstantIntValue(ofr)); } } - if (isa(xferOp.getSource().getType())) { - rewriter.replaceOpWithNewOp(extractOp, xferOp.getSource(), + if (isa(xferOp.getBase().getType())) { + rewriter.replaceOpWithNewOp(extractOp, xferOp.getBase(), newIndices); } else { rewriter.replaceOpWithNewOp( - extractOp, xferOp.getSource(), newIndices); + extractOp, xferOp.getBase(), newIndices); } return success(); @@ -932,12 +932,12 @@ class RewriteScalarWrite : public OpRewritePattern { Value scalar = rewriter.create(xferOp.getLoc(), xferOp.getVector()); // Construct a scalar store. 
- if (isa(xferOp.getSource().getType())) { + if (isa(xferOp.getBase().getType())) { rewriter.replaceOpWithNewOp( - xferOp, scalar, xferOp.getSource(), xferOp.getIndices()); + xferOp, scalar, xferOp.getBase(), xferOp.getIndices()); } else { rewriter.replaceOpWithNewOp( - xferOp, scalar, xferOp.getSource(), xferOp.getIndices()); + xferOp, scalar, xferOp.getBase(), xferOp.getIndices()); } return success(); } diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp index b801692e9ecc9..256c8cb69b1ba 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp @@ -58,7 +58,7 @@ static Value createInBoundsCond(RewriterBase &b, b, loc, b.getAffineDimExpr(0) + b.getAffineConstantExpr(vectorSize), {xferOp.getIndices()[indicesIdx]}); OpFoldResult dimSz = - memref::getMixedSize(b, loc, xferOp.getSource(), indicesIdx); + memref::getMixedSize(b, loc, xferOp.getBase(), indicesIdx); auto maybeCstSum = getConstantIntValue(sum); auto maybeCstDimSz = getConstantIntValue(dimSz); if (maybeCstSum && maybeCstDimSz && *maybeCstSum <= *maybeCstDimSz) @@ -185,7 +185,7 @@ static Value castToCompatibleMemRefType(OpBuilder &b, Value memref, } /// Operates under a scoped context to build the intersection between the -/// view `xferOp.getSource()` @ `xferOp.getIndices()` and the view `alloc`. +/// view `xferOp.getBase()` @ `xferOp.getIndices()` and the view `alloc`. // TODO: view intersection/union/differences should be a proper std op. 
static std::pair createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp, @@ -202,8 +202,8 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp, auto isaWrite = isa(xferOp); xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) { using MapList = ArrayRef>; - Value dimMemRef = b.create(xferOp.getLoc(), - xferOp.getSource(), indicesIdx); + Value dimMemRef = + b.create(xferOp.getLoc(), xferOp.getBase(), indicesIdx); Value dimAlloc = b.create(loc, alloc, resultIdx); Value index = xferOp.getIndices()[indicesIdx]; AffineExpr i, j, k; @@ -221,9 +221,9 @@ createSubViewIntersection(RewriterBase &b, VectorTransferOpInterface xferOp, SmallVector destIndices(memrefRank, b.getIndexAttr(0)); SmallVector strides(memrefRank, b.getIndexAttr(1)); auto copySrc = b.create( - loc, isaWrite ? alloc : xferOp.getSource(), srcIndices, sizes, strides); + loc, isaWrite ? alloc : xferOp.getBase(), srcIndices, sizes, strides); auto copyDest = b.create( - loc, isaWrite ? xferOp.getSource() : alloc, destIndices, sizes, strides); + loc, isaWrite ? 
xferOp.getBase() : alloc, destIndices, sizes, strides); return std::make_pair(copySrc, copyDest); } @@ -252,7 +252,7 @@ createFullPartialLinalgCopy(RewriterBase &b, vector::TransferReadOp xferOp, MemRefType compatibleMemRefType, Value alloc) { Location loc = xferOp.getLoc(); Value zero = b.create(loc, 0); - Value memref = xferOp.getSource(); + Value memref = xferOp.getBase(); return b.create( loc, inBoundsCond, [&](OpBuilder &b, Location loc) { @@ -305,7 +305,7 @@ static scf::IfOp createFullPartialVectorTransferRead( Location loc = xferOp.getLoc(); scf::IfOp fullPartialIfOp; Value zero = b.create(loc, 0); - Value memref = xferOp.getSource(); + Value memref = xferOp.getBase(); return b.create( loc, inBoundsCond, [&](OpBuilder &b, Location loc) { @@ -352,7 +352,7 @@ getLocationToWriteFullVec(RewriterBase &b, vector::TransferWriteOp xferOp, MemRefType compatibleMemRefType, Value alloc) { Location loc = xferOp.getLoc(); Value zero = b.create(loc, 0); - Value memref = xferOp.getSource(); + Value memref = xferOp.getBase(); return b .create( loc, inBoundsCond, @@ -509,7 +509,7 @@ static Operation *getAutomaticAllocationScope(Operation *op) { /// /// Preconditions: /// 1. `xferOp.getPermutationMap()` must be a minor identity map -/// 2. the rank of the `xferOp.getSource()` and the rank of the +/// 2. the rank of the `xferOp.getBase()` and the rank of the /// `xferOp.getVector()` must be equal. This will be relaxed in the future /// but requires rank-reducing subviews. LogicalResult mlir::vector::splitFullAndPartialTransfer( @@ -611,7 +611,7 @@ LogicalResult mlir::vector::splitFullAndPartialTransfer( // The operation is cloned to prevent deleting information needed for the // later IR creation. 
IRMapping mapping; - mapping.map(xferWriteOp.getSource(), memrefAndIndices.front()); + mapping.map(xferWriteOp.getBase(), memrefAndIndices.front()); mapping.map(xferWriteOp.getIndices(), memrefAndIndices.drop_front()); auto *clone = b.clone(*xferWriteOp, mapping); clone->setAttr(xferWriteOp.getInBoundsAttrName(), inBoundsAttr); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp index b94c5fce64f83..c635be6e83b6a 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -1265,7 +1265,7 @@ struct MaterializeTransferMask : public OpRewritePattern { unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1; Value off = xferOp.getIndices()[lastIndex]; Value dim = - vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex); + vector::createOrFoldDimOp(rewriter, loc, xferOp.getBase(), lastIndex); Value b = rewriter.create(loc, dim.getType(), dim, off); Value mask = rewriter.create( loc, @@ -1437,7 +1437,7 @@ class DropInnerMostUnitDimsTransferRead if (readOp.getMask()) return failure(); - auto srcType = dyn_cast(readOp.getSource().getType()); + auto srcType = dyn_cast(readOp.getBase().getType()); if (!srcType) return failure(); @@ -1469,7 +1469,7 @@ class DropInnerMostUnitDimsTransferRead auto loc = readOp.getLoc(); SmallVector sizes = - memref::getMixedSizes(rewriter, loc, readOp.getSource()); + memref::getMixedSizes(rewriter, loc, readOp.getBase()); SmallVector offsets(srcType.getRank(), rewriter.getIndexAttr(0)); SmallVector strides(srcType.getRank(), @@ -1480,7 +1480,7 @@ class DropInnerMostUnitDimsTransferRead ArrayAttr inBoundsAttr = rewriter.getArrayAttr( readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop)); Value rankedReducedView = rewriter.create( - loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides); + loc, resultMemrefType, readOp.getBase(), offsets, sizes, strides); auto 
permMap = getTransferMinorIdentityMap( cast(rankedReducedView.getType()), resultTargetVecType); Value result = rewriter.create( @@ -1527,7 +1527,7 @@ class DropInnerMostUnitDimsTransferWrite if (writeOp.getMask()) return failure(); - auto srcType = dyn_cast(writeOp.getSource().getType()); + auto srcType = dyn_cast(writeOp.getBase().getType()); if (!srcType) return failure(); @@ -1559,7 +1559,7 @@ class DropInnerMostUnitDimsTransferWrite Location loc = writeOp.getLoc(); SmallVector sizes = - memref::getMixedSizes(rewriter, loc, writeOp.getSource()); + memref::getMixedSizes(rewriter, loc, writeOp.getBase()); SmallVector offsets(srcType.getRank(), rewriter.getIndexAttr(0)); SmallVector strides(srcType.getRank(), @@ -1571,7 +1571,7 @@ class DropInnerMostUnitDimsTransferWrite writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop)); Value rankedReducedView = rewriter.create( - loc, resultMemrefType, writeOp.getSource(), offsets, sizes, strides); + loc, resultMemrefType, writeOp.getBase(), offsets, sizes, strides); auto permMap = getTransferMinorIdentityMap( cast(rankedReducedView.getType()), resultTargetVecType); diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp index dffb13c3a7923..1cc477d9dca91 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp @@ -164,7 +164,7 @@ struct UnrollTransferReadPattern sliceTransferIndices(elementOffsets, originalIndices, readOp.getPermutationMap(), loc, rewriter); auto slicedRead = rewriter.create( - loc, targetType, readOp.getSource(), indices, + loc, targetType, readOp.getBase(), indices, readOp.getPermutationMapAttr(), readOp.getPadding(), readOp.getMask(), readOp.getInBoundsAttr()); @@ -215,7 +215,7 @@ struct UnrollTransferWritePattern sliceTransferIndices(elementOffsets, originalIndices, writeOp.getPermutationMap(), loc, rewriter); Operation *slicedWrite = rewriter.create( - loc, slicedVector, 
resultTensor ? resultTensor : writeOp.getSource(), + loc, slicedVector, resultTensor ? resultTensor : writeOp.getBase(), indices, writeOp.getPermutationMapAttr(), writeOp.getInBoundsAttr()); // For the tensor case update the destination for the next transfer write. if (!slicedWrite->getResults().empty()) diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index 16a2732b53e97..399e902af4062 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -312,7 +312,7 @@ SmallVector vector::getMixedSizesXfer(bool hasTensorSemantics, Value base = TypeSwitch(xfer) .Case( - [&](auto readOp) { return readOp.getSource(); }) + [&](auto readOp) { return readOp.getBase(); }) .Case( [&](auto writeOp) { return writeOp.getOperand(1); });