
[SLP] Fix graph traversal in getSpillCost #124984

Merged
95 changes: 50 additions & 45 deletions llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1395,7 +1395,7 @@ class BoUpSLP {

/// \returns the cost incurred by unwanted spills and fills, caused by
/// holding live values over call sites.
InstructionCost getSpillCost() const;
InstructionCost getSpillCost();

/// \returns the vectorization cost of the subtree that starts at \p VL.
/// A negative number means that this is profitable.
@@ -2958,7 +2958,7 @@ class BoUpSLP {
}

/// Check if the value is vectorized in the tree.
bool isVectorized(Value *V) const {
bool isVectorized(const Value *V) const {
assert(V && "V cannot be nullptr.");
return ScalarToTreeEntries.contains(V);
}
@@ -12160,78 +12160,80 @@ bool BoUpSLP::isTreeNotExtendable() const {
return Res;
}

InstructionCost BoUpSLP::getSpillCost() const {
InstructionCost BoUpSLP::getSpillCost() {
// Walk from the bottom of the tree to the top, tracking which values are
// live. When we see a call instruction that is not part of our tree,
// query TTI to see if there is a cost to keeping values live over it
// (for example, if spills and fills are required).
unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
InstructionCost Cost = 0;

SmallPtrSet<Instruction *, 4> LiveValues;
Instruction *PrevInst = nullptr;
SmallPtrSet<const TreeEntry *, 4> LiveEntries;
const TreeEntry *Prev = nullptr;

// The entries in VectorizableTree are not necessarily ordered by their
// position in basic blocks. Collect them and order them by dominance so later
// instructions are guaranteed to be visited first. For instructions in
// different basic blocks, we only scan to the beginning of the block, so
// their order does not matter, as long as all instructions in a basic block
// are grouped together. Using dominance ensures a deterministic order.
SmallVector<Instruction *, 16> OrderedScalars;
SmallVector<TreeEntry *, 16> OrderedEntries;
for (const auto &TEPtr : VectorizableTree) {
if (TEPtr->State != TreeEntry::Vectorize)
if (TEPtr->isGather())
continue;
Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
if (!Inst)
continue;
OrderedScalars.push_back(Inst);
}
llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
auto *NodeA = DT->getNode(A->getParent());
auto *NodeB = DT->getNode(B->getParent());
OrderedEntries.push_back(TEPtr.get());
}
llvm::stable_sort(OrderedEntries, [&](const TreeEntry *TA,
const TreeEntry *TB) {
Instruction &A = getLastInstructionInBundle(TA);
Instruction &B = getLastInstructionInBundle(TB);
auto *NodeA = DT->getNode(A.getParent());
auto *NodeB = DT->getNode(B.getParent());
assert(NodeA && "Should only process reachable instructions");
assert(NodeB && "Should only process reachable instructions");
assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
"Different nodes should have different DFS numbers");
if (NodeA != NodeB)
return NodeA->getDFSNumIn() > NodeB->getDFSNumIn();
return B->comesBefore(A);
return B.comesBefore(&A);
});

for (Instruction *Inst : OrderedScalars) {
if (!PrevInst) {
PrevInst = Inst;
for (const TreeEntry *TE : OrderedEntries) {
if (!Prev) {
Prev = TE;
continue;
}

// Update LiveValues.
LiveValues.erase(PrevInst);
for (auto &J : PrevInst->operands()) {
if (isa<Instruction>(&*J) && isVectorized(&*J))
LiveValues.insert(cast<Instruction>(&*J));
LiveEntries.erase(Prev);
for (unsigned I : seq<unsigned>(Prev->getNumOperands())) {
const TreeEntry *Op = getVectorizedOperand(Prev, I);
if (!Op)
continue;
assert(!Op->isGather() && "Expected vectorized operand.");
LiveEntries.insert(Op);
}

LLVM_DEBUG({
dbgs() << "SLP: #LV: " << LiveValues.size();
for (auto *X : LiveValues)
dbgs() << " " << X->getName();
dbgs() << "SLP: #LV: " << LiveEntries.size();
for (auto *X : LiveEntries)
X->dump();
dbgs() << ", Looking at ";
Inst->dump();
TE->dump();
});

// Now find the sequence of instructions between PrevInst and Inst.
unsigned NumCalls = 0;
BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
PrevInstIt =
PrevInst->getIterator().getReverse();
const Instruction *PrevInst = &getLastInstructionInBundle(Prev);
BasicBlock::const_reverse_iterator
InstIt = ++getLastInstructionInBundle(TE).getIterator().getReverse(),
PrevInstIt = PrevInst->getIterator().getReverse();
while (InstIt != PrevInstIt) {
if (PrevInstIt == PrevInst->getParent()->rend()) {
PrevInstIt = Inst->getParent()->rbegin();
PrevInstIt = getLastInstructionInBundle(TE).getParent()->rbegin();
continue;
}

auto NoCallIntrinsic = [this](Instruction *I) {
auto *II = dyn_cast<IntrinsicInst>(I);
auto NoCallIntrinsic = [this](const Instruction *I) {
const auto *II = dyn_cast<IntrinsicInst>(I);
if (!II)
return false;
if (II->isAssumeLikeIntrinsic())
@@ -12252,25 +12254,28 @@ InstructionCost BoUpSLP::getSpillCost() const {
};

// Debug information does not impact spill cost.
if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
&*PrevInstIt != PrevInst)
// Vectorized calls, represented as vector intrinsics, do not impact spill
// cost.
if (const auto *CB = dyn_cast<CallBase>(&*PrevInstIt);
CB && !NoCallIntrinsic(CB) && !isVectorized(CB))
Contributor:

I think vectorized calls might still impact spill cost. E.g. call <2 x i64> @llvm.sin(<2 x i64> %x) will get scalarized during codegen to:

	vs1r.v	v8, (a0)                        # Unknown-size Folded Spill
	vslidedown.vi	v8, v8, 1
	vfmv.f.s	fa0, v8
	call	tanh
	fmv.d	fs0, fa0
	fld	fa0, 16(sp)                     # 8-byte Folded Reload
	call	tanh

Member Author:

I have #125070 to fix this

Contributor:

Oh whoops, I didn't see this. I'll close #125650

NumCalls++;

++PrevInstIt;
}

if (NumCalls) {
SmallVector<Type *, 4> V;
for (auto *II : LiveValues) {
auto *ScalarTy = II->getType();
if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
ScalarTy = VectorTy->getElementType();
V.push_back(getWidenedType(ScalarTy, BundleWidth));
SmallVector<Type *, 4> EntriesTypes;
for (const TreeEntry *TE : LiveEntries) {
auto *ScalarTy = TE->getMainOp()->getType();
auto It = MinBWs.find(TE);
Collaborator:

You dropped the

if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
  ScalarTy = VectorTy->getElementType();

which I think was added for revectorization. Is that needed, or does the MainOp->getType() call normalize?

Member Author:

It was a bug; we need to estimate the whole vector here. For revec, if the ScalarTy is 4x and there are 2 elements, it estimates the spill cost for a 2x vector, but it should estimate for an 8x vector.
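
A tiny standalone sketch of the widened-type calculation discussed here, assuming getWidenedType multiplies the vector factor by the element count of a revectorized scalar type and that a MinBWs entry narrows the element width first (stand-in arithmetic, not the LLVM API):

#include <cstdio>

struct WidenedTy {
  unsigned ElemBits;
  unsigned Lanes;
};

// MinBW == 0 means the entry is not narrowed by MinBWs.
static WidenedTy widen(unsigned ScalarElemBits, unsigned ScalarElems,
                       unsigned VF, unsigned MinBW) {
  unsigned Bits = MinBW ? MinBW : ScalarElemBits;
  return {Bits, VF * ScalarElems};
}

int main() {
  // Plain SLP entry: i32 scalars, VF = 4, narrowed to 16 bits -> <4 x i16>.
  WidenedTy A = widen(32, 1, 4, 16);
  // Revec entry: <4 x i32> "scalars", VF = 2 -> 8 lanes, not 2.
  WidenedTy B = widen(32, 4, 2, 0);
  std::printf("<%u x i%u> and <%u x i%u>\n", A.Lanes, A.ElemBits, B.Lanes, B.ElemBits);
  return 0;
}

For a revec entry of <4 x i32> "scalars" with VF 2 this yields 8 lanes rather than 2, matching the 4x/8x example above.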

if (It != MinBWs.end())
ScalarTy = IntegerType::get(ScalarTy->getContext(), It->second.first);
EntriesTypes.push_back(getWidenedType(ScalarTy, TE->getVectorFactor()));
}
Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(EntriesTypes);
}

PrevInst = Inst;
Prev = TE;
}

return Cost;
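
The comment at the top of getSpillCost describes the walk this patch fixes: vectorizable bundles are visited bottom-up in dominance order, a set of live tree entries is maintained, and every plain call crossed between two consecutive bundles charges the cost of keeping the live entries' vector types in registers. A minimal standalone sketch of that bookkeeping, with stand-in types and a made-up unit cost in place of the real TreeEntry and TTI classes (illustration only, not LLVM code):

#include <algorithm>
#include <cstdio>
#include <set>
#include <vector>

struct Entry {
  int DFSIn;                 // stand-in for the DFS-in number of the bundle's block
  int PosInBlock;            // stand-in for Instruction::comesBefore ordering
  int CallsSincePrev;        // plain calls between this bundle and the previous one
  std::vector<int> Operands; // indices of vectorized operand entries
  unsigned SpillCostPerCall; // stand-in for the TTI cost of keeping this type live
};

int main() {
  std::vector<Entry> Tree = {
      {1, 10, 0, {1}, 1}, // root bundle: uses entry 1, no calls before it
      {1, 4, 2, {}, 1},   // operand bundle: two plain calls sit between it and the root
  };

  // Order bundles as the patch does: larger DFS-in number first, and within a
  // block the later bundle first, so the walk runs bottom-up.
  std::vector<int> Order = {0, 1};
  std::stable_sort(Order.begin(), Order.end(), [&](int A, int B) {
    if (Tree[A].DFSIn != Tree[B].DFSIn)
      return Tree[A].DFSIn > Tree[B].DFSIn;
    return Tree[B].PosInBlock < Tree[A].PosInBlock;
  });

  std::set<int> Live;
  unsigned Cost = 0;
  int PrevIdx = -1;
  for (int Idx : Order) {
    if (PrevIdx >= 0) {
      // The previous bundle dies; its vectorized operands become live.
      Live.erase(PrevIdx);
      for (int Op : Tree[PrevIdx].Operands)
        Live.insert(Op);
      // Every plain call crossed since the previous bundle spills each live entry.
      for (int L : Live)
        Cost += Tree[Idx].CallsSincePrev * Tree[L].SpillCostPerCall;
    }
    PrevIdx = Idx;
  }
  std::printf("estimated spill cost: %u\n", Cost); // 2 calls x 1 live entry = 2
  return 0;
}

With the patch, the sort key is the last instruction of each bundle (getLastInstructionInBundle) rather than the first scalar; the DFS-in/position comparator above only stands in for that ordering.
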
33 changes: 15 additions & 18 deletions llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
@@ -684,27 +684,27 @@ define void @store_blockstrided3(ptr nocapture noundef readonly %x, ptr nocaptur
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM5]]
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[STRIDE]], 1
; CHECK-NEXT: [[IDXPROM11:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4
; CHECK-NEXT: [[ADD14:%.*]] = or disjoint i32 [[MUL]], 1
; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[ADD14:%.*]] = add nsw i32 [[MUL]], 2
; CHECK-NEXT: [[IDXPROM15:%.*]] = sext i32 [[ADD14]] to i64
; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM15]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4
; CHECK-NEXT: [[MUL21:%.*]] = mul nsw i32 [[STRIDE]], 3
; CHECK-NEXT: [[IDXPROM23:%.*]] = sext i32 [[MUL21]] to i64
; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM23]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX24]], align 4
; CHECK-NEXT: [[ADD26:%.*]] = add nsw i32 [[MUL21]], 1
; CHECK-NEXT: [[IDXPROM27:%.*]] = sext i32 [[ADD26]] to i64
; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM27]]
; CHECK-NEXT: [[ARRAYIDX64:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM27]]
; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds nuw i8, ptr [[Y:%.*]], i64 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX35]], align 4
; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM5]]
; CHECK-NEXT: [[ARRAYIDX48:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[ARRAYIDX48:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM15]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX48]], align 4
; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM15]]
; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM23]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX60]], align 4
; CHECK-NEXT: [[ARRAYIDX64:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM27]]
; CHECK-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 [[IDXPROM27]]
; CHECK-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds nuw i8, ptr [[Z:%.*]], i64 4
; CHECK-NEXT: [[MUL73:%.*]] = mul nsw i32 [[TMP3]], [[TMP0]]
; CHECK-NEXT: [[ARRAYIDX76:%.*]] = getelementptr inbounds nuw i8, ptr [[Z]], i64 24
@@ -715,25 +715,22 @@ define void @store_blockstrided3(ptr nocapture noundef readonly %x, ptr nocaptur
; CHECK-NEXT: [[TMP10:%.*]] = mul nsw <2 x i32> [[TMP8]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = mul nsw <2 x i32> [[TMP9]], [[TMP7]]
; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x i32> [[TMP10]], <2 x i32> [[TMP11]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; CHECK-NEXT: [[ARRAYIDX84:%.*]] = getelementptr inbounds nuw i8, ptr [[Z]], i64 28
; CHECK-NEXT: [[MUL81:%.*]] = mul nsw i32 [[TMP4]], [[TMP1]]
; CHECK-NEXT: [[ARRAYIDX82:%.*]] = getelementptr inbounds nuw i8, ptr [[Z]], i64 32
; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i32>, ptr [[ARRAYIDX16]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX52]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = mul nsw <2 x i32> [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: [[MUL87:%.*]] = mul nsw i32 [[TMP5]], [[TMP2]]
; CHECK-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds nuw i8, ptr [[Z]], i64 44
; CHECK-NEXT: [[ARRAYIDX92:%.*]] = getelementptr inbounds nuw i8, ptr [[Z]], i64 36
; CHECK-NEXT: [[TMP17:%.*]] = load <2 x i32>, ptr [[ARRAYIDX28]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = load <2 x i32>, ptr [[ARRAYIDX64]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = load <2 x i32>, ptr [[ARRAYIDX49]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = load <2 x i32>, ptr [[ARRAYIDX65]], align 4
; CHECK-NEXT: store i32 [[MUL73]], ptr [[Z]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP12]], ptr [[ARRAYIDX72]], align 4
; CHECK-NEXT: store i32 [[MUL81]], ptr [[ARRAYIDX82]], align 4
; CHECK-NEXT: store <2 x i32> [[TMP16]], ptr [[ARRAYIDX76]], align 4
; CHECK-NEXT: store i32 [[MUL81]], ptr [[ARRAYIDX76]], align 4
; CHECK-NEXT: store i32 [[MUL87]], ptr [[ARRAYIDX88]], align 4
; CHECK-NEXT: [[TMP19:%.*]] = mul nsw <2 x i32> [[TMP18]], [[TMP17]]
; CHECK-NEXT: [[TMP20:%.*]] = shufflevector <2 x i32> [[TMP19]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT: store <2 x i32> [[TMP20]], ptr [[ARRAYIDX92]], align 4
; CHECK-NEXT: [[TMP20:%.*]] = mul nsw <2 x i32> [[TMP15]], [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = mul nsw <2 x i32> [[TMP16]], [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <2 x i32> [[TMP20]], <2 x i32> [[TMP21]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; CHECK-NEXT: store <4 x i32> [[TMP19]], ptr [[ARRAYIDX84]], align 4
; CHECK-NEXT: ret void
;
entry: