diff --git a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp index 50700bd41e8a2..7e5d0045f5f1f 100644 --- a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp +++ b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp @@ -19,8 +19,10 @@ #include "swift/SIL/SILBuilder.h" #include "swift/SILOptimizer/PassManager/Passes.h" #include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/CFGOptUtils.h" #include "swift/SILOptimizer/Utils/InstOptUtils.h" #include "swift/SILOptimizer/Utils/SILSSAUpdater.h" +#include "swift/SILOptimizer/Utils/ValueLifetime.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/Compiler.h" @@ -410,6 +412,8 @@ class AvailableValueAggregator { /// take. SmallVector<SILInstruction *, 16> insertedInsts; + SmallVector<SILPhiArgument *, 16> insertedPhiNodes; + public: AvailableValueAggregator(SILInstruction *Inst, MutableArrayRef<AvailableValue> AvailableValueList, @@ -431,12 +435,6 @@ class AvailableValueAggregator { bool isTopLevel = true); bool canTake(SILType loadTy, unsigned firstElt) const; - /// If as a result of us copying values, we may have unconsumed destroys, find - /// the appropriate location and place the values there. Only used when - /// ownership is enabled. - SingleValueInstruction *addMissingDestroysForCopiedValues(LoadBorrowInst *li, - SILValue newVal); - SingleValueInstruction *addMissingDestroysForCopiedValues(LoadInst *li, SILValue newVal); @@ -455,6 +453,19 @@ class AvailableValueAggregator { return expectedOwnership == AvailableValueExpectedOwnership::Copy; } + /// Given a load_borrow that we have aggregated a new value for, fix up the + /// reference counts of the intermediate copies and phis to ensure that all + /// forwarding operations in the CFG are strongly control equivalent (i.e. run + /// the same number of times). + void fixupOwnership(LoadBorrowInst *lbi, SILValue newVal) { + // Sort the inserted instructions so that we can binary search over them + // when marking copy_values that need to be skipped. + sort(insertedInsts); + SmallBitVector instsToSkip(insertedInsts.size()); + addHandOffCopyDestroysForPhis(lbi, newVal, instsToSkip); + addMissingDestroysForCopiedValues(lbi, newVal, instsToSkip); + } + private: SILValue aggregateFullyAvailableValue(SILType loadTy, unsigned firstElt); SILValue aggregateTupleSubElts(TupleType *tt, SILType loadTy, @@ -464,6 +475,15 @@ SILValue handlePrimitiveValue(SILType loadTy, SILValue address, unsigned firstElt); bool isFullyAvailable(SILType loadTy, unsigned firstElt) const; + + + /// If, as a result of copying values, we may have unconsumed destroys, find + /// the appropriate locations and place the destroys there. Only used when + /// ownership is enabled. + void addMissingDestroysForCopiedValues(LoadBorrowInst *li, SILValue newVal, + const SmallBitVector &instsToSkip); + void addHandOffCopyDestroysForPhis(LoadBorrowInst *li, SILValue newVal, + SmallBitVector &instsToSkipOut); }; } // end anonymous namespace @@ -553,18 +573,53 @@ SILValue AvailableValueAggregator::aggregateValues(SILType LoadTy, // Check to see if the requested value is fully available, as an aggregate. // This is a super-common case for single-element structs, but is also a // general answer for arbitrary structs and tuples as well.
- if (SILValue Result = aggregateFullyAvailableValue(LoadTy, FirstElt)) + if (SILValue Result = aggregateFullyAvailableValue(LoadTy, FirstElt)) { return Result; + } // If we have a tuple type, then aggregate the tuple's elements into a full // tuple value. - if (TupleType *TT = LoadTy.getAs<TupleType>()) - return aggregateTupleSubElts(TT, LoadTy, Address, FirstElt); + if (TupleType *tupleType = LoadTy.getAs<TupleType>()) { + SILValue result = + aggregateTupleSubElts(tupleType, LoadTy, Address, FirstElt); + if (isTopLevel && + result.getOwnershipKind() == ValueOwnershipKind::Guaranteed) { + SILValue borrowedResult = result; + SILBuilderWithScope builder(&*B.getInsertionPoint(), &insertedInsts); + result = builder.emitCopyValueOperation(Loc, borrowedResult); + SmallVector<BorrowScopeIntroducingValue, 4> introducers; + bool foundIntroducers = + getUnderlyingBorrowIntroducingValues(borrowedResult, introducers); + (void)foundIntroducers; + assert(foundIntroducers); + for (auto value : introducers) { + builder.emitEndBorrowOperation(Loc, value.value); + } + } + return result; + } // If we have a struct type, then aggregate the struct's elements into a full // struct value. - if (auto *SD = getFullyReferenceableStruct(LoadTy)) - return aggregateStructSubElts(SD, LoadTy, Address, FirstElt); + if (auto *structDecl = getFullyReferenceableStruct(LoadTy)) { + SILValue result = + aggregateStructSubElts(structDecl, LoadTy, Address, FirstElt); + if (isTopLevel && + result.getOwnershipKind() == ValueOwnershipKind::Guaranteed) { + SILValue borrowedResult = result; + SILBuilderWithScope builder(&*B.getInsertionPoint(), &insertedInsts); + result = builder.emitCopyValueOperation(Loc, borrowedResult); + SmallVector<BorrowScopeIntroducingValue, 4> introducers; + bool foundIntroducers = + getUnderlyingBorrowIntroducingValues(borrowedResult, introducers); + (void)foundIntroducers; + assert(foundIntroducers); + for (auto value : introducers) { + builder.emitEndBorrowOperation(Loc, value.value); + } + } + return result; + } // Otherwise, we have a non-aggregate primitive. Load or extract the value. // @@ -610,7 +665,7 @@ AvailableValueAggregator::aggregateFullyAvailableValue(SILType loadTy, // SSA updater to get a value. The reason why this is safe is that we can only // have multiple insertion points if we are storing exactly the same value // implying that we can just copy firstVal at each insertion point. - SILSSAUpdater updater; + SILSSAUpdater updater(&insertedPhiNodes); updater.Initialize(loadTy); Optional<SILValue> singularValue; @@ -653,6 +708,7 @@ AvailableValueAggregator::aggregateFullyAvailableValue(SILType loadTy, // Finally, grab the value from the SSA updater. SILValue result = updater.GetValueInMiddleOfBlock(B.getInsertionBB()); + assert(result.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned)); return result; } @@ -681,6 +737,15 @@ SILValue AvailableValueAggregator::aggregateTupleSubElts(TupleType *TT, FirstElt += NumSubElt; } + // If we are going to use this to promote a borrowed value, insert borrow + // operations. Eventually I am going to do this for everything, but this + // should make it easier to bring up.
+ if (expectedOwnership == AvailableValueExpectedOwnership::Borrow) { + for (unsigned i : indices(ResultElts)) { + ResultElts[i] = B.emitBeginBorrowOperation(Loc, ResultElts[i]); + } + } + return B.createTuple(Loc, LoadTy, ResultElts); } @@ -708,6 +773,12 @@ SILValue AvailableValueAggregator::aggregateStructSubElts(StructDecl *sd, firstElt += numSubElt; } + if (expectedOwnership == AvailableValueExpectedOwnership::Borrow) { + for (unsigned i : indices(resultElts)) { + resultElts[i] = B.emitBeginBorrowOperation(Loc, resultElts[i]); + } + } + return B.createStruct(Loc, loadTy, resultElts); } @@ -725,8 +796,9 @@ SILValue AvailableValueAggregator::handlePrimitiveValue(SILType loadTy, if (!val) { LoadInst *load = ([&]() { if (B.hasOwnership()) { - return B.createTrivialLoadOr(Loc, address, - LoadOwnershipQualifier::Copy); + SILBuilderWithScope builder(&*B.getInsertionPoint(), &insertedInsts); + return builder.createTrivialLoadOr(Loc, address, + LoadOwnershipQualifier::Copy); } return B.createLoad(Loc, address, LoadOwnershipQualifier::Unqualified); }()); @@ -756,7 +828,7 @@ SILValue AvailableValueAggregator::handlePrimitiveValue(SILType loadTy, // inserted copies at each of these insertion points, we know that we will // never have the same value along all paths unless we have a trivial value // meaning the SSA updater given a non-trivial value must /always/ be used. - SILSSAUpdater updater; + SILSSAUpdater updater(&insertedPhiNodes); updater.Initialize(loadTy); Optional<SILValue> singularValue; @@ -871,24 +943,247 @@ AvailableValueAggregator::addMissingDestroysForCopiedValues(LoadInst *li, return nullptr; } -SingleValueInstruction * -AvailableValueAggregator::addMissingDestroysForCopiedValues(LoadBorrowInst *lbi, - SILValue newVal) { +void AvailableValueAggregator::addHandOffCopyDestroysForPhis(LoadBorrowInst *lbi, SILValue newVal, + SmallBitVector &instsToSkip) { + ValueLifetimeAnalysis::Frontier lifetimeFrontier; + SmallPtrSet<SILBasicBlock *, 8> visitedBlocks; + SmallVector<SILBasicBlock *, 8> leakingBlocks; + SmallVector<std::pair<SILBasicBlock *, SILValue>, 8> incomingValues; + auto loc = RegularLocation::getAutoGeneratedLocation(); + + LLVM_DEBUG(llvm::dbgs() << "Inserted Phis!\n"); +#ifndef NDEBUG + for (auto *phi : insertedPhiNodes) { + LLVM_DEBUG(llvm::dbgs() << "Phi: " << *phi); + } +#endif + + // Before we begin, identify the offsets of all phis that are intermediate + // phis inserted by the SSA updater. + SmallBitVector intermediatePhiOffsets(insertedPhiNodes.size()); + for (unsigned i : indices(insertedPhiNodes)) { + if (insertedPhiNodes[i]->getSingleUserOfType<TermInst>()) { + intermediatePhiOffsets.set(i); + } + } + + // First go through all of our phi nodes doing the following: + // + // 1. If any of the phi nodes has a copy_value as an operand, we know that + // the copy_value does not dominate our final definition. In such a case, + // since the copy_value may not be post-dominated by the phi, we need to + // insert a copy_value at the phi to allow for post-domination and then + // use the ValueLifetimeAnalysis to determine the rest of the frontier + // for the value. + // + // 2. If our phi node is used by another phi node, we run into a similar + // problem: our original phi node may not dominate our final definition + // and may not be strongly control equivalent with our phi. To work + // around this problem, we insert a copy_value at the phi so that the phi + // post-dominates its copy, and then extend the lifetime of the phied + // value over that copy.
+ for (unsigned i : indices(insertedPhiNodes)) { + auto *phiArg = insertedPhiNodes[i]; + + // If our phiArg is not owned, continue. No fixes are needed. + if (phiArg->getOwnershipKind() != ValueOwnershipKind::Owned) + continue; + + LLVM_DEBUG(llvm::dbgs() << "Visiting inserted phi: " << *phiArg); + // Otherwise, we have a copy_value that may not be strongly control + // equivalent with our phi node. In such a case, we need to use + // ValueLifetimeAnalysis to lifetime extend the copy such that we can + // produce a new copy_value at the phi. We insert destroys along the + // frontier. + visitedBlocks.clear(); + leakingBlocks.clear(); + incomingValues.clear(); + + phiArg->getIncomingPhiValues(incomingValues); + unsigned phiIndex = phiArg->getIndex(); + for (auto pair : incomingValues) { + SILValue value = pair.second; + + // If we had a non-trivial type with non-owned ownership, we will not see + // a copy_value, so skip those values here. + if (value.getOwnershipKind() != ValueOwnershipKind::Owned) + continue; + + // Otherwise, value should be from a copy_value or a phi node. + assert(isa<CopyValueInst>(value) || isa<SILPhiArgument>(value)); + + // If we have a copy_value, set a bit for it in instsToSkip so that when we + // start processing insertedInsts we know that we already handled it + // here. + if (auto *cvi = dyn_cast<CopyValueInst>(value)) { + auto iter = lower_bound(insertedInsts, cvi); + assert(iter != insertedInsts.end() && *iter == cvi); + instsToSkip[std::distance(insertedInsts.begin(), iter)] = true; + + // Then check if our termInst is in the same block as our copy_value. In + // such a case, we can just use the copy_value as our phi's value + // without needing to worry about any issues around control equivalence. + if (pair.first == cvi->getParent()) + continue; + } else { + assert(isa<SILPhiArgument>(value)); + } + + // Otherwise, insert a copy_value instruction right before the branch that + // feeds the phi, and use that copy as the phi's incoming value. + auto *termInst = pair.first->getTerminator(); + SILBuilderWithScope builder(termInst); + auto *phiCopy = builder.createCopyValue(loc, value); + termInst->setOperand(phiIndex, phiCopy); + + // Normalize on our base now that we have inserted the copy_value into the + // terminator block. If we have a copy_value, just use it directly as our + // base. We know it isn't in the block of our phiCopy due to a check + // above. + SILInstruction *base = nullptr; + if (auto *cvi = dyn_cast<CopyValueInst>(value)) { + assert(cvi->getParent() != phiCopy->getParent() && + "Just to check invariant from above"); + base = cvi; + } else { + assert(isa<SILPhiArgument>(value)); + // If we have a phi argument and our incoming value block is the same as + // our phi copy's block, we know that the copy_value we inserted will only + // be used by the phi. So insert a destroy_value in the incoming value + // block after the copy_value that we inserted and then continue. + if (pair.first == value->getParentBlock()) { + builder.createDestroyValue(loc, value); + continue; + } + + // Otherwise, our copy_value may not be post-dominated by our phi. To + // work around that, we need to insert destroys along the other + // paths. So set base to the first instruction in our argument's block, + // so we can insert destroys for our base. + base = &*value->getParentBlock()->begin(); + } + assert(base && "Should have been assigned"); + + // Then lifetime extend our base over the copy_value.
+ assert(lifetimeFrontier.empty()); + ValueLifetimeAnalysis analysis(base, phiCopy); + bool foundCriticalEdges = !analysis.computeFrontier( + lifetimeFrontier, ValueLifetimeAnalysis::DontModifyCFG, + &deadEndBlocks); + (void)foundCriticalEdges; + assert(!foundCriticalEdges); + + while (!lifetimeFrontier.empty()) { + auto *insertPoint = lifetimeFrontier.pop_back_val(); + SILBuilderWithScope builder(insertPoint); + builder.createDestroyValue(loc, value); + } + + visitedBlocks.clear(); + leakingBlocks.clear(); + } + + // Then see if our phi is an intermediate phi. If it is an intermediate phi, + // we know that this is not the phi node that is post-dominated by the + // load_borrow and that we will lifetime extend it via the child + // phi. Instead, we need to just ensure that our phi arg does not leak onto + // its set of post-dominating paths, subtracting from that set the path + // through our terminator use. + if (intermediatePhiOffsets[i]) { + continue; + } + + // If we reach this point, then we know that we are a phi node that actually + // dominates our user, so we need to lifetime extend it over the + // load_borrow. Thus insert copy_value along the incoming edges and then + // lifetime extend the phi node over the load_borrow. + // + // The linear lifetime checker doesn't care if the passed in load is + // actually a user of our copy_value. What we care about is that the load is + // guaranteed to be in the block where we have reformed the tuple in a + // consuming manner. This means if we add it as the consuming use of the + // copy, we can find the leaking places if any exist. + // + // Then perform the linear lifetime check. If we succeed, continue. We have + // no further work to do. + auto errorKind = ownership::ErrorBehaviorKind::ReturnFalse; + LinearLifetimeChecker checker(visitedBlocks, deadEndBlocks); + auto error = checker.checkValue( + phiArg, {BranchPropagatedUser(&lbi->getAllOperands()[0])}, {}, + errorKind, &leakingBlocks); + + if (!error.getFoundError()) { + // If we did not find an error, then our phi must be strongly control + // equivalent to our load_borrow. So just insert a destroy_value + // for the phi. + auto next = std::next(lbi->getIterator()); + SILBuilderWithScope builder(next); + builder.emitDestroyValueOperation(next->getLoc(), phiArg); + continue; + } + + // Ok, we found some leaking blocks and potentially a loop. If we do not + // find a loop, insert the destroy_value after the load_borrow. We do not do + // this if we found a loop since our leaking blocks will lifetime extend the + // value over the loop. + if (!error.getFoundOverConsume()) { + auto next = std::next(lbi->getIterator()); + SILBuilderWithScope builder(next); + builder.emitDestroyValueOperation(next->getLoc(), phiArg); + } + + // Ok, we found some leaking blocks. Insert destroys at the beginning of + // these blocks for our phi. + for (auto *bb : leakingBlocks) { + SILBuilderWithScope b(bb->begin()); + b.emitDestroyValueOperation(loc, phiArg); + } + } + // Clear the phi node array now that we are done.
+ insertedPhiNodes.clear(); +} + +void AvailableValueAggregator::addMissingDestroysForCopiedValues( + LoadBorrowInst *lbi, SILValue newVal, + const SmallBitVector &instsToSkip) { assert(B.hasOwnership() && "We assume this is only called if we have ownership"); SmallPtrSet<SILBasicBlock *, 8> visitedBlocks; SmallVector<SILBasicBlock *, 8> leakingBlocks; - bool foundLoop = false; auto loc = RegularLocation::getAutoGeneratedLocation(); - while (!insertedInsts.empty()) { - auto *cvi = dyn_cast<CopyValueInst>(insertedInsts.pop_back_val()); + + for (unsigned i : indices(insertedInsts)) { + // If we already handled this instruction above when handling phi nodes, + // just continue. + if (instsToSkip[i]) + continue; + + // Otherwise, see if this is a load [copy]. If it is a load [copy], then we + // know that the load [copy] must be in the load block, meaning we can just + // put a destroy_value /after/ the load_borrow to ensure that the value + // lives long enough for us to copy_value it (or a derived value) for the + // begin_borrow. + if (auto *li = dyn_cast<LoadInst>(insertedInsts[i])) { + if (li->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) { + assert(li->getParent() == lbi->getParent()); + auto next = std::next(lbi->getIterator()); + SILBuilderWithScope builder(next); + builder.emitDestroyValueOperation(next->getLoc(), li); + continue; + } + } + + // Otherwise, we only need to fix up copy_values here; skip any other + // instruction that we inserted (e.g. one that only feeds a phi and thus + // does not dominate our final user). + auto *cvi = dyn_cast<CopyValueInst>(insertedInsts[i]); if (!cvi) continue; // Clear our state. visitedBlocks.clear(); leakingBlocks.clear(); + // The linear lifetime checker doesn't care if the passed in load is // actually a user of our copy_value. What we care about is that the load is // guaranteed to be in the block where we have reformed the tuple in a // consuming manner. This means if we add it as the consuming use of the // copy, we can find the leaking places if any exist. @@ -902,51 +1197,34 @@ AvailableValueAggregator::addMissingDestroysForCopiedValues(LoadBorrowInst *lbi, auto error = checker.checkValue( cvi, {BranchPropagatedUser(&lbi->getAllOperands()[0])}, {}, errorKind, &leakingBlocks); - if (!error.getFoundError()) + + if (!error.getFoundError()) { + // If we did not find an error, then our copy_value must be strongly + // control equivalent to our load_borrow. So just insert a destroy_value + // for the copy_value. + auto next = std::next(lbi->getIterator()); + SILBuilderWithScope builder(next); + builder.emitDestroyValueOperation(next->getLoc(), cvi); continue; + } - // Ok, we found some leaking blocks. Since we are using the linear lifetime - // checker with memory, we do not have any guarantees that the store is out - // side of a loop and a load is in a loop. In such a case, we want to - // replace the load with a copy_value. - foundLoop |= error.getFoundOverConsume(); + // Ok, we found some leaking blocks and potentially a loop. If we do not + // find a loop, insert the destroy_value after the load_borrow. We do not do + // this if we found a loop since our leaking blocks will lifetime extend the + // value over the loop. + if (!error.getFoundOverConsume()) { + auto next = std::next(lbi->getIterator()); + SILBuilderWithScope builder(next); + builder.emitDestroyValueOperation(next->getLoc(), cvi); + } - // Ok, we found some leaking blocks. Insert destroys at the - // beginning of these blocks for our copy_value. + // Ok, we found some leaking blocks. Insert destroys at the beginning of + // these blocks for our copy_value.
for (auto *bb : leakingBlocks) { SILBuilderWithScope b(bb->begin()); b.emitDestroyValueOperation(loc, cvi); } } - - // If we didn't find a loop, we are done, just return svi to get RAUWed. - if (!foundLoop) { - // If we had a load_borrow, we have created an extra copy that we are going - // to borrow at the load point. This means we need to handle the destroying - // of the value along paths reachable from the load_borrow. Luckily that - // will exactly be after the end_borrows of the load_borrow. - for (auto *use : lbi->getUses()) { - if (auto *ebi = dyn_cast(use->getUser())) { - auto next = std::next(ebi->getIterator()); - SILBuilderWithScope(next).emitDestroyValueOperation(ebi->getLoc(), - newVal); - } - } - return lbi; - } - - // If we found a loop, then we know that our leaking blocks are the exiting - // blocks of the loop and the value has been lifetime extended over the loop. - // If we have a load_borrow, we create a begin_borrow for the end_borrows in - // the loop. - newVal = SILBuilderWithScope(lbi).createBeginBorrow(lbi->getLoc(), newVal); - - lbi->replaceAllUsesWith(newVal); - SILValue addr = lbi->getOperand(); - lbi->eraseFromParent(); - if (auto *addrI = addr->getDefiningInstruction()) - recursivelyDeleteTriviallyDeadInstructions(addrI); - return nullptr; } //===----------------------------------------------------------------------===// @@ -1704,8 +1982,8 @@ bool AllocOptimize::promoteLoadCopy(LoadInst *li) { AvailableValueExpectedOwnership::Copy); SILValue newVal = agg.aggregateValues(loadTy, li->getOperand(), firstElt); - LLVM_DEBUG(llvm::dbgs() << " *** Promoting load: " << *li << "\n"); - LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal << "\n"); + LLVM_DEBUG(llvm::dbgs() << " *** Promoting load: " << *li); + LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal); ++NumLoadPromoted; // If we did not have ownership, we did not insert extra copies at our stores, @@ -1793,6 +2071,8 @@ bool AllocOptimize::promoteLoadBorrow(LoadBorrowInst *lbi) { if (!result.hasValue()) return false; + ++NumLoadPromoted; + SILType loadTy = result->first; unsigned firstElt = result->second; @@ -1804,33 +2084,39 @@ bool AllocOptimize::promoteLoadBorrow(LoadBorrowInst *lbi) { AvailableValueExpectedOwnership::Borrow); SILValue newVal = agg.aggregateValues(loadTy, lbi->getOperand(), firstElt); - LLVM_DEBUG(llvm::dbgs() << " *** Promoting load: " << *lbi << "\n"); - LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal << "\n"); + LLVM_DEBUG(llvm::dbgs() << " *** Promoting load: " << *lbi); + LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal); - // If we inserted any copies, we created the copies at our stores. We know - // that in our load block, we will reform the aggregate as appropriate at the - // load implying that the value /must/ be fully consumed. If we promoted a +0 - // value, we created dominating destroys along those paths. Thus any leaking - // blocks that we may have can be found by performing a linear lifetime check - // over all copies that we found using the load as the "consuming uses" (just - // for the purposes of identifying the consuming block). - auto *oldLoad = agg.addMissingDestroysForCopiedValues(lbi, newVal); - - ++NumLoadPromoted; + // If we inserted any copies, we created the copies at our + // stores. We know that in our load block, we will reform the + // aggregate as appropriate, will borrow the value there and give us + // a whole pristine new value. Now in this routine, we go through + // all of the copies and phis that we inserted and ensure that: + // + // 1. 
Phis are always strongly control equivalent to the copies that + // produced their incoming values. + // + // 2. All intermediate copies are properly lifetime extended to the + // load block and all leaking blocks are filled in as appropriate + // with destroy_values. + agg.fixupOwnership(lbi, newVal); - // If we are returned the load, eliminate it. Otherwise, it was already - // handled for us... so return true. - if (!oldLoad) - return true; + // Now that we have fixed up the lifetimes of all of our incoming copies so + // that they are alive over the load point, copy and borrow newVal, insert a + // destroy_value after each end_borrow, and then RAUW. + SILBuilderWithScope builder(lbi); + SILValue copiedVal = builder.emitCopyValueOperation(lbi->getLoc(), newVal); + newVal = builder.createBeginBorrow(lbi->getLoc(), copiedVal); - // If our load was a +0 value, borrow the value and the RAUW. We reuse the - // end_borrows of our load_borrow. - newVal = - SILBuilderWithScope(oldLoad).createBeginBorrow(oldLoad->getLoc(), newVal); + for (auto *ebi : lbi->getUsersOfType<EndBorrowInst>()) { + auto next = std::next(ebi->getIterator()); + SILBuilderWithScope(next).emitDestroyValueOperation(ebi->getLoc(), + copiedVal); + } - oldLoad->replaceAllUsesWith(newVal); - SILValue addr = oldLoad->getOperand(0); - oldLoad->eraseFromParent(); + lbi->replaceAllUsesWith(newVal); + SILValue addr = lbi->getOperand(); + lbi->eraseFromParent(); if (auto *addrI = addr->getDefiningInstruction()) recursivelyDeleteTriviallyDeadInstructions(addrI); return true; } @@ -1917,8 +2203,8 @@ void AllocOptimize::promoteDestroyAddr( ++NumDestroyAddrPromoted; - LLVM_DEBUG(llvm::dbgs() << " *** Promoting destroy_addr: " << *dai << "\n"); - LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal << "\n"); + LLVM_DEBUG(llvm::dbgs() << " *** Promoting destroy_addr: " << *dai); + LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal); SILBuilderWithScope(dai).emitDestroyValueOperation(dai->getLoc(), newVal); dai->eraseFromParent(); @@ -1945,8 +2231,8 @@ void AllocOptimize::promoteLoadTake( ++NumLoadTakePromoted; - LLVM_DEBUG(llvm::dbgs() << " *** Promoting load_take: " << *li << "\n"); - LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal << "\n"); + LLVM_DEBUG(llvm::dbgs() << " *** Promoting load_take: " << *li); + LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal); // Then perform the RAUW. li->replaceAllUsesWith(newVal); @@ -2223,8 +2509,8 @@ static bool optimizeMemoryAccesses(SILFunction &fn) { continue; } - LLVM_DEBUG(llvm::dbgs() << "*** PMO Optimize Memory Accesses looking at: " - << *alloc << "\n"); + LLVM_DEBUG(llvm::dbgs() + << "*** PMO Optimize Memory Accesses looking at: " << *alloc); PMOMemoryObjectInfo memInfo(alloc); // Set up the datastructure used to collect the uses of the allocation. @@ -2266,8 +2552,8 @@ static bool eliminateDeadAllocations(SILFunction &fn) { } LLVM_DEBUG(llvm::dbgs() - << "*** PMO Dead Allocation Elimination looking at: " << *alloc - << "\n"); + << "*** PMO Dead Allocation Elimination looking at: " + << *alloc); PMOMemoryObjectInfo memInfo(alloc); // Set up the datastructure used to collect the uses of the allocation.
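To make the new ownership fixup concrete, here is a minimal sketch of the shape promoteLoadBorrow now produces, matching the CHECK lines of the load_borrow_promotion test below. The function name and value numbering are hypothetical, and everything is collapsed into a single block for brevity (the real pass also handles multi-block CFGs and phis): the available value is copied at the store, a control-equivalent copy_value + begin_borrow stands in for the load_borrow, and destroy_values end each copy's lifetime.

sil [ossa] @load_borrow_promotion_sketch : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject {
bb0(%0 : @owned $Builtin.NativeObject):
  %1 = alloc_stack $Builtin.NativeObject
  // Lifetime-extending copy of the available value, inserted at the store.
  %2 = copy_value %0 : $Builtin.NativeObject
  store %0 to [init] %1 : $*Builtin.NativeObject
  // Control-equivalent copy + borrow replacing the promoted load_borrow.
  %3 = copy_value %2 : $Builtin.NativeObject
  %4 = begin_borrow %3 : $Builtin.NativeObject
  // No loop here, so the original copy is destroyed immediately.
  destroy_value %2 : $Builtin.NativeObject
  %5 = copy_value %4 : $Builtin.NativeObject
  end_borrow %4 : $Builtin.NativeObject
  // destroy_value inserted after the end_borrow by fixupOwnership.
  destroy_value %3 : $Builtin.NativeObject
  destroy_addr %1 : $*Builtin.NativeObject
  dealloc_stack %1 : $*Builtin.NativeObject
  return %5 : $Builtin.NativeObject
}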
diff --git a/test/SILOptimizer/predictable_memaccess_opts.sil b/test/SILOptimizer/predictable_memaccess_opts.sil index cf92559a25b39..192723cfbd731 100644 --- a/test/SILOptimizer/predictable_memaccess_opts.sil +++ b/test/SILOptimizer/predictable_memaccess_opts.sil @@ -21,9 +21,12 @@ struct IntPair { var y: Builtin.Int32 } -sil @guaranteed_object_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () +sil @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () sil @intpair_user : $@convention(thin) (IntPair) -> () +sil @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () sil @inout_int32_user : $@convention(thin) (@inout Builtin.Int32) -> () +sil @get_object : $@convention(thin) () -> @owned Builtin.NativeObject +sil @nativeobject_tuple_user : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () /// Needed to avoid tuple scalarization code in the use gatherer. struct NativeObjectAndTuple { @@ -545,13 +548,36 @@ bb0(%0 : @owned $Builtin.NativeObject): // CHECK-LABEL: sil [ossa] @load_borrow_promotion : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { // CHECK: bb0([[ARG:%.*]] : +// Block where we have our store and do our lifetime extending copy_value. // CHECK: [[STACK:%.*]] = alloc_stack $Builtin.NativeObject // CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG]] // CHECK: store [[ARG]] to [init] [[STACK]] -// CHECK: [[BORROWED_ARG_COPY:%.*]] = begin_borrow [[ARG_COPY]] +// CHECK: br bb1 +// +// Our load block. Here, we insert our copy_value + begin_borrow that is +// associated with the load_borrow. We cannot use the original copy since even +// though in this situation we know that our copy/borrow would be strongly +// control equivalent, this is not always true. To simplify the algorithm, we +// always insert the copy here. We insert a destroy_value to end the lifetime of +// ARG_COPY since we do not have a loop here. +// +// CHECK: bb1: +// CHECK: [[CONTROL_EQUIVALENT_ARG_COPY:%.*]] = copy_value [[ARG_COPY]] +// CHECK: [[BORROWED_ARG_COPY:%.*]] = begin_borrow [[CONTROL_EQUIVALENT_ARG_COPY]] +// CHECK: destroy_value [[ARG_COPY]] +// CHECK: br bb2 +// +// The block where the load_borrow is actually used. We destroy the control +// equivalent arg copy here after the end_borrow. +// +// CHECK: bb2: // CHECK: [[RESULT:%.*]] = copy_value [[BORROWED_ARG_COPY]] // CHECK: end_borrow [[BORROWED_ARG_COPY]] -// CHECK: destroy_value [[ARG_COPY]] +// CHECK: destroy_value [[CONTROL_EQUIVALENT_ARG_COPY]] +// CHECK: br bb3 +// +// The block after the last use of the load_borrow.
+// CHECK: bb3: // CHECK: destroy_addr [[STACK]] // CHECK: return [[RESULT]] // CHECK: } // end sil function 'load_borrow_promotion' @@ -559,9 +585,18 @@ sil [ossa] @load_borrow_promotion : $@convention(thin) (@owned Builtin.NativeObj bb0(%0 : @owned $Builtin.NativeObject): %1 = alloc_stack $Builtin.NativeObject store %0 to [init] %1 : $*Builtin.NativeObject + br bb1 + +bb1: %2 = load_borrow %1 : $*Builtin.NativeObject + br bb2 + +bb2: %3 = copy_value %2 : $Builtin.NativeObject end_borrow %2 : $Builtin.NativeObject + br bb3 + +bb3: destroy_addr %1 : $*Builtin.NativeObject dealloc_stack %1 : $*Builtin.NativeObject return %3 : $Builtin.NativeObject @@ -579,7 +614,7 @@ bb0(%0 : @owned $NativeObjectPair): bb2: %3 = load_borrow %2 : $*Builtin.NativeObject - %4 = function_ref @guaranteed_object_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %4 = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () apply %4(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () end_borrow %3 : $Builtin.NativeObject br bb2 @@ -597,7 +632,7 @@ bb0(%0 : @owned $NativeObjectPair): bb2: %3 = load_borrow %2 : $*Builtin.NativeObject - %4 = function_ref @guaranteed_object_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %4 = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () apply %4(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () end_borrow %3 : $Builtin.NativeObject cond_br undef, bb3, bb4 @@ -662,17 +697,21 @@ bb9: // CHECK: ([[TUP_0:%.*]], [[TUP_1:%.*]]) = destructure_tuple [[TUP]] // CHECK: [[TUP_0_COPY:%.*]] = copy_value [[TUP_0]] // CHECK: [[TUP_1_COPY:%.*]] = copy_value [[TUP_1]] -// CHECK: [[BORROWED_TUP_0_COPY:%.*]] = begin_borrow [[TUP_0_COPY]] -// CHECK: [[BORROWED_TUP_1_COPY:%.*]] = begin_borrow [[TUP_1_COPY]] +// CHECK: [[CONTROL_EQUIVALENT_TUP_0_COPY:%.*]] = copy_value [[TUP_0_COPY]] +// CHECK: [[BORROWED_TUP_0_COPY:%.*]] = begin_borrow [[CONTROL_EQUIVALENT_TUP_0_COPY]] +// CHECK: destroy_value [[TUP_0_COPY]] +// CHECK: [[CONTROL_EQUIVALENT_TUP_1_COPY:%.*]] = copy_value [[TUP_1_COPY]] +// CHECK: [[BORROWED_TUP_1_COPY:%.*]] = begin_borrow [[CONTROL_EQUIVALENT_TUP_1_COPY]] +// CHECK: destroy_value [[TUP_1_COPY]] // CHECK: [[BORROWED_TUP:%.*]] = tuple ([[BORROWED_TUP_0_COPY]] : ${{.*}}, [[BORROWED_TUP_1_COPY]] : // CHECK: [[TUP_EXT_1:%.*]] = tuple_extract [[BORROWED_TUP]] : // CHECK: [[TUP_EXT_2:%.*]] = tuple_extract [[BORROWED_TUP]] : // CHECK: apply {{%.*}}([[TUP_EXT_1]]) // CHECK: apply {{%.*}}([[TUP_EXT_2]]) // CHECK: end_borrow [[BORROWED_TUP_0_COPY]] -// CHECK: destroy_value [[TUP_0_COPY]] +// CHECK: destroy_value [[CONTROL_EQUIVALENT_TUP_0_COPY]] // CHECK: end_borrow [[BORROWED_TUP_1_COPY]] -// CHECK: destroy_value [[TUP_1_COPY]] +// CHECK: destroy_value [[CONTROL_EQUIVALENT_TUP_1_COPY]] // CHECK: } // end sil function 'load_borrow_tuple_scalarize' sil [canonical] [ossa] @load_borrow_tuple_scalarize : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): @@ -683,7 +722,7 @@ bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): %4 = load_borrow %2 : $*(Builtin.NativeObject, Builtin.NativeObject) %5 = tuple_extract %4 : $(Builtin.NativeObject, Builtin.NativeObject), 0 %6 = tuple_extract %4 : $(Builtin.NativeObject, Builtin.NativeObject), 1 - %7 = function_ref @guaranteed_object_user : $@convention(thin) (@guaranteed 
Builtin.NativeObject) -> () + %7 = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () apply %7(%5) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () apply %7(%6) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () end_borrow %4 : $(Builtin.NativeObject, Builtin.NativeObject) @@ -694,12 +733,12 @@ bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject): return %9999 : $() } -// CHECK-LABEL: sil [ossa] @multiple_available_values_diamond_followed_by_loop_trivial : $@convention(thin) (Builtin.Int32, Builtin.Int32) -> () { +// CHECK-LABEL: sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial : $@convention(thin) (Builtin.Int32, Builtin.Int32) -> () { // CHECK: bb0( // CHECK-NOT: load [trivial] %{{[0-9][0-9]*}} : $*IntPair // CHECK-NOT: bb{{[0-9][0-9]*}}( -// CHECK: } // end sil function 'multiple_available_values_diamond_followed_by_loop_trivial' -sil [ossa] @multiple_available_values_diamond_followed_by_loop_trivial : $@convention(thin) (Builtin.Int32, Builtin.Int32) -> () { +// CHECK: } // end sil function 'trivial_multiple_available_values_diamond_followed_by_loop_trivial' +sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial : $@convention(thin) (Builtin.Int32, Builtin.Int32) -> () { bb0(%0a : $Builtin.Int32, %0b : $Builtin.Int32): %func = function_ref @intpair_user : $@convention(thin) (IntPair) -> () %1 = alloc_stack $IntPair @@ -738,12 +777,12 @@ bb7: return %9999 : $() } -// CHECK-LABEL: sil [ossa] @multiple_available_values_diamond_followed_by_loop_trivial_reload : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { +// CHECK-LABEL: sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial_reload : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { // CHECK: bb0( // CHECK-NOT: load [trivial] %{{[0-9][0-9]*}} : $*IntPair // CHECK-NOT: bb{{[0-9][0-9]*}}( -// CHECK: } // end sil function 'multiple_available_values_diamond_followed_by_loop_trivial_reload' -sil [ossa] @multiple_available_values_diamond_followed_by_loop_trivial_reload : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { +// CHECK: } // end sil function 'trivial_multiple_available_values_diamond_followed_by_loop_trivial_reload' +sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial_reload : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { bb0(%0a : $Builtin.Int32, %0b : $Builtin.Int32, %0c : $Builtin.Int32): %func = function_ref @intpair_user : $@convention(thin) (IntPair) -> () %1 = alloc_stack $IntPair @@ -782,7 +821,10 @@ bb7: return %9999 : $() } -sil [ossa] @multiple_available_values_diamond_followed_by_loop_trivial_store_in_loop : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { +// CHECK-LABEL: sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial_store_in_loop : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { +// CHECK-NOT: load +// CHECK: } // end sil function 'trivial_multiple_available_values_diamond_followed_by_loop_trivial_store_in_loop' +sil [ossa] @trivial_multiple_available_values_diamond_followed_by_loop_trivial_store_in_loop : $@convention(thin) (Builtin.Int32, Builtin.Int32, Builtin.Int32) -> () { bb0(%0a : $Builtin.Int32, %0b : $Builtin.Int32, %0c : $Builtin.Int32): %func = function_ref @intpair_user : $@convention(thin) (IntPair) -> () %1 = alloc_stack $IntPair @@ 
-820,4 +862,481 @@ bb7: dealloc_stack %1 : $*IntPair %9999 = tuple() return %9999 : $() -} \ No newline at end of file +} + +// CHECK-LABEL: sil [ossa] @multiple_available_values_diamond_followed_by_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +// CHECK: bb0( +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'multiple_available_values_diamond_followed_by_loop' +sil [ossa] @multiple_available_values_diamond_followed_by_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load_borrow %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + br bb5 + +bb7: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_available_values_diamond_followed_by_loop_reload : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'multiple_available_values_diamond_followed_by_loop_reload' +sil [ossa] @multiple_available_values_diamond_followed_by_loop_reload : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @owned $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0c to [init] %1b : $*Builtin.NativeObject + destroy_value %0b : $Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + destroy_value %0c : $Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load_borrow %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + br bb5 + +bb7: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_available_values_diamond_followed_by_loop_store_in_loop : $@convention(thin) 
(@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'multiple_available_values_diamond_followed_by_loop_store_in_loop' +sil [ossa] @multiple_available_values_diamond_followed_by_loop_store_in_loop : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + %0bhat = copy_value %0b : $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb2: + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + br bb3 + +bb3: + br bb4 + +bb4: + br bb5 + +bb5: + %2 = load_borrow %1 : $*NativeObjectPair + cond_br undef, bb6, bb7 + +bb6: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + destroy_addr %1b : $*Builtin.NativeObject + %0bhat2 = copy_value %0bhat : $Builtin.NativeObject + store %0bhat2 to [init] %1b : $*Builtin.NativeObject + br bb5 + +bb7: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + destroy_value %0bhat : $Builtin.NativeObject + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadborrow : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_loadborrow' +sil [canonical] [ossa] @loop_carry_loadborrow : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %1 = alloc_stack $Builtin.NativeObject + store %0 to [init] %1 : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %2 = load_borrow %1 : $*Builtin.NativeObject + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*Builtin.NativeObject + dealloc_stack %1 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadborrow_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_loadborrow_2' +sil [canonical] [ossa] @loop_carry_loadborrow_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %1 = alloc_stack $Builtin.NativeObject + store %0 to [init] %1 : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + 
br bb2 + +bb2: + br bb3 + +bb3: + %2 = load_borrow %1 : $*Builtin.NativeObject + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*Builtin.NativeObject + dealloc_stack %1 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadborrow_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_loadborrow_3' +sil [canonical] [ossa] @loop_carry_loadborrow_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobject_tuple_user : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + %1 = alloc_stack $(Builtin.NativeObject, Builtin.NativeObject) + %1a = tuple_element_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject), 0 + %1b = tuple_element_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject), 1 + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %0ccopy = copy_value %0c : $Builtin.NativeObject + destroy_addr %1a : $*Builtin.NativeObject + store %0ccopy to [init] %1a : $*Builtin.NativeObject + %2 = load_borrow %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + end_borrow %2 : $(Builtin.NativeObject, Builtin.NativeObject) + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed (Builtin.NativeObject, Builtin.NativeObject)) -> () + end_borrow %2 : $(Builtin.NativeObject, Builtin.NativeObject) + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + dealloc_stack %1 : $*(Builtin.NativeObject, Builtin.NativeObject) + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [canonical] [ossa] @loop_carry_loadborrow_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_loadborrow_4' +sil [canonical] [ossa] @loop_carry_loadborrow_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { +bb0(%0a : @owned $Builtin.NativeObject, %0b : @owned $Builtin.NativeObject, %0c : @guaranteed $Builtin.NativeObject): + %func = function_ref @nativeobjectpair_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () + %1 = alloc_stack $NativeObjectPair + %1a = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x + %1b = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.y + store %0a to [init] %1a : $*Builtin.NativeObject + store %0b to [init] %1b : $*Builtin.NativeObject + cond_br undef, bb1, bb7 + +bb1: + br bb2 + +bb2: + br bb3 + +bb3: + %0ccopy = copy_value %0c : 
$Builtin.NativeObject + destroy_addr %1a : $*Builtin.NativeObject + store %0ccopy to [init] %1a : $*Builtin.NativeObject + %2 = load_borrow %1 : $*NativeObjectPair + cond_br undef, bb4, bb5 + +bb4: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + br bb2 + +bb5: + apply %func(%2) : $@convention(thin) (@guaranteed NativeObjectPair) -> () + end_borrow %2 : $NativeObjectPair + br bb6 + +bb6: + br bb8 + +bb7: + br bb8 + +bb8: + destroy_addr %1 : $*NativeObjectPair + dealloc_stack %1 : $*NativeObjectPair + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @loop_carry_load_borrow_phi_not_control_equivalent : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_load_borrow_phi_not_control_equivalent' +sil [ossa] @loop_carry_load_borrow_phi_not_control_equivalent : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%arg : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %0 = alloc_stack $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + cond_br undef, bb3, bb4 + +bb2: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb5 + +bb3: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb6 + +bb4: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb7 + +bb5: + br bb8 + +bb6: + br bb8 + +bb7: + br bbPreLoopHeader + +bb8: + br bbPreLoopHeader + +bbPreLoopHeader: + br bbLoop + +bbLoop: + br bbLoop1 + +bbLoop1: + br bbLoop2 + +bbLoop2: + %2 = load_borrow %0 : $*Builtin.NativeObject + cond_br undef, bbLoop3, bbLoop4 + +bbLoop3: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bbLoop2 + +bbLoop4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bbEnd + +bbEnd: + destroy_addr %0 : $*Builtin.NativeObject + dealloc_stack %0 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// In this case, we need to separately lifetime extend our phi +// node's copy to prevent leaks along the edge that skips the loop.
+// CHECK-LABEL: sil [ossa] @loop_carry_load_borrow_phi_not_control_equivalent_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loop_carry_load_borrow_phi_not_control_equivalent_2' +sil [ossa] @loop_carry_load_borrow_phi_not_control_equivalent_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%arg : @owned $Builtin.NativeObject): + %func = function_ref @nativeobject_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %0 = alloc_stack $Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + cond_br undef, bb3, bb4 + +bb2: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb5 + +bb3: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb6 + +bb4: + store %arg to [init] %0 : $*Builtin.NativeObject + br bb7 + +bb5: + br bb8a + +bb6: + br bb8a + +bb7: + br bbPreLoopHeader + +bb8a: + br bb8 + +bb8: + cond_br undef, bbPreLoopHeader1, bbSkipLoop + +bbPreLoopHeader: + br bbLoop + +bbPreLoopHeader1: + br bbLoop + +bbLoop: + br bbLoop1 + +bbLoop1: + br bbLoop2 + +bbLoop2: + %2 = load_borrow %0 : $*Builtin.NativeObject + br bbLoop6 + +bbLoop6: + cond_br undef, bbLoop3, bbLoop4 + +bbLoop3: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bbLoop5 + +bbLoop5: + br bbLoop2 + +bbLoop4: + apply %func(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $Builtin.NativeObject + br bbEnd + +bbSkipLoop: + br bbEnd + +bbEnd: + destroy_addr %0 : $*Builtin.NativeObject + dealloc_stack %0 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} +
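For the diamond and loop tests above, the interesting work happens in addHandOffCopyDestroysForPhis. Below is a rough sketch of the resulting CFG shape, with a hypothetical function name and value numbering; the alloc_stack, stores, and address cleanup that PMO rewrites are omitted for brevity. Each owned incoming value receives a copy_value immediately before its predecessor's branch so that the phi post-dominates its operands, the original copies get frontier destroys, and the phi itself is copied and borrowed at the old load point.

sil [ossa] @phi_handoff_sketch : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> () {
bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject):
  cond_br undef, bb1, bb2

bb1:
  destroy_value %1 : $Builtin.NativeObject
  // Copy made where the first store used to be.
  %2 = copy_value %0 : $Builtin.NativeObject
  destroy_value %0 : $Builtin.NativeObject
  // Hand-off copy inserted right before the branch that feeds the phi.
  %3 = copy_value %2 : $Builtin.NativeObject
  // Frontier destroy: %2 is not post-dominated by the load point.
  destroy_value %2 : $Builtin.NativeObject
  br bb3(%3 : $Builtin.NativeObject)

bb2:
  destroy_value %0 : $Builtin.NativeObject
  %4 = copy_value %1 : $Builtin.NativeObject
  destroy_value %1 : $Builtin.NativeObject
  %5 = copy_value %4 : $Builtin.NativeObject
  destroy_value %4 : $Builtin.NativeObject
  br bb3(%5 : $Builtin.NativeObject)

bb3(%6 : @owned $Builtin.NativeObject):
  // Phi inserted by SILSSAUpdater; strongly control equivalent to the borrow.
  %7 = copy_value %6 : $Builtin.NativeObject
  %8 = begin_borrow %7 : $Builtin.NativeObject
  destroy_value %6 : $Builtin.NativeObject
  end_borrow %8 : $Builtin.NativeObject
  destroy_value %7 : $Builtin.NativeObject
  %9 = tuple ()
  return %9 : $()
}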