From 7cea31ba3c055ca316b344b3ca8eff8f35a5baa1 Mon Sep 17 00:00:00 2001 From: Meghana Gupta Date: Wed, 7 Oct 2020 13:56:38 -0700 Subject: [PATCH 1/5] SILMem2Reg: Don't add dead values as phi arguments A dealloc_stack ends the lifetime of an alloc_stack on a path. We don't have to pass RunningVal beyond the dealloc_stack as phi argument to the post dominating block. --- lib/SILOptimizer/Transforms/SILMem2Reg.cpp | 6 ++++- test/SILOptimizer/mem2reg.sil | 27 ++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp index 332af38b3f2b3..e72abfb68a8fb 100644 --- a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp +++ b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp @@ -468,8 +468,12 @@ StackAllocationPromoter::promoteAllocationInBlock(SILBasicBlock *BB) { // Stop on deallocation. if (auto *DSI = dyn_cast(Inst)) { - if (DSI->getOperand() == ASI) + if (DSI->getOperand() == ASI) { + // Reset LastStore. 
+ // So that we don't pass RunningVal as a phi arg beyond dealloc_stack + LastStore = nullptr; break; + } } } if (LastStore) { diff --git a/test/SILOptimizer/mem2reg.sil b/test/SILOptimizer/mem2reg.sil index a77e77e4c2552..8373fbee77f66 100644 --- a/test/SILOptimizer/mem2reg.sil +++ b/test/SILOptimizer/mem2reg.sil @@ -465,3 +465,30 @@ bb0(%0 : $Optional): %4 = tuple() return %4 : $() } + +// CHECK-LABEL: sil @multi_basic_block_use_on_one_path : +// CHECK-NOT: alloc_stack +// CHECK:bb2: +// CHECK: br bb3(undef : $Klass) +// CHECK-LABEL: } // end sil function 'multi_basic_block_use_on_one_path' +sil @multi_basic_block_use_on_one_path : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : $Klass): + %1 = alloc_stack $Klass + cond_br undef, bb1, bb2 + +bb1: + dealloc_stack %1 : $*Klass + strong_release %0 : $Klass + br bb3 + +bb2: + store %0 to %1 : $*Klass + %7 = load %1 : $*Klass + dealloc_stack %1 : $*Klass + strong_release %7 : $Klass + br bb3 + +bb3: + %11 = tuple () + return %11 : $() +} From eff6b66906f094026afe1a61dae44d4eeba2709b Mon Sep 17 00:00:00 2001 From: Meghana Gupta Date: Wed, 7 Oct 2020 11:42:31 -0700 Subject: [PATCH 2/5] Add hasOwnership assertion in SILBuilder while creating unchecked_value_cast --- include/swift/SIL/SILBuilder.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index 83d93da9059c1..484f6df02e79a 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -1125,6 +1125,7 @@ class SILBuilder { UncheckedValueCastInst *createUncheckedValueCast(SILLocation Loc, SILValue Op, SILType Ty) { + assert(hasOwnership()); return insert(UncheckedValueCastInst::create( getSILDebugLocation(Loc), Op, Ty, getFunction(), C.OpenedArchetypes)); } From 83474707eea654d81bc49565f952c84cc19508c9 Mon Sep 17 00:00:00 2001 From: Meghana Gupta Date: Wed, 9 Sep 2020 11:08:42 -0700 Subject: [PATCH 3/5] Enable SILMem2Reg for OSSA SILMem2Reg has roughly 2 central algorithms, 
removal of alloc_stack with uses in a single block vs multiple blocks. While replacing loads and stores to the stack location in each of the 2 algorithms, new handling of qualifiers like [assign], [copy] and [take] which are new to OSSA are needed. Also Disable SILMem2Reg when we see this pattern: load [take] (struct_element_addr/tuple_element_addr %ASI) Convert SILMem2Reg tests into ossa And add new SILMem2Reg tests for non-trivial values. Thanks to zoecarver for additional tests. --- lib/SILOptimizer/Transforms/SILMem2Reg.cpp | 188 +++++- test/SILOptimizer/mem2reg.sil | 4 +- test/SILOptimizer/mem2reg_liveness_ossa.sil | 65 ++ test/SILOptimizer/mem2reg_ossa.sil | 489 ++++++++++++++ test/SILOptimizer/mem2reg_ossa_nontrivial.sil | 615 ++++++++++++++++++ .../mem2reg_ossa_nontrivial_casts.sil | 81 +++ test/SILOptimizer/mem2reg_resilient_ossa.sil | 28 + test/SILOptimizer/mem2reg_simple_ossa.sil | 410 ++++++++++++ .../SILOptimizer/mem2reg_unreachable_ossa.sil | 68 ++ 9 files changed, 1924 insertions(+), 24 deletions(-) create mode 100644 test/SILOptimizer/mem2reg_liveness_ossa.sil create mode 100644 test/SILOptimizer/mem2reg_ossa.sil create mode 100644 test/SILOptimizer/mem2reg_ossa_nontrivial.sil create mode 100644 test/SILOptimizer/mem2reg_ossa_nontrivial_casts.sil create mode 100644 test/SILOptimizer/mem2reg_resilient_ossa.sil create mode 100644 test/SILOptimizer/mem2reg_simple_ossa.sil create mode 100644 test/SILOptimizer/mem2reg_unreachable_ossa.sil diff --git a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp index e72abfb68a8fb..e0b43fb8e24ac 100644 --- a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp +++ b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp @@ -183,23 +183,43 @@ class MemoryToRegisters { /// Returns true if \p I is an address of a LoadInst, skipping struct and /// tuple address projections. Sets \p singleBlock to null if the load (or /// it's address is not in \p singleBlock. 
-static bool isAddressForLoad(SILInstruction *I, SILBasicBlock *&singleBlock) { - - if (isa(I)) +/// This function looks for these patterns: +/// 1. (load %ASI) +/// 2. (load (struct_element_addr/tuple_element_addr/unchecked_addr_cast %ASI)) +static bool isAddressForLoad(SILInstruction *I, SILBasicBlock *&singleBlock, + bool &hasGuaranteedOwnership) { + + if (isa(I)) { + // SILMem2Reg is disabled when we find: + // (load [take] (struct_element_addr/tuple_element_addr %ASI)) + // struct_element_addr and tuple_element_addr are lowered into + // struct_extract and tuple_extract and these SIL instructions have a + // guaranteed ownership. For replacing load's users, we need an owned value. + // We will need a new copy and destroy of the running val placed after the + // last use. This is not implemented currently. + if (hasGuaranteedOwnership && cast(I)->getOwnershipQualifier() == + LoadOwnershipQualifier::Take) { + return false; + } return true; + } if (!isa(I) && !isa(I) && !isa(I)) return false; - + + if (isa(I) || isa(I)) { + hasGuaranteedOwnership = true; + } + // Recursively search for other (non-)loads in the instruction's uses. for (auto UI : cast(I)->getUses()) { SILInstruction *II = UI->getUser(); if (II->getParent() != singleBlock) singleBlock = nullptr; - - if (!isAddressForLoad(II, singleBlock)) - return false; + + if (!isAddressForLoad(II, singleBlock, hasGuaranteedOwnership)) + return false; } return true; } @@ -233,7 +253,8 @@ static bool isCaptured(AllocStackInst *ASI, bool &inSingleBlock) { singleBlock = nullptr; // Loads are okay. - if (isAddressForLoad(II, singleBlock)) + bool hasGuaranteedOwnership = false; + if (isAddressForLoad(II, singleBlock, hasGuaranteedOwnership)) continue; // We can store into an AllocStack (but not the pointer). 
@@ -348,6 +369,8 @@ static void collectLoads(SILInstruction *I, SmallVectorImpl &Loads) static void replaceLoad(LoadInst *LI, SILValue val, AllocStackInst *ASI) { ProjectionPath projections(val->getType()); SILValue op = LI->getOperand(); + SILBuilderWithScope builder(LI); + while (op != ASI) { assert(isa(op) || isa(op) || isa(op)); @@ -355,14 +378,45 @@ static void replaceLoad(LoadInst *LI, SILValue val, AllocStackInst *ASI) { projections.push_back(Projection(Inst)); op = Inst->getOperand(0); } - SILBuilder builder(LI); + + SmallVector borrowedVals; for (auto iter = projections.rbegin(); iter != projections.rend(); ++iter) { const Projection &projection = *iter; + assert(projection.getKind() == ProjectionKind::BitwiseCast || + projection.getKind() == ProjectionKind::Struct || + projection.getKind() == ProjectionKind::Tuple); + + // struct_extract and tuple_extract expect guaranteed operand ownership + // non-trivial RunningVal is owned. Insert borrow operation to convert + if (projection.getKind() == ProjectionKind::Struct || + projection.getKind() == ProjectionKind::Tuple) { + SILValue opVal = builder.emitBeginBorrowOperation(LI->getLoc(), val); + if (opVal != val) { + borrowedVals.push_back(opVal); + val = opVal; + } + } val = projection.createObjectProjection(builder, LI->getLoc(), val).get(); } + op = LI->getOperand(); - LI->replaceAllUsesWith(val); + // Replace users of the loaded value with `val` + // If we have a load [copy], replace the users with copy_value of `val` + if (LI->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) { + LI->replaceAllUsesWith(builder.createCopyValue(LI->getLoc(), val)); + } else { + assert(!ASI->getFunction()->hasOwnership() || + val.getOwnershipKind() != ValueOwnershipKind::Guaranteed); + LI->replaceAllUsesWith(val); + } + + for (auto borrowedVal : borrowedVals) { + builder.emitEndBorrowOperation(LI->getLoc(), borrowedVal); + } + + // Delete the load LI->eraseFromParent(); + while (op != ASI && op->use_empty()) { 
assert(isa(op) || isa(op) || isa(op)); @@ -399,6 +453,7 @@ StoreInst * StackAllocationPromoter::promoteAllocationInBlock(SILBasicBlock *BB) { LLVM_DEBUG(llvm::dbgs() << "*** Promoting ASI in block: " << *ASI); + // RunningVal is the current value in the stack location. // We don't know the value of the alloca until we find the first store. SILValue RunningVal = SILValue(); // Keep track of the last StoreInst that we found. @@ -415,12 +470,16 @@ StackAllocationPromoter::promoteAllocationInBlock(SILBasicBlock *BB) { // If we are loading from the AllocStackInst and we already know the // content of the Alloca then use it. LLVM_DEBUG(llvm::dbgs() << "*** Promoting load: " << *Load); - replaceLoad(Load, RunningVal, ASI); ++NumInstRemoved; - } else if (Load->getOperand() == ASI) { + } else if (Load->getOperand() == ASI && + Load->getOwnershipQualifier() != + LoadOwnershipQualifier::Copy) { // If we don't know the content of the AllocStack then the loaded // value *is* the new value; + // Don't use result of load [copy] as a RunningVal, it necessitates + // additional logic for cleanup of consuming instructions of the result. + // StackAllocationPromoter::fixBranchesAndUses will later handle it. LLVM_DEBUG(llvm::dbgs() << "*** First load: " << *Load); RunningVal = Load; } @@ -433,16 +492,51 @@ StackAllocationPromoter::promoteAllocationInBlock(SILBasicBlock *BB) { if (SI->getDest() != ASI) continue; - // The stored value is the new running value. - RunningVal = SI->getSrc(); + // Special handling of entry block + // If we have a store [assign] in the first block, OSSA guarantees we can + // find the previous value stored in the stack location in RunningVal. + // Create destroy_value of the RunningVal. + // For all other blocks we may not know the previous value stored in the + // stack location. So we will create destroy_value in + // StackAllocationPromoter::fixBranchesAndUses, by getting the live-in + // value to the block. 
+ if (BB->isEntry()) { + if (SI->getOwnershipQualifier() == StoreOwnershipQualifier::Assign) { + assert(RunningVal); + SILBuilderWithScope(SI).createDestroyValue(SI->getLoc(), RunningVal); + } + } // If we met a store before this one, delete it. + // If the LastStore was a store with [assign], delete it only if we know + // the RunningValue to destroy. If not, it will be deleted in + // StackAllocationPromoter::fixBranchesAndUses. if (LastStore) { - ++NumInstRemoved; - LLVM_DEBUG(llvm::dbgs() << "*** Removing redundant store: " - << *LastStore); - LastStore->eraseFromParent(); + if (LastStore->getOwnershipQualifier() == + StoreOwnershipQualifier::Assign) { + if (RunningVal) { + // For entry block, we would have already created the destroy_value, + // skip it. + if (!BB->isEntry()) { + SILBuilderWithScope(LastStore).createDestroyValue( + LastStore->getLoc(), RunningVal); + } + LLVM_DEBUG(llvm::dbgs() + << "*** Removing redundant store: " << *LastStore); + ++NumInstRemoved; + LastStore->eraseFromParent(); + } + } else { + LLVM_DEBUG(llvm::dbgs() + << "*** Removing redundant store: " << *LastStore); + ++NumInstRemoved; + LastStore->eraseFromParent(); + } } + + // The stored value is the new running value. + RunningVal = SI->getSrc(); + // The current store is now the LastStore LastStore = SI; continue; } @@ -466,6 +560,15 @@ StackAllocationPromoter::promoteAllocationInBlock(SILBasicBlock *BB) { continue; } + if (auto *DVI = dyn_cast(Inst)) { + if (DVI->getOperand() == RunningVal) { + // Reset LastStore. + // So that we don't end up passing dead values as phi args in + // StackAllocationPromoter::fixBranchesAndUses + LastStore = nullptr; + } + } + // Stop on deallocation. if (auto *DSI = dyn_cast(Inst)) { if (DSI->getOperand() == ASI) { @@ -516,6 +619,10 @@ void MemoryToRegisters::removeSingleBlockAllocation(AllocStackInst *ASI) { // value. 
if (auto *SI = dyn_cast(Inst)) { if (SI->getDest() == ASI) { + if (SI->getOwnershipQualifier() == StoreOwnershipQualifier::Assign) { + assert(RunningVal); + SILBuilderWithScope(SI).createDestroyValue(SI->getLoc(), RunningVal); + } RunningVal = SI->getSrc(); Inst->eraseFromParent(); ++NumInstRemoved; @@ -647,6 +754,21 @@ void StackAllocationPromoter::fixPhiPredBlock(BlockSet &PhiBlocks, TI->eraseFromParent(); } +static bool hasOnlyUndefIncomingValues(SILPhiArgument *phiArg) { + SmallVector incomingValues; + phiArg->getIncomingPhiValues(incomingValues); + for (auto predArg : incomingValues) { + if (isa(predArg)) + continue; + if (isa(predArg) && + hasOnlyUndefIncomingValues(cast(predArg))) { + continue; + } + return false; + } + return true; +} + void StackAllocationPromoter::fixBranchesAndUses(BlockSet &PhiBlocks) { // First update uses of the value. SmallVector collectedLoads; @@ -683,6 +805,16 @@ void StackAllocationPromoter::fixBranchesAndUses(BlockSet &PhiBlocks) { // on. SILBasicBlock *BB = Inst->getParent(); + if (!BB->isEntry()) { + if (auto *SI = dyn_cast(Inst)) { + if (SI->getOwnershipQualifier() == StoreOwnershipQualifier::Assign) { + SILValue Def = getLiveInValue(PhiBlocks, BB); + SILBuilderWithScope(SI).createDestroyValue(SI->getLoc(), Def); + continue; + } + } + } + if (auto *DVAI = dyn_cast(Inst)) { // Replace DebugValueAddr with DebugValue. SILValue Def = getLiveInValue(PhiBlocks, BB); @@ -714,6 +846,22 @@ void StackAllocationPromoter::fixBranchesAndUses(BlockSet &PhiBlocks) { fixPhiPredBlock(PhiBlocks, Block, PBB); } } + + // If the owned phi arg we added did not have any uses, create end_lifetime to + // end its lifetime. In asserts mode, make sure we have only undef incoming + // values for such phi args. 
+ if (ASI->getFunction()->hasOwnership()) { + for (auto Block : PhiBlocks) { + auto *phiArg = cast( + Block->getArgument(Block->getNumArguments() - 1)); + if (phiArg->getOwnershipKind() == ValueOwnershipKind::Owned && + phiArg->use_empty()) { + assert(hasOnlyUndefIncomingValues(phiArg)); + SILBuilderWithScope(&Block->front()) + .createEndLifetime(Block->front().getLoc(), phiArg); + } + } + } } void StackAllocationPromoter::pruneAllocStackUsage() { @@ -960,10 +1108,6 @@ class SILMem2Reg : public SILFunctionTransform { void run() override { SILFunction *F = getFunction(); - // FIXME: We should be able to support ownership. - if (F->hasOwnership()) - return; - LLVM_DEBUG(llvm::dbgs() << "** Mem2Reg on function: " << F->getName() << " **\n"); diff --git a/test/SILOptimizer/mem2reg.sil b/test/SILOptimizer/mem2reg.sil index 8373fbee77f66..94d43f7053f09 100644 --- a/test/SILOptimizer/mem2reg.sil +++ b/test/SILOptimizer/mem2reg.sil @@ -449,12 +449,12 @@ bb0: return %16 : $((), ()) } -// CHECK-LABEL: sil @unchecked_ref_cast +// CHECK-LABEL: sil @unchecked_bitwise_cast // CHECK-NOT: alloc_stack // CHECK: [[CAST:%.*]] = unchecked_bitwise_cast %0 : $Optional to $Klass // CHECK: release_value [[CAST]] // CHECK: return -sil @unchecked_ref_cast : $@convention(thin) (@owned Optional) -> () { +sil @unchecked_bitwise_cast : $@convention(thin) (@owned Optional) -> () { bb0(%0 : $Optional): %1 = alloc_stack $Optional store %0 to %1 : $*Optional diff --git a/test/SILOptimizer/mem2reg_liveness_ossa.sil b/test/SILOptimizer/mem2reg_liveness_ossa.sil new file mode 100644 index 0000000000000..d9121babdecda --- /dev/null +++ b/test/SILOptimizer/mem2reg_liveness_ossa.sil @@ -0,0 +1,65 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +import Builtin +import Swift + +sil [ossa] @_Ts5printFT3valSi_T_ : $@convention(thin) (Int64) -> () + +// CHECK-LABEL: sil [ossa] @liveness0 : +// CHECK-NOT: alloc_stack +sil [ossa] @liveness0 : $@convention(thin) (Int64) -> () { 
+bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "x" + store %0 to [trivial] %1 : $*Int64 + %3 = integer_literal $Builtin.Int64, 10 + %5 = struct_extract %0 : $Int64, #Int64._value + %6 = builtin "cmp_eq_Int64"(%5 : $Builtin.Int64, %3 : $Builtin.Int64) : $Builtin.Int1 + cond_br %6, bb1, bb5 + +bb1: + %8 = alloc_stack $Int64, var, name "y" + %9 = integer_literal $Builtin.Int64, 20 + %10 = struct $Int64 (%9 : $Builtin.Int64) + store %10 to [trivial] %8 : $*Int64 + %12 = integer_literal $Builtin.Int64, 3 + %14 = struct_extract %0 : $Int64, #Int64._value + %15 = builtin "cmp_sgt_Int64"(%14 : $Builtin.Int64, %12 : $Builtin.Int64) : $Builtin.Int1 + cond_br %15, bb2, bb3 + +bb2: + %17 = integer_literal $Builtin.Int64, 0 + %18 = struct $Int64 (%17 : $Builtin.Int64) + store %18 to [trivial] %8 : $*Int64 + br bb4 + +bb3: + %21 = integer_literal $Builtin.Int64, 2 + %22 = struct $Int64 (%21 : $Builtin.Int64) + store %22 to [trivial] %8 : $*Int64 + br bb4 + +bb4: + // function_ref + %25 = function_ref @_Ts5printFT3valSi_T_ : $@convention(thin) (Int64) -> () + %26 = load [trivial] %8 : $*Int64 + %27 = integer_literal $Builtin.Int64, 2 + %28 = integer_literal $Builtin.Int1, -1 + %30 = struct_extract %26 : $Int64, #Int64._value + %31 = builtin "sadd_with_overflow_Int64"(%30 : $Builtin.Int64, %27 : $Builtin.Int64, %28 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %32 = tuple_extract %31 : $(Builtin.Int64, Builtin.Int1), 0 + %33 = tuple_extract %31 : $(Builtin.Int64, Builtin.Int1), 1 + %34 = struct $Int64 (%32 : $Builtin.Int64) + cond_fail %33 : $Builtin.Int1 + %36 = apply %25(%34) : $@convention(thin) (Int64) -> () + dealloc_stack %8 : $*Int64 + br bb5 + +// We don't need a PHI node here because the value is dead! 
+// CHECK: bb5: +bb5: + dealloc_stack %1 : $*Int64 + %40 = tuple () + return %40 : $() +} +// CHECK-LABEL: } // end sil function 'liveness0' + diff --git a/test/SILOptimizer/mem2reg_ossa.sil b/test/SILOptimizer/mem2reg_ossa.sil new file mode 100644 index 0000000000000..26943f8c5442b --- /dev/null +++ b/test/SILOptimizer/mem2reg_ossa.sil @@ -0,0 +1,489 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +import Builtin +import Swift + +////////////////// +// Declarations // +////////////////// + +class Klass {} + +struct SmallCodesizeStruct { + var cls1 : Klass + var cls2 : Klass +} + +struct LargeCodesizeStruct { + var s1: SmallCodesizeStruct + var s2: SmallCodesizeStruct + var s3: SmallCodesizeStruct + var s4: SmallCodesizeStruct + var s5: SmallCodesizeStruct +} + +/////////// +// Tests // +/////////// + +// CHECK-LABEL: sil [ossa] @store_only_allocas : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'store_only_allocas' +// simple.foo0 (c : Swift.Int64) -> () +sil [ossa] @store_only_allocas : $@convention(thin) (Int64) -> () { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + // function_ref Swift.print (val : Swift.Int64) -> () + %3 = function_ref @_Ts5printFT3valSi_T_ : $@convention(thin) (Int64) -> () + %4 = apply %3(%0) : $@convention(thin) (Int64) -> () + dealloc_stack %1 : $*Int64 + %6 = tuple () + return %6 : $() +} + +// Swift.print (val : Swift.Int64) -> () +sil [ossa] @_Ts5printFT3valSi_T_ : $@convention(thin) (Int64) -> () + +// CHECK-LABEL: sil [ossa] @multiple_store_vals : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multiple_store_vals' +// simple.foo1 (c : Swift.Int64) -> Swift.Int64 +sil [ossa] @multiple_store_vals : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 2 + 
%5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + %7 = integer_literal $Builtin.Int64, 5 + %8 = integer_literal $Builtin.Int1, 0 + %9 = struct $Int64 (%7 : $Builtin.Int64) + cond_fail %8 : $Builtin.Int1 + store %9 to [trivial] %3 : $*Int64 + store %9 to [trivial] %3 : $*Int64 + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 + return %9 : $Int64 +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals2 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multiple_store_vals2' +// simple.foo2 (c : Swift.Int64) -> Swift.Int64 +sil [ossa] @multiple_store_vals2 : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_box $<τ_0_0> { var τ_0_0 } , var, name "x" + %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } , 0 + %4 = integer_literal $Builtin.Int64, 2 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3a : $*Int64 + %8 = struct_extract %0 : $Int64, #Int64._value + %9 = builtin "cmp_sgt_Int64"(%8 : $Builtin.Int64, %4 : $Builtin.Int64) : $Builtin.Int1 + cond_br %9, bb1, bb2 + +bb1: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3(%5 : $Int64) + +bb2: + %13 = integer_literal $Builtin.Int64, 5 + %14 = struct $Int64 (%13 : $Builtin.Int64) + cond_fail %9 : $Builtin.Int1 + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3(%14 : $Int64) + +bb3(%18 : $Int64): + dealloc_stack %1 : $*Int64 + return %18 : $Int64 +} + +// CHECK-LABEL: sil [ossa] @with_loads : +// CHECK: bb3([[RET:%[0-9]+]] : $Int64): +// CHECK-LABEL: } // end sil function 'with_loads' +// simple.foo2 (c : Swift.Int64) -> Swift.Int64 +sil [ossa] @with_loads : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_box $<τ_0_0> { var τ_0_0 } , var, name "x" + %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } , 0 + %4 = integer_literal $Builtin.Int64, 2 + %5 = struct 
$Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3a : $*Int64 + %8 = struct_extract %0 : $Int64, #Int64._value + %9 = builtin "cmp_sgt_Int64"(%8 : $Builtin.Int64, %4 : $Builtin.Int64) : $Builtin.Int1 + cond_br %9, bb1, bb2 + +bb1: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3(%5 : $Int64) + +bb2: + %13 = integer_literal $Builtin.Int64, 5 + %14 = struct $Int64 (%13 : $Builtin.Int64) + cond_fail %9 : $Builtin.Int1 + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3(%14 : $Int64) + +bb3(%18 : $Int64): + dealloc_stack %1 : $*Int64 + %20 = load [trivial] %1 : $*Int64 + return %18 : $Int64 +} + +// CHECK-LABEL: sil [ossa] @basic_block_with_loads_and_stores : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'basic_block_with_loads_and_stores' +// test.foo3 (c : Swift.Int64) -> () +sil [ossa] @basic_block_with_loads_and_stores : $@convention(thin) (Int64) -> () { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 3 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + %7 = integer_literal $Builtin.Int64, 3 + %9 = struct_extract %0 : $Int64, #Int64._value + %10 = builtin "cmp_sgt_Int64"(%9 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1 + + %12 = integer_literal $Builtin.Int64, 2 + %13 = struct $Int64 (%12 : $Builtin.Int64) + store %13 to [trivial] %3 : $*Int64 + + // function_ref Swift.print (val : Swift.Int64) -> () + %16 = function_ref @_Ts5printFT3valSi_T_ : $@convention(thin) (Int64) -> () + %17 = load [trivial] %3 : $*Int64 + %18 = apply %16(%17) : $@convention(thin) (Int64) -> () + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 + %21 = tuple () + return %21 : $() +} + +// CHECK-LABEL: sil [ossa] @load_uninitialized_empty : +// CHECK-NOT: load +// CHECK-LABEL: } // end sil function 'load_uninitialized_empty' +sil [ossa] @load_uninitialized_empty : $@convention(thin) 
(@inout ()) -> () { +bb0(%0 : $*()): + %1 = alloc_stack $() + %2 = load [trivial] %1 : $*() + store %2 to [trivial] %0 : $*() + dealloc_stack %1 : $*() + %3 = tuple () + return %3 : $() +} + +// CHECK-LABEL: sil [ossa] @mem2reg_debug_value_addr : +// CHECK-NOT: alloc_stack +// CHECK-NOT: debug_value_addr +// CHECK: debug_value %0 +// CHECK-LABEL: } // end sil function 'mem2reg_debug_value_addr' +sil [ossa] @mem2reg_debug_value_addr : $@convention(thin) (Int) -> Int { +bb0(%0 : $Int): + %1 = alloc_stack $Int + store %0 to [trivial] %1 : $*Int + debug_value_addr %1 : $*Int + %2 = load [trivial] %1 : $*Int + dealloc_stack %1 : $*Int + return %2 : $Int +} + +// CHECK-LABEL: sil [ossa] @mem2reg_struct_addr : +// CHECK-NOT: alloc_stack +// CHECK: struct_extract +// CHECK-LABEL: } // end sil function 'mem2reg_struct_addr' +sil [ossa] @mem2reg_struct_addr : $@convention(thin) (Int64) -> Builtin.Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64 + store %0 to [trivial] %1 : $*Int64 + %2 = struct_element_addr %1 : $*Int64, #Int64._value + %3 = load [trivial] %2 : $*Builtin.Int64 + dealloc_stack %1 : $*Int64 + return %3 : $Builtin.Int64 +} + +// CHECK-LABEL: sil [ossa] @mem2reg_tuple_addr : +// CHECK-NOT: alloc_stack +// CHECK: tuple_extract {{.*}}, 0 +// CHECK-LABEL: } // end sil function 'mem2reg_tuple_addr' +sil [ossa] @mem2reg_tuple_addr : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $(Int64, Int64) + %2 = tuple (%0 : $Int64, %0 : $Int64) + store %2 to [trivial] %1 : $*(Int64, Int64) + %4 = tuple_element_addr %1 : $*(Int64, Int64), 0 + %5 = load [trivial] %4 : $*Int64 + dealloc_stack %1 : $*(Int64, Int64) + return %5 : $Int64 +} + +// CHECK-LABEL: sil [ossa] @struct_extract_if_then_else : +// CHECK-NOT: alloc_stack +sil [ossa] @struct_extract_if_then_else : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64 + store %0 to [trivial] %1 : $*Int64 + %3 = integer_literal $Builtin.Int64, 2 + %4 = struct_extract %0 
: $Int64, #Int64._value + %5 = builtin "cmp_sgt_Int64"(%4 : $Builtin.Int64, %3 : $Builtin.Int64) : $Builtin.Int1 + %6 = struct_element_addr %1 : $*Int64, #Int64._value + cond_br %5, bb1, bb2 + +// CHECK: bb1: +// CHECK: struct_extract %0 +bb1: + %8 = load [trivial] %6 : $*Builtin.Int64 + %9 = integer_literal $Builtin.Int64, 1 + %10 = integer_literal $Builtin.Int1, 0 + %11 = builtin "sadd_with_overflow_Int64"(%8 : $Builtin.Int64, %9 : $Builtin.Int64, %10 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %12 = tuple_extract %11 : $(Builtin.Int64, Builtin.Int1), 0 + br bb3(%12 : $Builtin.Int64) + +// CHECK: bb2: +// CHECK: struct_extract %0 +bb2: + %14 = load [trivial] %6 : $*Builtin.Int64 + %15 = integer_literal $Builtin.Int64, 2 + %16 = integer_literal $Builtin.Int1, 0 + %17 = builtin "sadd_with_overflow_Int64"(%14 : $Builtin.Int64, %15 : $Builtin.Int64, %16 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %18 = tuple_extract %17 : $(Builtin.Int64, Builtin.Int1), 0 + br bb3(%18 : $Builtin.Int64) + +// CHECK-NOT: dealloc_stack +bb3(%20 : $Builtin.Int64): + dealloc_stack %1 : $*Int64 + %22 = struct $Int64 (%20 : $Builtin.Int64) + return %22 : $Int64 +} +// CHECK-LABEL: } // end sil function 'struct_extract_if_then_else' + +sil [ossa] @first : $@convention(thin) () -> Int +sil [ossa] @second : $@convention(thin) () -> Int + +// CHECK: sil [ossa] @promote_function_refs : +sil [ossa] @promote_function_refs : $@convention(thin) (Bool) -> Int { +// CHECK: bb0 +bb0(%0 : $Bool): +// CHECK-NOT: [[STACK:%.*]] = alloc_stack + %1 = alloc_stack $@callee_owned () -> Int + debug_value %0 : $Bool + %3 = struct_extract %0 : $Bool, #Bool._value + cond_br %3, bb1, bb2 + +// CHECK: bb1 +bb1: +// CHECK: [[FIRSTREF:%.*]] = function_ref @first + %5 = function_ref @first : $@convention(thin) () -> Int +// CHECK: [[FIRSTTHICK:%.*]] = thin_to_thick_function [[FIRSTREF]] + %6 = thin_to_thick_function %5 : $@convention(thin) () -> Int to $@callee_owned () -> Int +// CHECK-NOT: store + 
store %6 to [init] %1 : $*@callee_owned () -> Int +// CHECK: br bb3([[FIRSTTHICK]] : $@callee_owned () -> Int + br bb3 + +// CHECK: bb2 +bb2: +// CHECK: [[SECONDREF:%.*]] = function_ref @second + %9 = function_ref @second : $@convention(thin) () -> Int +// CHECK: [[SECONDTHICK:%.*]] = thin_to_thick_function [[SECONDREF]] + %10 = thin_to_thick_function %9 : $@convention(thin) () -> Int to $@callee_owned () -> Int +// CHECK-NOT: store + store %10 to [init] %1 : $*@callee_owned () -> Int +// CHECK: br bb3([[SECONDTHICK]] : $@callee_owned () -> Int) + br bb3 + +// CHECK: bb3([[ARG:%.*]] : @owned $@callee_owned () -> Int): +bb3: +// CHECK-NOT: load [[STACK]] + %13 = load [copy] %1 : $*@callee_owned () -> Int +// CHECK: [[COPY:%.*]] = copy_value [[ARG]] +// CHECK: [[RESULT:%.*]] = apply [[COPY]] + %15 = apply %13() : $@callee_owned () -> Int + br bb4 + + // NOTE: This block and the branch above exist to ensure that we + // test what happens when %1 hasn't already been loaded in this + // block. 
+// CHECK: bb4 +bb4: +// CHECK-NOT: destroy_addr [[STACK]] +// CHECK: destroy_value [[ARG]] + destroy_addr %1 : $*@callee_owned () -> Int +// CHECK-NOT: dealloc_stack [[STACK]] + dealloc_stack %1 : $*@callee_owned () -> Int + return %15 : $Int +} +// CHECK-LABEL: } // end sil function 'promote_function_refs' + +// Test cases where the only use is a debug_value_addr +// CHECK-LABEL: sil [ossa] @no_real_uses : +sil [ossa] @no_real_uses : $@convention(thin) () -> () { +// CHECK: bb0 +bb0: + // CHECK-NOT: alloc_stack + %0 = alloc_stack $Builtin.Int32 + // CHECK-NOT: debug_value_addr + debug_value_addr %0 : $*Builtin.Int32, let, name "x", argno 1 + // CHECK-NOT: dealloc_stack + dealloc_stack %0 : $*Builtin.Int32 + %1 = tuple () + return %1 : $() +} +// CHECK-LABEL: } // end sil function 'no_real_uses' + +// CHECK-LABEL: sil [ossa] @keep_release : +// CHECK: destroy_value %0 +// CHECK-LABEL: } // end sil function 'keep_release' +sil [ossa] @keep_release : $@convention(thin) (@owned AnyObject) -> () { +bb0(%0 : @owned $AnyObject): + %1 = alloc_stack $AnyObject + store %0 to [init] %1 : $*AnyObject + destroy_addr %1 : $*AnyObject + dealloc_stack %1 : $*AnyObject + %7 = tuple () + return %7 : $() +} + +// Test cases where there are dead address instructions. 
+// CHECK-LABEL: sil [ossa] @dead_use : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'dead_use' +sil [ossa] @dead_use : $@convention(thin) () -> () { + %0 = alloc_stack $Int64 + %1 = struct_element_addr %0 : $*Int64, #Int64._value + dealloc_stack %0 : $*Int64 + %2 = alloc_stack $(Int64, Int64) + %3 = tuple_element_addr %2 : $*(Int64, Int64), 0 + dealloc_stack %2 : $*(Int64, Int64) + %4 = tuple () + return %4 : $() +} + +// CHECK-LABEL: sil [ossa] @dont_crash_on_dead_arg_use : +// CHECK: bb0{{.*}}: +// CHECK: tuple () +// CHECK-LABEL: } // end sil function 'dont_crash_on_dead_arg_use' +sil [ossa] @dont_crash_on_dead_arg_use : $@convention(thin) (@inout Int64) -> () { +bb0(%0 : $*Int64): + %2 = alloc_stack $Int64 + %1 = struct_element_addr %0 : $*Int64, #Int64._value + %3 = struct_element_addr %2 : $*Int64, #Int64._value + dealloc_stack %2 : $*Int64 + %4 = tuple () + return %4 : $() +} + +// Make sure that we do expand destroy_addr appropriately for code-size +// trade-offs. 
+// CHECK-LABEL: sil [ossa] @large_struct_test : +// CHECK: bb0([[ARG0:%.*]] : @owned $LargeCodesizeStruct): +// CHECK: destroy_value [[ARG0]] +// CHECK: } // end sil function 'large_struct_test' +sil [ossa] @large_struct_test : $@convention(thin) (@owned LargeCodesizeStruct) -> () { +bb0(%0 : @owned $LargeCodesizeStruct): + %1 = alloc_stack $LargeCodesizeStruct + store %0 to [init] %1 : $*LargeCodesizeStruct + destroy_addr %1 : $*LargeCodesizeStruct + dealloc_stack %1 : $*LargeCodesizeStruct + %7 = tuple () + return %7 : $() +} + +// CHECK-LABEL: sil [ossa] @small_struct_test : +// CHECK: bb0([[ARG0:%.*]] : @owned $SmallCodesizeStruct): +// CHECK: ([[ELEM1:%[0-9]+]], [[ELEM2:%[0-9]+]]) = destructure_struct [[ARG0]] +// CHECK: destroy_value [[ELEM1]] +// CHECK: destroy_value [[ELEM2]] +// CHECK: } // end sil function 'small_struct_test' +sil [ossa] @small_struct_test : $@convention(thin) (@owned SmallCodesizeStruct) -> () { +bb0(%0 : @owned $SmallCodesizeStruct): + %1 = alloc_stack $SmallCodesizeStruct + store %0 to [init] %1 : $*SmallCodesizeStruct + destroy_addr %1 : $*SmallCodesizeStruct + dealloc_stack %1 : $*SmallCodesizeStruct + %7 = tuple () + return %7 : $() +} + +// CHECK-LABEL: sil [ossa] @small_struct_multi_test : +// CHECK-NOT: alloc_stack +// CHECK: [[COPY:%.*]] = copy_value %0 +// CHECK-NEXT: destructure_struct %0 +// CHECK-NEXT: destroy_value +// CHECK-NEXT: destroy_value +// CHECK-NEXT: begin_borrow [[COPY]] +// CHECK-NEXT: debug_value +// CHECK-NEXT: end_borrow +// CHECK-NEXT: destroy_value [[COPY]] +// CHECK: bb2: +// CHECK-NEXT: destructure_struct %0 +// CHECK-NEXT: destroy_value +// CHECK-NEXT: destroy_value +// CHECK-LABEL: } // end sil function 'small_struct_multi_test' +sil [ossa] @small_struct_multi_test : $@convention(thin) (@owned SmallCodesizeStruct) -> () { +bb0(%0 : @owned $SmallCodesizeStruct): + %1 = alloc_stack $SmallCodesizeStruct + store %0 to [init] %1 : $*SmallCodesizeStruct + cond_br undef, bb1, bb2 + +bb1: + %3 = load [copy] %1 
: $*SmallCodesizeStruct + destroy_addr %1 : $*SmallCodesizeStruct + dealloc_stack %1 : $*SmallCodesizeStruct + %4 = begin_borrow %3 : $SmallCodesizeStruct + debug_value %4 : $SmallCodesizeStruct + end_borrow %4 : $SmallCodesizeStruct + destroy_value %3 : $SmallCodesizeStruct + br bb3 + +bb2: + destroy_addr %1 : $*SmallCodesizeStruct + dealloc_stack %1 : $*SmallCodesizeStruct + br bb3 + +bb3: + %7 = tuple () + return %7 : $() +} + + +// CHECK-LABEL: sil [ossa] @dead_address_projections : +// CHECK-NOT: alloc_stack +// CHECK: } // end sil function 'dead_address_projections' +sil [ossa] @dead_address_projections : $@convention(thin) (((), ())) -> ((), ()) { +bb0(%0 : $((), ())): + %1 = alloc_stack $((), ()) + %200 = tuple_element_addr %1 : $*((), ()), 0 + %300 = tuple_element_addr %1 : $*((), ()), 1 + cond_br undef, bb1, bb2 + +bb1: + store %0 to [trivial] %1 : $*((), ()) + %16 = load [trivial] %1 : $*((), ()) + dealloc_stack %1 : $*((), ()) + br bb3(%16 : $((), ())) + +bb2: + dealloc_stack %1 : $*((), ()) + br bb3(%0 : $((), ())) + +bb3(%20 : $((), ())): + return %20 : $((), ()) +} + +// CHECK-LABEL: sil [ossa] @load_tuple_of_void : +// CHECK-NOT: alloc_stack +// CHECK: return undef : $((), ()) +// CHECK: } // end sil function 'load_tuple_of_void' +sil [ossa] @load_tuple_of_void : $@convention(thin) () -> ((), ()) { +bb0: + %1 = alloc_stack $((), ()) + %16 = load [trivial] %1 : $*((), ()) + dealloc_stack %1 : $*((), ()) + return %16 : $((), ()) +} diff --git a/test/SILOptimizer/mem2reg_ossa_nontrivial.sil b/test/SILOptimizer/mem2reg_ossa_nontrivial.sil new file mode 100644 index 0000000000000..33f643644f8ed --- /dev/null +++ b/test/SILOptimizer/mem2reg_ossa_nontrivial.sil @@ -0,0 +1,615 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +import Builtin +import Swift + +////////////////// +// Declarations // +////////////////// + +class Klass {} + +struct SmallCodesizeStruct { + var cls1 : Klass + var cls2 : Klass +} + +struct 
WrapperStruct { + var cls : Klass +} + +struct LargeCodesizeStruct { + var s1: SmallCodesizeStruct + var s2: SmallCodesizeStruct + var s3: SmallCodesizeStruct + var s4: SmallCodesizeStruct + var s5: SmallCodesizeStruct +} + +/////////// +// Tests // +/////////// + +sil [noinline] [ossa] @blackhole : $@convention(thin) (@in_guaranteed T) -> () { +bb0(%0 : $*T): + debug_value_addr %0 : $*T, let, name "t", argno 1 + %2 = tuple () + return %2 : $() +} + +sil shared [noinline] @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : $Klass): + %1 = tuple () + return %1 : $() +} + +// CHECK-LABEL: sil [ossa] @store_only_allocas : +// CHECK-NOT: alloc_stack +// CHECK: return +sil [ossa] @store_only_allocas : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + %2 = function_ref @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () + %3 = load [take] %1 : $*Klass + %4 = apply %2(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + dealloc_stack %1 : $*Klass + %6 = tuple () + return %6 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals : +// CHECK-NOT: alloc_stack +// CHECK: destroy_value %0 : $Klass +// CHECK: [[FUNC:%.*]] = function_ref @blackhole_spl : +// CHECK: apply [[FUNC]](%1) : $@convention(thin) (@guaranteed Klass) -> () +// CHECK: destroy_value %1 : $Klass +// CHECK-LABEL: } // end sil function 'multiple_store_vals' +sil [ossa] @multiple_store_vals : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %2 = alloc_stack $Klass + store %0 to [init] %2 : $*Klass + %3 = integer_literal $Builtin.Int1, 0 + cond_fail %3 : $Builtin.Int1 + store %1 to [assign] %2 : $*Klass + %4 = function_ref @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () + %5 = load [take] %2 : $*Klass + %6 = apply %4(%5) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %5 : $Klass + 
dealloc_stack %2 : $*Klass + %7 = tuple () + return %7 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals2 : +// CHECK-NOT: alloc_stack +// CHECK: destroy_value %0 : $Klass +// CHECK: [[FUNC:%.*]] = function_ref @blackhole_spl : +// CHECK: apply [[FUNC]](%1) : $@convention(thin) (@guaranteed Klass) -> () +// CHECK: destroy_value %1 : $Klass +// CHECK-LABEL: } // end sil function 'multiple_store_vals2' +sil [ossa] @multiple_store_vals2 : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %2 = alloc_stack $Klass + store %0 to [init] %2 : $*Klass + %3 = integer_literal $Builtin.Int1, 0 + cond_fail %3 : $Builtin.Int1 + destroy_addr %2 : $*Klass + store %1 to [init] %2 : $*Klass + %4 = function_ref @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () + %5 = load [take] %2 : $*Klass + %6 = apply %4(%5) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %5 : $Klass + dealloc_stack %2 : $*Klass + %7 = tuple () + return %7 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals3 : +// CHECK-NOT: alloc_stack +// CHECK: [[COPY0:%.*]] = copy_value %0 : $Klass +// CHECK: [[COPY1:%.*]] = copy_value %1 : $Klass +// CHECK: destroy_value [[COPY0]] : $Klass +// CHECK: [[FUNC:%.*]] = function_ref @blackhole_spl : +// CHECK: apply [[FUNC]]([[COPY1]]) : $@convention(thin) (@guaranteed Klass) -> () +// CHECK: destroy_value [[COPY1]] : $Klass +// CHECK-LABEL: } // end sil function 'multiple_store_vals3' +sil [ossa] @multiple_store_vals3 : $@convention(thin) (@guaranteed Klass, @guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass, %1 : @guaranteed $Klass): + %2 = alloc_stack $Klass + %copy0 = copy_value %0 : $Klass + store %copy0 to [init] %2 : $*Klass + %3 = integer_literal $Builtin.Int1, 0 + cond_fail %3 : $Builtin.Int1 + %copy1 = copy_value %1 : $Klass + store %copy1 to [assign] %2 : $*Klass + %4 = function_ref @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () + %5 = load [take] %2 : 
$*Klass + %6 = apply %4(%5) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %5 : $Klass + dealloc_stack %2 : $*Klass + %7 = tuple () + return %7 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals4 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multiple_store_vals4' +sil [ossa] @multiple_store_vals4 : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %2 = alloc_stack $Klass + store %0 to [init] %2 : $*Klass + %3 = alloc_box $<τ_0_0> { var τ_0_0 } + %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } , 0 + store %1 to [assign] %2 : $*Klass + cond_br undef, bb1, bb2 + +bb1: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb2: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb3: + destroy_addr %2 : $*Klass + dealloc_stack %2 : $*Klass + %ret = tuple () + return %ret : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_store_vals5 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multiple_store_vals5' +sil [ossa] @multiple_store_vals5 : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass, %2 : @owned $Klass): + %stk = alloc_stack $Klass + store %0 to [init] %stk : $*Klass + %3 = alloc_box $<τ_0_0> { var τ_0_0 } + %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } , 0 + store %1 to [assign] %stk : $*Klass + store %2 to [assign] %stk : $*Klass + cond_br undef, bb1, bb2 + +bb1: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb2: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb3: + destroy_addr %stk : $*Klass + dealloc_stack %stk : $*Klass + %ret = tuple () + return %ret : $() +} + +// CHECK-LABEL: sil [ossa] @with_loads : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'with_loads' +sil [ossa] @with_loads : $@convention(thin) (@owned Klass, @owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %2 = alloc_stack $Klass + store %0 to 
[init] %2 : $*Klass + %3 = alloc_box $<τ_0_0> { var τ_0_0 } + %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } , 0 + store %1 to [assign] %2 : $*Klass + cond_br undef, bb1, bb2 + +bb1: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb2: + destroy_value %3 : $<τ_0_0> { var τ_0_0 } + br bb3 + +bb3: + %ret = load [take] %2 : $*Klass + dealloc_stack %2 : $*Klass + return %ret : $Klass +} + +// CHECK-LABEL: sil [ossa] @basic_block_with_loads_and_stores : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'basic_block_with_loads_and_stores' +sil [ossa] @basic_block_with_loads_and_stores : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %2 = alloc_stack $Klass + store %0 to [init] %2 : $*Klass + %3 = alloc_stack $Klass + store %1 to [init] %3 : $*Klass + %local = alloc_ref $Klass + store %local to [assign] %3 : $*Klass + %func = function_ref @blackhole_spl : $@convention(thin) (@guaranteed Klass) -> () + %arg = load [take] %3 : $*Klass + %applyres = apply %func(%arg) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %arg : $Klass + destroy_addr %2 : $*Klass + dealloc_stack %3 : $*Klass + dealloc_stack %2 : $*Klass + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @basic_block_with_loads_copy_and_take : +// CHECK-NOT: alloc_stack +// CHECK: [[COPY:%.*]] = copy_value %0 : $Klass +// CHECK: destroy_value [[COPY]] : $Klass +// CHECK: destroy_value %0 : $Klass +// CHECK-LABEL: } // end sil function 'basic_block_with_loads_copy_and_take' +sil [ossa] @basic_block_with_loads_copy_and_take : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + %copy = load [copy] %1 : $*Klass + %take = load [take] %1 : $*Klass + destroy_value %copy : $Klass + destroy_value %take : $Klass + dealloc_stack %1 : $*Klass + %res = tuple () + return %res : $() +} + +// load [copy] is not used as RunningVal +// 
StackAllocationPromoter::fixBranchesAndUses will delete the loads and replace with %0 +// CHECK-LABEL: sil [ossa] @multi_basic_block_with_loads_copy_and_take_1 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multi_basic_block_with_loads_copy_and_take_1' +sil [ossa] @multi_basic_block_with_loads_copy_and_take_1 : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + br bb1 +bb1: + %copy = load [copy] %1 : $*Klass + %take = load [take] %1 : $*Klass + destroy_value %copy : $Klass + destroy_value %take : $Klass + dealloc_stack %1 : $*Klass + %res = tuple () + return %res : $() +} + +// load [copy] is not used as RunningVal +// StackAllocationPromoter::fixBranchesAndUses will delete the loads and replace with %0 +// CHECK-LABEL: sil [ossa] @multi_basic_block_with_loads_copy_and_take_2 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multi_basic_block_with_loads_copy_and_take_2' +sil [ossa] @multi_basic_block_with_loads_copy_and_take_2 : $@convention(thin) (@owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + br bb1 +bb1: + %copy = load [copy] %1 : $*Klass + %take = load [take] %1 : $*Klass + destroy_value %take : $Klass + dealloc_stack %1 : $*Klass + return %copy : $Klass +} + +// load [take] is used as RunningVal in bb1 +// CHECK-LABEL: sil [ossa] @multi_basic_block_with_loads_copy_and_take_3 : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multi_basic_block_with_loads_copy_and_take_3' +sil [ossa] @multi_basic_block_with_loads_copy_and_take_3 : $@convention(thin) (@owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + br bb1 +bb1: + %take = load [take] %1 : $*Klass + %copy = copy_value %take : $Klass + destroy_value %take : $Klass + dealloc_stack %1 : $*Klass + return %copy : $Klass +} + +// CHECK-LABEL: 
sil [ossa] @multi_basic_block_with_store_assign : +// CHECK-NOT: alloc_stack +// CHECK: destroy_value %0 : $Klass +// CHECK-LABEL: } // end sil function 'multi_basic_block_with_store_assign' +sil [ossa] @multi_basic_block_with_store_assign : $@convention(thin) (@owned Klass, @owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass, %1: @owned $Klass): + %stk = alloc_stack $Klass + store %0 to [init] %stk : $*Klass + br bb1 +bb1: + store %1 to [assign] %stk : $*Klass + %res = load [take] %stk : $*Klass + dealloc_stack %stk : $*Klass + return %res : $Klass +} + +// CHECK-LABEL: sil [ossa] @multi_basic_block_with_phiarg : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: bb1: +// CHECK: br bb3(%1 : $Klass) +// CHECK-LABEL: bb2: +// CHECK: br bb3(%0 : $Klass) +// CHECK-LABEL: } // end sil function 'multi_basic_block_with_phiarg' +sil [ossa] @multi_basic_block_with_phiarg : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %stk = alloc_stack $Klass + cond_br undef, bb1, bb2 +bb1: + store %1 to [init] %stk : $*Klass + destroy_value %0 : $Klass + br bb3 +bb2: + store %0 to [init] %stk : $*Klass + destroy_value %1 : $Klass + br bb3 +bb3: + %val = load [take] %stk : $*Klass + destroy_value %val : $Klass + dealloc_stack %stk : $*Klass + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @multi_asi_basic_block_with_phiarg : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: bb1: +// CHECK: br bb3(%1 : $Klass, %0 : $Klass) +// CHECK-LABEL: bb2: +// CHECK: br bb3(%0 : $Klass, %1 : $Klass) +// CHECK-LABEL: } // end sil function 'multi_asi_basic_block_with_phiarg' +sil [ossa] @multi_asi_basic_block_with_phiarg : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %stk1 = alloc_stack $Klass + %stk2 = alloc_stack $Klass + cond_br undef, bb1, bb2 +bb1: + store %1 to [init] %stk1 : $*Klass + store %0 to [init] %stk2 : $*Klass + br bb3 +bb2: + store %1 to [init] %stk2 : 
$*Klass + store %0 to [init] %stk1 : $*Klass + br bb3 +bb3: + %val1 = load [take] %stk1 : $*Klass + destroy_value %val1 : $Klass + %val2 = load [take] %stk2 : $*Klass + destroy_value %val2 : $Klass + dealloc_stack %stk2 : $*Klass + dealloc_stack %stk1 : $*Klass + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @multi_basic_block_stack_deallocated_phiarg : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: bb2: +// CHECK: br bb3(undef : $Klass) +// CHECK: bb3([[PHI:%.*]] : @owned $Klass): +// CHECK-NEXT: end_lifetime [[PHI]] : $Klass +// CHECK-LABEL: } // end sil function 'multi_basic_block_stack_deallocated_phiarg' +sil [ossa] @multi_basic_block_stack_deallocated_phiarg : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %stk = alloc_stack $Klass + cond_br undef, bb1, bb2 +bb1: + dealloc_stack %stk : $*Klass + destroy_value %0 : $Klass + br bb3 +bb2: + store %0 to [init] %stk : $*Klass + %val = load [take] %stk : $*Klass + dealloc_stack %stk : $*Klass + destroy_value %val : $Klass + br bb3 +bb3: + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @multi_asi_basic_block_stack_deallocated_phiarg : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: bb2: +// CHECK: br bb3(undef : $Klass, undef : $Klass) +// CHECK: bb3([[PHI1:%.*]] : @owned $Klass, [[PHI2:%.*]] : @owned $Klass): +// CHECK-NEXT: end_lifetime [[PHI2]] : $Klass +// CHECK-NEXT: end_lifetime [[PHI1]] : $Klass +// CHECK-LABEL: } // end sil function 'multi_asi_basic_block_stack_deallocated_phiarg' +sil [ossa] @multi_asi_basic_block_stack_deallocated_phiarg : $@convention(thin) (@owned Klass, @owned Klass) -> () { +bb0(%0 : @owned $Klass, %1 : @owned $Klass): + %stk1 = alloc_stack $Klass + %stk2 = alloc_stack $Klass + cond_br undef, bb1, bb2 +bb1: + dealloc_stack %stk2 : $*Klass + dealloc_stack %stk1 : $*Klass + destroy_value %0 : $Klass + destroy_value %1 : $Klass + br bb3 +bb2: + store %0 to [init] %stk1 : $*Klass + %val1 = load [take] %stk1 : $*Klass + store %1 
to [init] %stk2 : $*Klass + %val2 = load [take] %stk2 : $*Klass + destroy_value %val1 : $Klass + destroy_value %val2 : $Klass + dealloc_stack %stk2 : $*Klass + dealloc_stack %stk1 : $*Klass + br bb3 +bb3: + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @multi_basic_block_destroyed_last_stored_val_phiarg : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: bb2: +// CHECK: br bb3(undef : $Klass) +// CHECK-LABEL: } // end sil function 'multi_basic_block_destroyed_last_stored_val_phiarg' +sil [ossa] @multi_basic_block_destroyed_last_stored_val_phiarg : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %stk = alloc_stack $Klass + cond_br undef, bb1, bb2 +bb1: + destroy_value %0 : $Klass + br bb3 +bb2: + store %0 to [init] %stk : $*Klass + %val = load [take] %stk : $*Klass + destroy_value %val : $Klass + br bb3 +bb3: + dealloc_stack %stk : $*Klass + %res = tuple () + return %res : $() +} + +// CHECK-LABEL: sil [ossa] @mem2reg_debug_value_addr : +// CHECK-NOT: alloc_stack +// CHECK-NOT: debug_value_addr +// CHECK: debug_value %0 +// CHECK-LABEL: } // end sil function 'mem2reg_debug_value_addr' +sil [ossa] @mem2reg_debug_value_addr : $@convention(thin) (@owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + debug_value_addr %1 : $*Klass + %2 = load [take] %1 : $*Klass + dealloc_stack %1 : $*Klass + return %2 : $Klass +} + +// CHECK-LABEL: sil [ossa] @mem2reg_struct_addr : +// CHECK-NOT: alloc_stack +// CHECK: [[BORROW:%.*]] = begin_borrow %0 : $SmallCodesizeStruct +// CHECK: [[ELE:%.*]] = struct_extract [[BORROW]] +// CHECK: [[COPY:%.*]] = copy_value [[ELE]] : $Klass +// CHECK: end_borrow [[BORROW]] : $SmallCodesizeStruct +// CHECK: return [[COPY]] +// CHECK-LABEL: } // end sil function 'mem2reg_struct_addr' +sil [ossa] @mem2reg_struct_addr : $@convention(thin) (@owned SmallCodesizeStruct) -> @owned Klass { +bb0(%0 : @owned $SmallCodesizeStruct): + %1 = alloc_stack 
$SmallCodesizeStruct
+  store %0 to [init] %1 : $*SmallCodesizeStruct
+  %2 = struct_element_addr %1 : $*SmallCodesizeStruct, #SmallCodesizeStruct.cls1
+  %3 = load [copy] %2 : $*Klass
+  destroy_addr %1 : $*SmallCodesizeStruct
+  dealloc_stack %1 : $*SmallCodesizeStruct
+  return %3 : $Klass
+}
+
+// SILMem2Reg is disabled when there is a load [take] with struct_element_addr/tuple_element_addr
+// CHECK-LABEL: sil [ossa] @mem2reg_struct_addr_load_take :
+// CHECK: alloc_stack
+// CHECK-LABEL: } // end sil function 'mem2reg_struct_addr_load_take'
+sil [ossa] @mem2reg_struct_addr_load_take : $@convention(thin) (@owned WrapperStruct) -> () {
+bb0(%0 : @owned $WrapperStruct):
+  %1 = alloc_stack $WrapperStruct
+  store %0 to [init] %1 : $*WrapperStruct
+  %2 = struct_element_addr %1 : $*WrapperStruct, #WrapperStruct.cls
+  %3 = load [take] %2 : $*Klass
+  destroy_value %3 : $Klass
+  dealloc_stack %1 : $*WrapperStruct
+  %tup = tuple ()
+  return %tup : $()
+}
+
+// CHECK-LABEL: sil [ossa] @mem2reg_tuple_addr :
+// CHECK-NOT: alloc_stack
+// CHECK: [[TUP:%.*]] = tuple (%0 : $Klass, %1 : $Klass)
+// CHECK: [[BORROW:%.*]] = begin_borrow [[TUP]] : $(Klass, Klass)
+// CHECK: [[ELE:%.*]] = tuple_extract [[BORROW]]
+// CHECK: [[COPY:%.*]] = copy_value [[ELE]] : $Klass
+// CHECK: end_borrow [[BORROW]] : $(Klass, Klass)
+// CHECK: return [[COPY]]
+// CHECK-LABEL: } // end sil function 'mem2reg_tuple_addr'
+sil [ossa] @mem2reg_tuple_addr : $@convention(thin) (@owned Klass, @owned Klass) -> @owned Klass {
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %stk = alloc_stack $(Klass, Klass)
+  %2 = tuple (%0 : $Klass, %1 : $Klass)
+  store %2 to [init] %stk : $*(Klass, Klass)
+  %4 = tuple_element_addr %stk : $*(Klass, Klass), 0
+  %5 = load [copy] %4 : $*Klass
+  destroy_addr %stk : $*(Klass, Klass)
+  dealloc_stack %stk : $*(Klass, Klass)
+  return %5 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @struct_extract_if_then_else :
+// CHECK-NOT: alloc_stack
+sil [ossa] @struct_extract_if_then_else : 
$@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64 + store %0 to [trivial] %1 : $*Int64 + %3 = integer_literal $Builtin.Int64, 2 + %4 = struct_extract %0 : $Int64, #Int64._value + %5 = builtin "cmp_sgt_Int64"(%4 : $Builtin.Int64, %3 : $Builtin.Int64) : $Builtin.Int1 + %6 = struct_element_addr %1 : $*Int64, #Int64._value + cond_br %5, bb1, bb2 + +// CHECK: bb1: +// CHECK: struct_extract %0 +bb1: + %8 = load [trivial] %6 : $*Builtin.Int64 + %9 = integer_literal $Builtin.Int64, 1 + %10 = integer_literal $Builtin.Int1, 0 + %11 = builtin "sadd_with_overflow_Int64"(%8 : $Builtin.Int64, %9 : $Builtin.Int64, %10 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %12 = tuple_extract %11 : $(Builtin.Int64, Builtin.Int1), 0 + br bb3(%12 : $Builtin.Int64) + +// CHECK: bb2: +// CHECK: struct_extract %0 +bb2: + %14 = load [trivial] %6 : $*Builtin.Int64 + %15 = integer_literal $Builtin.Int64, 2 + %16 = integer_literal $Builtin.Int1, 0 + %17 = builtin "sadd_with_overflow_Int64"(%14 : $Builtin.Int64, %15 : $Builtin.Int64, %16 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %18 = tuple_extract %17 : $(Builtin.Int64, Builtin.Int1), 0 + br bb3(%18 : $Builtin.Int64) + +// CHECK-NOT: dealloc_stack +bb3(%20 : $Builtin.Int64): + dealloc_stack %1 : $*Int64 + %22 = struct $Int64 (%20 : $Builtin.Int64) + return %22 : $Int64 +} +// CHECK-LABEL: } // end sil function 'struct_extract_if_then_else' + +// Test cases where the only use is a debug_value_addr +// CHECK-LABEL: sil [ossa] @no_real_uses : +sil [ossa] @no_real_uses : $@convention(thin) () -> () { +// CHECK: bb0 +bb0: + // CHECK-NOT: alloc_stack + %0 = alloc_stack $Klass + %local = alloc_ref $Klass + store %local to [init] %0 : $*Klass + // CHECK-NOT: debug_value_addr + debug_value_addr %0 : $*Klass + destroy_addr %0 : $*Klass + // CHECK-NOT: dealloc_stack + dealloc_stack %0 : $*Klass + %1 = tuple () + return %1 : $() +} +// CHECK-LABEL: } // end sil function 'no_real_uses' + +// CHECK-LABEL: sil 
[ossa] @half_trivial +// CHECK: destructure_tuple %0 +// CHECK-NEXT: destroy_value +// CHECK-NEXT: tuple +// CHECK-LABEL: } // end sil function 'half_trivial' +sil [ossa] @half_trivial : $@convention(thin) (@owned (Builtin.BridgeObject, Builtin.Int32)) -> () { +bb0(%0 : @owned $(Builtin.BridgeObject, Builtin.Int32)): + %1 = alloc_stack $(Builtin.BridgeObject, Builtin.Int32) + store %0 to [init] %1 : $*(Builtin.BridgeObject, Builtin.Int32) + %3 = load [copy] %1 : $*(Builtin.BridgeObject, Builtin.Int32) + destroy_value %3 : $(Builtin.BridgeObject, Builtin.Int32) + destroy_addr %1 : $*(Builtin.BridgeObject, Builtin.Int32) + dealloc_stack %1 : $*(Builtin.BridgeObject, Builtin.Int32) + %7 = tuple () + return %7 : $() +} diff --git a/test/SILOptimizer/mem2reg_ossa_nontrivial_casts.sil b/test/SILOptimizer/mem2reg_ossa_nontrivial_casts.sil new file mode 100644 index 0000000000000..12cd17bad6869 --- /dev/null +++ b/test/SILOptimizer/mem2reg_ossa_nontrivial_casts.sil @@ -0,0 +1,81 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +import Builtin +import Swift + +// We cannot use unchecked_value_cast for conversions to trivial type. 
+// Since it is forwarding, the ownership of the src forwards, but we cannot destroy the dst because it is trivial
+
+// CHECK-LABEL: sil [ossa] @casttotrivial :
+// CHECK: [[CAST:%.*]] = unchecked_bitwise_cast %0 : $AnyObject to $UInt8
+// CHECK-NEXT: destroy_value %0 : $AnyObject
+// CHECK-NEXT: return [[CAST]] : $UInt8
+// CHECK-LABEL: } // end sil function 'casttotrivial'
+sil [ossa] @casttotrivial : $@convention(thin) (@owned AnyObject) -> @owned UInt8 {
+bb0(%0 : @owned $AnyObject):
+  %1 = alloc_stack $AnyObject
+  store %0 to [init] %1 : $*AnyObject
+  %2 = unchecked_addr_cast %1 : $*AnyObject to $*UInt8
+  %3 = load [trivial] %2 : $*UInt8
+  %4 = load [take] %1 : $*AnyObject
+  destroy_value %4 : $AnyObject
+  dealloc_stack %1 : $*AnyObject
+  return %3 : $UInt8
+}
+
+// We cannot use unchecked_value_cast, because it is forwarding, it forwards the src, and the src can no longer be used as a RunningVal
+// To get rid of this issue we need to use a copy_value of the src, and make sure we don't generate copy_value in case of a load [copy]. 
+// To avoid all this spl handling, just use bitwise cast + +// CHECK-LABEL: sil [ossa] @casttonontrivial : +// CHECK: [[CAST:%.*]] = unchecked_bitwise_cast %0 : $AnyObject to $String +// CHECK: [[COPY:%.*]] = copy_value [[CAST]] : $String +// CHECK-NEXT: destroy_value %0 : $AnyObject +// CHECK-NEXT: return [[COPY]] : $String +// CHECK-LABEL: } // end sil function 'casttonontrivial' +sil [ossa] @casttonontrivial : $@convention(thin) (@owned AnyObject) -> @owned String { +bb0(%0 : @owned $AnyObject): + %1 = alloc_stack $AnyObject + store %0 to [init] %1 : $*AnyObject + %2 = unchecked_addr_cast %1 : $*AnyObject to $*String + %3 = load [copy] %2 : $*String + %4 = load [take] %1 : $*AnyObject + destroy_value %4 : $AnyObject + dealloc_stack %1 : $*AnyObject + return %3 : $String +} + +struct Pair { var lhs: AnyObject; var rhs: AnyObject } + +// CHECK-LABEL: sil [ossa] @shorteningcast : +// CHECK: [[CAST:%.*]] = unchecked_bitwise_cast %0 : $Pair to $AnyObject +// CHECK: [[COPY:%.*]] = copy_value [[CAST]] : $AnyObject +// CHECK-NEXT: destroy_value %0 : $Pair +// CHECK-NEXT: return [[COPY]] : $AnyObject +// CHECK-LABEL: } // end sil function 'shorteningcast' +sil [ossa] @shorteningcast : $@convention(thin) (@owned Pair) -> @owned AnyObject { +bb0(%0 : @owned $Pair): + %1 = alloc_stack $Pair + store %0 to [init] %1 : $*Pair + %2 = unchecked_addr_cast %1 : $*Pair to $*AnyObject + %3 = load [copy] %2 : $*AnyObject + %4 = load [take] %1 : $*Pair + destroy_value %4 : $Pair + dealloc_stack %1 : $*Pair + return %3 : $AnyObject +} + +// CHECK-LABEL: sil [ossa] @deadcast : +// CHECK-LABEL: bb0 +// CHECK-NEXT: destroy_value %0 : $AnyObject +// CHECK-LABEL: } // end sil function 'deadcast' +sil [ossa] @deadcast : $@convention(thin) (@owned AnyObject) -> () { +bb0(%0 : @owned $AnyObject): + %1 = alloc_stack $AnyObject + store %0 to [init] %1 : $*AnyObject + %2 = unchecked_addr_cast %1 : $*AnyObject to $*String + destroy_addr %1 : $*AnyObject + dealloc_stack %1 : $*AnyObject + %4 = 
tuple() + return %4 : $() +} diff --git a/test/SILOptimizer/mem2reg_resilient_ossa.sil b/test/SILOptimizer/mem2reg_resilient_ossa.sil new file mode 100644 index 0000000000000..25ee0ef094308 --- /dev/null +++ b/test/SILOptimizer/mem2reg_resilient_ossa.sil @@ -0,0 +1,28 @@ + +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg -enable-library-evolution | %FileCheck %s + +import Builtin +import Swift + +public struct ResilientStruct { + var x: AnyObject +} + +// CHECK-LABEL: sil [ossa] @mem2reg_debug_value_addr : +// CHECK: bb0(%0 : $*ResilientStruct): +// CHECK-NEXT: %1 = load [copy] %0 : $*ResilientStruct +// CHECK-NEXT: debug_value %1 : $ResilientStruct +// CHECK-NEXT: destroy_value %1 : $ResilientStruct +// CHECK-LABEL: } // end sil function 'mem2reg_debug_value_addr' +sil [ossa] @mem2reg_debug_value_addr : $@convention(thin) (@in_guaranteed ResilientStruct) -> () { +bb0(%0 : $*ResilientStruct): + %1 = alloc_stack $ResilientStruct + %2 = load [copy] %0 : $*ResilientStruct + store %2 to [init] %1 : $*ResilientStruct + debug_value_addr %1 : $*ResilientStruct + %3 = load [take] %1 : $*ResilientStruct + destroy_value %3 : $ResilientStruct + dealloc_stack %1 : $*ResilientStruct + %4 = tuple () + return %4 : $() +} diff --git a/test/SILOptimizer/mem2reg_simple_ossa.sil b/test/SILOptimizer/mem2reg_simple_ossa.sil new file mode 100644 index 0000000000000..db5a7ceec3103 --- /dev/null +++ b/test/SILOptimizer/mem2reg_simple_ossa.sil @@ -0,0 +1,410 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +sil_stage canonical + +import Builtin +import Swift + +// func foo(v : Int) -> Int { +// var x : Int = 0 +// if v == 3 { x = 3 } else { +// if (v == 2) { x = 2 } +// } +// var i : Int = 0 +// while (i < 10) { i = i+1 } +// return x +// } + +// CHECK: sil [ossa] @place_phi : +// CHECK-NOT: alloc_stack +sil [ossa] @place_phi : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "v" + store %0 to [trivial] 
%1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 0 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + %7 = integer_literal $Builtin.Int64, 3 + %9 = struct_extract %0 : $Int64, #Int64._value + %10 = builtin "cmp_eq_Int64"(%9 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1 + cond_br %10, bb1, bb2 + +bb1: + %12 = struct $Int64 (%7 : $Builtin.Int64) + store %12 to [trivial] %3 : $*Int64 + br bb5 + +bb2: + %15 = integer_literal $Builtin.Int64, 2 + %16 = builtin "cmp_eq_Int64"(%9 : $Builtin.Int64, %15 : $Builtin.Int64) : $Builtin.Int1 + cond_br %16, bb3, bb4 + +bb3: + %18 = struct $Int64 (%15 : $Builtin.Int64) + store %18 to [trivial] %3 : $*Int64 + br bb4 + +bb4: + br bb5 + +// CHECK: bb5([[PHI:%[0-9]+]] : $Int64): +// CHECK-NOT: alloc_stack +bb5: + %22 = alloc_stack $Int64, var, name "i" + store %5 to [trivial] %22 : $*Int64 + br bb6 + +// CHECK: bb6([[PHI2:%[0-9]+]] : $Int64): +bb6: + // CHECK: struct_extract [[PHI2]] + %25 = struct_element_addr %22 : $*Int64, #Int64._value + %26 = load [trivial] %25 : $*Builtin.Int64 + %27 = integer_literal $Builtin.Int64, 10 + %29 = builtin "cmp_slt_Int64"(%26 : $Builtin.Int64, %27 : $Builtin.Int64) : $Builtin.Int1 + cond_br %29, bb7, bb8 + +bb7: + // CHECK: struct_extract [[PHI2]] + %31 = struct_element_addr %22 : $*Int64, #Int64._value + %32 = load [trivial] %31 : $*Builtin.Int64 + %33 = integer_literal $Builtin.Int64, 1 + %35 = builtin "sadd_with_overflow_Int64"(%32 : $Builtin.Int64, %33 : $Builtin.Int64, %29 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %36 = tuple_extract %35 : $(Builtin.Int64, Builtin.Int1), 0 + %37 = tuple_extract %35 : $(Builtin.Int64, Builtin.Int1), 1 + %38 = struct $Int64 (%36 : $Builtin.Int64) + cond_fail %37 : $Builtin.Int1 + store %38 to [trivial] %22 : $*Int64 + br bb6 + +bb8: + %42 = load [trivial] %3 : $*Int64 + dealloc_stack %22 : $*Int64 + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 + return %42 : 
$Int64 +} +// CHECK-LABEL: } // end sil function 'place_phi' + +// func loop(c : Int) -> Int { +// var x : Int = 0 +// while (x < c) { x = x + 1 } +// return x +// } + +// CHECK: sil [ossa] @func_loop : +// CHECK-NOT: alloc_stack +sil [ossa] @func_loop: $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 0 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + br bb1 + +// CHECK: bb1([[VAR:%[0-9]+]] : $Int64): +bb1: + %8 = load [trivial] %3 : $*Int64 + %10 = struct_extract %8 : $Int64, #Int64._value + %11 = struct_extract %0 : $Int64, #Int64._value + %12 = builtin "cmp_slt_Int64"(%10 : $Builtin.Int64, %11 : $Builtin.Int64) : $Builtin.Int1 + cond_br %12, bb2, bb3 + +bb2: + %14 = load [trivial] %3 : $*Int64 + %15 = integer_literal $Builtin.Int64, 1 + %16 = integer_literal $Builtin.Int1, -1 + %18 = struct_extract %14 : $Int64, #Int64._value + %19 = builtin "sadd_with_overflow_Int64"(%18 : $Builtin.Int64, %15 : $Builtin.Int64, %16 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %20 = tuple_extract %19 : $(Builtin.Int64, Builtin.Int1), 0 + %21 = tuple_extract %19 : $(Builtin.Int64, Builtin.Int1), 1 + %22 = struct $Int64 (%20 : $Builtin.Int64) + cond_fail %21 : $Builtin.Int1 + store %22 to [trivial] %3 : $*Int64 + br bb1 + +bb3: + %26 = load [trivial] %3 : $*Int64 + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 +// CHECK-NOT: dealloc_stack + return %26 : $Int64 +} +// CHECK-LABEL: } // end sil function 'func_loop' + +// func nest(c : Int) -> Int { +// var x : Int = 0 +// if (c > 1) { if (c > 2) { if (c > 3) { if (c > 4) { +// if (c > 5) { if (c > 6) { if (c > 7) { if (c > 8) { +// if (c > 9) { if (c > 10) { if (c > 11) { if (c > 12) { +// if (c > 13) { if (c > 14) { if (c > 15) { if (c > 16) { +// if (c > 17) { x = 7 }}}}}}}}}}}}}}}}} return x +// } + +// This test 
should kill exponential algorithms. +// CHECK: sil [ossa] @high_nest : +// CHECK-NOT: alloc_stack +// CHECK-NOT: dealloc_stack +// CHECK-LABEL: } // end sil function 'high_nest' +sil [ossa] @high_nest : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 0 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + %7 = integer_literal $Builtin.Int64, 1 + %9 = struct_extract %0 : $Int64, #Int64._value + %10 = builtin "cmp_sgt_Int64"(%9 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1 + cond_br %10, bb1, bb34 + +bb1: + %12 = integer_literal $Builtin.Int64, 2 + %14 = struct_extract %0 : $Int64, #Int64._value + %15 = builtin "cmp_sgt_Int64"(%14 : $Builtin.Int64, %12 : $Builtin.Int64) : $Builtin.Int1 + cond_br %15, bb2, bb33 + +bb2: + %17 = integer_literal $Builtin.Int64, 3 + %19 = struct_extract %0 : $Int64, #Int64._value + %20 = builtin "cmp_sgt_Int64"(%19 : $Builtin.Int64, %17 : $Builtin.Int64) : $Builtin.Int1 + cond_br %20, bb3, bb32 + +bb3: + %22 = integer_literal $Builtin.Int64, 4 + %24 = struct_extract %0 : $Int64, #Int64._value + %25 = builtin "cmp_sgt_Int64"(%24 : $Builtin.Int64, %22 : $Builtin.Int64) : $Builtin.Int1 + cond_br %25, bb4, bb31 + +bb4: + %27 = integer_literal $Builtin.Int64, 5 + %29 = struct_extract %0 : $Int64, #Int64._value + %30 = builtin "cmp_sgt_Int64"(%29 : $Builtin.Int64, %27 : $Builtin.Int64) : $Builtin.Int1 + cond_br %30, bb5, bb30 + +bb5: + %32 = integer_literal $Builtin.Int64, 6 + %34 = struct_extract %0 : $Int64, #Int64._value + %35 = builtin "cmp_sgt_Int64"(%34 : $Builtin.Int64, %32 : $Builtin.Int64) : $Builtin.Int1 + cond_br %35, bb6, bb29 + +bb6: + %37 = integer_literal $Builtin.Int64, 7 + %39 = struct_extract %0 : $Int64, #Int64._value + %40 = builtin "cmp_sgt_Int64"(%39 : $Builtin.Int64, %37 : $Builtin.Int64) : $Builtin.Int1 + cond_br %40, 
bb7, bb28 + +bb7: + %42 = integer_literal $Builtin.Int64, 8 + %44 = struct_extract %0 : $Int64, #Int64._value + %45 = builtin "cmp_sgt_Int64"(%44 : $Builtin.Int64, %42 : $Builtin.Int64) : $Builtin.Int1 + cond_br %45, bb8, bb27 + +bb8: + %47 = integer_literal $Builtin.Int64, 9 + %49 = struct_extract %0 : $Int64, #Int64._value + %50 = builtin "cmp_sgt_Int64"(%49 : $Builtin.Int64, %47 : $Builtin.Int64) : $Builtin.Int1 + cond_br %50, bb9, bb26 + +bb9: + %52 = integer_literal $Builtin.Int64, 10 + %54 = struct_extract %0 : $Int64, #Int64._value + %55 = builtin "cmp_sgt_Int64"(%54 : $Builtin.Int64, %52 : $Builtin.Int64) : $Builtin.Int1 + cond_br %55, bb10, bb25 + +bb10: + %57 = integer_literal $Builtin.Int64, 11 + %59 = struct_extract %0 : $Int64, #Int64._value + %60 = builtin "cmp_sgt_Int64"(%59 : $Builtin.Int64, %57 : $Builtin.Int64) : $Builtin.Int1 + cond_br %60, bb11, bb24 + +bb11: + %62 = integer_literal $Builtin.Int64, 12 + %64 = struct_extract %0 : $Int64, #Int64._value + %65 = builtin "cmp_sgt_Int64"(%64 : $Builtin.Int64, %62 : $Builtin.Int64) : $Builtin.Int1 + cond_br %65, bb12, bb23 + +bb12: + %67 = integer_literal $Builtin.Int64, 13 + %69 = struct_extract %0 : $Int64, #Int64._value + %70 = builtin "cmp_sgt_Int64"(%69 : $Builtin.Int64, %67 : $Builtin.Int64) : $Builtin.Int1 + cond_br %70, bb13, bb22 + + +bb13: + %72 = integer_literal $Builtin.Int64, 14 + %74 = struct_extract %0 : $Int64, #Int64._value + %75 = builtin "cmp_sgt_Int64"(%74 : $Builtin.Int64, %72 : $Builtin.Int64) : $Builtin.Int1 + cond_br %75, bb14, bb21 + +bb14: + %77 = integer_literal $Builtin.Int64, 15 + %79 = struct_extract %0 : $Int64, #Int64._value + %80 = builtin "cmp_sgt_Int64"(%79 : $Builtin.Int64, %77 : $Builtin.Int64) : $Builtin.Int1 + cond_br %80, bb15, bb20 + +bb15: + %82 = integer_literal $Builtin.Int64, 16 + %84 = struct_extract %0 : $Int64, #Int64._value + %85 = builtin "cmp_sgt_Int64"(%84 : $Builtin.Int64, %82 : $Builtin.Int64) : $Builtin.Int1 + cond_br %85, bb16, bb19 + +bb16: + %87 
= integer_literal $Builtin.Int64, 17 + %89 = struct_extract %0 : $Int64, #Int64._value + %90 = builtin "cmp_sgt_Int64"(%89 : $Builtin.Int64, %87 : $Builtin.Int64) : $Builtin.Int1 + cond_br %90, bb17, bb18 + +bb17: + %92 = integer_literal $Builtin.Int64, 7 + %93 = struct $Int64 (%92 : $Builtin.Int64) + store %93 to [trivial] %3 : $*Int64 + br bb18 + +bb18: + br bb19 + +bb19: + br bb20 + +bb20: + br bb21 + +bb21: + br bb22 + +bb22: + br bb23 + +bb23: + br bb24 + +bb24: + br bb25 + +bb25: + br bb26 + +bb26: + br bb27 + +bb27: + br bb28 + +bb28: + br bb29 + +bb29: + br bb30 + +bb30: + br bb31 + +bb31: + br bb32 + +bb32: + br bb33 + +bb33: + br bb34 + +bb34: + %112 = load [trivial] %3 : $*Int64 + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 + return %112 : $Int64 +} + +// CHECK-LABEL: sil [ossa] @simple_if : +// CHECK-NOT: alloc_stack +sil [ossa] @simple_if : $@convention(thin) (Int64) -> Int64 { +bb0(%0 : $Int64): + %1 = alloc_stack $Int64, var, name "c" + store %0 to [trivial] %1 : $*Int64 + %3 = alloc_stack $Int64, var, name "x" + %4 = integer_literal $Builtin.Int64, 0 +// CHECK: [[INIT:%[0-9]+]] = struct $Int64 + %5 = struct $Int64 (%4 : $Builtin.Int64) + store %5 to [trivial] %3 : $*Int64 + %8 = struct_extract %0 : $Int64, #Int64._value + %9 = builtin "cmp_sgt_Int64"(%4 : $Builtin.Int64, %8 : $Builtin.Int64) : $Builtin.Int1 +// CHECK: bb2([[INIT]] : $Int64) + cond_br %9, bb1, bb2 + +bb1: + %11 = integer_literal $Builtin.Int64, 2 +// CHECK: [[INIT2:%[0-9]+]] = struct $Int64 + %12 = struct $Int64 (%11 : $Builtin.Int64) + store %12 to [trivial] %3 : $*Int64 +// CHECK: bb2([[INIT2]] : $Int64) + br bb2 + +bb2: + %15 = load [trivial] %3 : $*Int64 + dealloc_stack %3 : $*Int64 + dealloc_stack %1 : $*Int64 + return %15 : $Int64 +} +// CHECK-LABEL: } // end sil function 'simple_if' + +enum X { + case One + case Two + case Three +} + +// CHECK-LABEL: sil [ossa] @test_switch : +// CHECK-NOT: alloc_stack +// CHECK-NOT: dealloc_stack +sil [ossa] @test_switch: 
$@convention(thin) (Int64, X) -> Int64 { +bb0(%0 : $Int64, %1 : $X): + %2 = alloc_stack $Int64, var, name "xi" + %3 = alloc_stack $X, var, name "c" + store %0 to [trivial] %2 : $*Int64 + store %1 to [trivial] %3 : $*X + %6 = alloc_stack $Int64, var, name "x" + store %0 to [trivial] %6 : $*Int64 + %8 = tuple () + switch_enum %1 : $X, case #X.One!enumelt: bb1, case #X.Two!enumelt: bb3, case #X.Three!enumelt: bb5 + +bb1: + br bb2 + +bb2: + %11 = integer_literal $Builtin.Int64, 3 + %12 = struct $Int64 (%11 : $Builtin.Int64) + store %12 to [trivial] %6 : $*Int64 + br bb7 + +bb3: + br bb4 + +bb4: + %16 = integer_literal $Builtin.Int64, 2 + %17 = struct $Int64 (%16 : $Builtin.Int64) + store %17 to [trivial] %6 : $*Int64 + br bb7 + +bb5: + br bb6 + +bb6: + %21 = integer_literal $Builtin.Int64, 1 + %22 = struct $Int64 (%21 : $Builtin.Int64) + store %22 to [trivial] %6 : $*Int64 + br bb7 + +bb7: + %25 = load [trivial] %6 : $*Int64 + dealloc_stack %6 : $*Int64 + dealloc_stack %3 : $*X + dealloc_stack %2 : $*Int64 + return %25 : $Int64 +} +// CHECK-LABEL: } // end sil function 'test_switch' + + diff --git a/test/SILOptimizer/mem2reg_unreachable_ossa.sil b/test/SILOptimizer/mem2reg_unreachable_ossa.sil new file mode 100644 index 0000000000000..c6cda0dcd2553 --- /dev/null +++ b/test/SILOptimizer/mem2reg_unreachable_ossa.sil @@ -0,0 +1,68 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -mem2reg | %FileCheck %s + +// Make sure we are not crashing on blocks that are not dominated by the entry block.
+ +sil_stage canonical + +import Builtin +import Swift + +// CHECK-LABEL: sil [ossa] @_TF4main3fooFT1xSi1ySi_Si : +// CHECK-NEXT: alloc_stack +// CHECK-LABEL: } // end sil function '_TF4main3fooFT1xSi1ySi_Si' +sil [ossa] @_TF4main3fooFT1xSi1ySi_Si : $@convention(thin) (Int32, Int32) -> Int32 { +bb0(%0 : $Int32, %1 : $Int32): + debug_value %0 : $Int32, let, name "x" + debug_value %1 : $Int32, let, name "y" + %4 = alloc_stack $Int32, var, name "g" + %6 = struct_extract %1 : $Int32, #Int32._value + %7 = struct_extract %0 : $Int32, #Int32._value + %8 = builtin "cmp_sgt_Word"(%6 : $Builtin.Int32, %7 : $Builtin.Int32) : $Builtin.Int1 + %9 = struct $Bool (%8 : $Builtin.Int1) + %10 = struct_extract %9 : $Bool, #Bool._value + br bb2 + +// bb1 is unreachable +// CHECK-LABEL: bb1: +// CHECK-NEXT: br bb3 +bb1: + %12 = integer_literal $Builtin.Int32, 5 + %13 = struct $Int32 (%12 : $Builtin.Int32) + store %13 to [trivial] %4 : $*Int32 + br bb3 + +bb2: + %16 = integer_literal $Builtin.Int32, 4 + %17 = struct $Int32 (%16 : $Builtin.Int32) + store %17 to [trivial] %4 : $*Int32 + br bb3 + +bb3: + %20 = load [trivial] %4 : $*Int32 + dealloc_stack %4 : $*Int32 + return %20 : $Int32 +} + +struct S {} + +// CHECK-LABEL: sil hidden [ossa] @handle_unreachable : +// CHECK-NOT: alloc_stack +// CHECK: debug_value undef : $S, let, name "newvalue", argno 1 +// CHECK-LABEL: } // end sil function 'handle_unreachable' +sil hidden [ossa] @handle_unreachable : $@convention(thin) () -> () { +bb0: + %0 = alloc_stack $S, var, name "x" + %1 = struct $S () + store %1 to [trivial] %0 : $*S + unreachable + +bb1: + debug_value_addr %0 : $*S, let, name "newvalue", argno 1 + br bb2 + +bb2: + %3 = load [trivial] %0 : $*S + dealloc_stack %0 : $*S + %4 = tuple () + return %4 : $() +} From 0ea5d055a20e2066ba1e034d52b1dead007a4dab Mon Sep 17 00:00:00 2001 From: Meghana Gupta Date: Mon, 12 Oct 2020 11:20:27 -0700 Subject: [PATCH 4/5] Delete debug_value_addr of stack location, if a debug_value of the RunningVal is 
already found --- lib/SILOptimizer/Transforms/SILMem2Reg.cpp | 4 +++- test/SILOptimizer/mem2reg_ossa_nontrivial.sil | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp index e0b43fb8e24ac..4c2db62703e36 100644 --- a/lib/SILOptimizer/Transforms/SILMem2Reg.cpp +++ b/lib/SILOptimizer/Transforms/SILMem2Reg.cpp @@ -323,8 +323,10 @@ promoteDebugValueAddr(DebugValueAddrInst *DVAI, SILValue Value, SILBuilder &B) { // Avoid inserting the same debug_value twice. for (Operand *Use : Value->getUses()) if (auto *DVI = dyn_cast(Use->getUser())) - if (*DVI->getVarInfo() == *DVAI->getVarInfo()) + if (*DVI->getVarInfo() == *DVAI->getVarInfo()) { + DVAI->eraseFromParent(); return; + } B.setInsertionPoint(DVAI); B.setCurrentDebugScope(DVAI->getDebugScope()); B.createDebugValue(DVAI->getLoc(), Value, *DVAI->getVarInfo()); diff --git a/test/SILOptimizer/mem2reg_ossa_nontrivial.sil b/test/SILOptimizer/mem2reg_ossa_nontrivial.sil index 33f643644f8ed..5a05d7202733f 100644 --- a/test/SILOptimizer/mem2reg_ossa_nontrivial.sil +++ b/test/SILOptimizer/mem2reg_ossa_nontrivial.sil @@ -613,3 +613,20 @@ bb0(%0 : @owned $(Builtin.BridgeObject, Builtin.Int32)): %7 = tuple () return %7 : $() } + +// CHECK-LABEL: sil [ossa] @multiple_debug_value : +// CHECK-NOT: alloc_stack +// CHECK-LABEL: } // end sil function 'multiple_debug_value' +sil [ossa] @multiple_debug_value : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + debug_value %0 : $Klass + %2 = alloc_stack $Klass + store %0 to [init] %2 : $*Klass + debug_value_addr %2 : $*Klass + %5 = load [take] %2 : $*Klass + destroy_value %5 : $Klass + dealloc_stack %2 : $*Klass + %7 = tuple () + return %7 : $() +} + From 06eaf6bba4de85631c532cb9bb9cd232758706c4 Mon Sep 17 00:00:00 2001 From: Meghana Gupta Date: Mon, 12 Oct 2020 11:21:01 -0700 Subject: [PATCH 5/5] Disable SILCombine of unchecked_bitwise_cast to 
unchecked_ref_cast in OSSA unchecked_ref_cast is a forwarding cast while unchecked_bitwise_cast is not. We cannot just convert one to other in OSSA. Disable it now. --- .../SILCombiner/SILCombinerCastVisitors.cpp | 3 +++ test/SILOptimizer/sil_combine.sil | 14 ++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp index 4230ceaa37d28..35735ba6c9c5c 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerCastVisitors.cpp @@ -472,6 +472,9 @@ visitUncheckedTrivialBitCastInst(UncheckedTrivialBitCastInst *UTBCI) { SILInstruction * SILCombiner:: visitUncheckedBitwiseCastInst(UncheckedBitwiseCastInst *UBCI) { + if (UBCI->getFunction()->hasOwnership()) + return nullptr; + // (unchecked_bitwise_cast Y->Z (unchecked_bitwise_cast X->Y x)) // OR (unchecked_trivial_cast Y->Z (unchecked_bitwise_cast X->Y x)) // -> diff --git a/test/SILOptimizer/sil_combine.sil b/test/SILOptimizer/sil_combine.sil index 23978c8b8d17c..cdea966bd7e03 100644 --- a/test/SILOptimizer/sil_combine.sil +++ b/test/SILOptimizer/sil_combine.sil @@ -8,6 +8,7 @@ sil_stage canonical import Builtin import Swift +class Klass {} class RawBuffer {} class HeapBufferStorage : RawBuffer {} @@ -4116,3 +4117,16 @@ bb0: %1 = load %0 : $*Int64 return %1 : $Int64 } + +// Check for disabled optimization of unchecked_bitwise_cast to unchecked_ref_cast in ossa +// This test can be optimized when ossa is supported in the SILCombine for unchecked_bitwise_cast +// CHECK-LABEL: sil [ossa] @refcast : +// CHECK: unchecked_bitwise_cast +// CHECK-LABEL: } // end sil function 'refcast' +sil [ossa] @refcast : $@convention(thin) (@owned Klass) -> @owned Optional { +bb0(%0 : @owned $Klass): + %1 = unchecked_bitwise_cast %0 : $Klass to $Optional + %2 = copy_value %1 : $Optional + destroy_value %0 : $Klass + return %2 : $Optional +}