diff --git a/docs/SIL.rst b/docs/SIL.rst index 34143b4724f97..ab25e2cce843c 100644 --- a/docs/SIL.rst +++ b/docs/SIL.rst @@ -5148,6 +5148,21 @@ unchecked_bitwise_cast Bitwise copies an object of type ``A`` into a new object of type ``B`` of the same size or smaller. +unchecked_value_cast +```````````````````` +:: + + sil-instruction ::= 'unchecked_value_cast' sil-operand 'to' sil-type + + %1 = unchecked_value_cast %0 : $A to $B + +Bitwise copies an object of type ``A`` into a new layout-compatible object of +type ``B`` of the same size. + +This instruction forwards a fixed ownership kind, set when the instruction is +constructed, and is lowered to ``unchecked_bitwise_cast`` in non-OSSA code. That +lowering loses the cast's guarantee of layout compatibility. + ref_to_raw_pointer `````````````````` :: diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index 10422ec523278..8eafabda8a62d 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -1102,6 +1102,12 @@ class SILBuilder { getSILDebugLocation(Loc), Op, Ty, getFunction(), C.OpenedArchetypes)); } + UncheckedValueCastInst *createUncheckedValueCast(SILLocation Loc, SILValue Op, + SILType Ty) { + return insert(UncheckedValueCastInst::create( + getSILDebugLocation(Loc), Op, Ty, getFunction(), C.OpenedArchetypes)); + } + RefToBridgeObjectInst *createRefToBridgeObject(SILLocation Loc, SILValue Ref, SILValue Bits) { auto Ty = SILType::getBridgeObjectType(getASTContext()); @@ -1847,7 +1853,18 @@ class SILBuilder { // Unchecked cast helpers //===--------------------------------------------------------------------===// - // Create the appropriate cast instruction based on result type. + /// Create the appropriate cast instruction based on result type. + /// + /// NOTE: We allow for non-layout-compatible casts that shrink the underlying + /// type we are bit casting! + SingleValueInstruction * + createUncheckedReinterpretCast(SILLocation Loc, SILValue Op, SILType Ty); + + /// Create an appropriate cast instruction based on result type. + /// + /// NOTE: This assumes that the input type and the result type are layout + /// compatible. Reduces to createUncheckedReinterpretCast when ownership is + /// disabled. SingleValueInstruction *createUncheckedBitCast(SILLocation Loc, SILValue Op, SILType Ty); diff --git a/include/swift/SIL/SILCloner.h b/include/swift/SIL/SILCloner.h index 24d4fadca8dbe..e7c544992dddc 100644 --- a/include/swift/SIL/SILCloner.h +++ b/include/swift/SIL/SILCloner.h @@ -1538,6 +1538,16 @@ visitUncheckedBitwiseCastInst(UncheckedBitwiseCastInst *Inst) { getOpType(Inst->getType()))); } +template <typename ImplClass> +void SILCloner<ImplClass>::visitUncheckedValueCastInst( UncheckedValueCastInst *Inst) { + getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); + recordClonedInstruction(Inst, getBuilder().createUncheckedValueCast( + getOpLocation(Inst->getLoc()), + getOpValue(Inst->getOperand()), + getOpType(Inst->getType()))); +} + template <typename ImplClass> void SILCloner<ImplClass>:: diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index d57e8454a096e..dd9e3731b9ac9 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -4576,6 +4576,23 @@ class UncheckedBitwiseCastInst final SILFunction &F, SILOpenedArchetypesState &OpenedArchetypes); }; +/// Bitwise copy a value into another value of the same size. 
+class UncheckedValueCastInst final + : public UnaryInstructionWithTypeDependentOperandsBase< + SILInstructionKind::UncheckedValueCastInst, UncheckedValueCastInst, + OwnershipForwardingConversionInst> { + friend SILBuilder; + + UncheckedValueCastInst(SILDebugLocation DebugLoc, SILValue Operand, + ArrayRef<SILValue> TypeDependentOperands, SILType Ty) + : UnaryInstructionWithTypeDependentOperandsBase( + DebugLoc, Operand, TypeDependentOperands, Ty, + Operand.getOwnershipKind()) {} + static UncheckedValueCastInst * + create(SILDebugLocation DebugLoc, SILValue Operand, SILType Ty, + SILFunction &F, SILOpenedArchetypesState &OpenedArchetypes); +}; + /// Build a Builtin.BridgeObject from a heap object reference by bitwise-or-ing /// in bits from a word. class RefToBridgeObjectInst diff --git a/include/swift/SIL/SILNodes.def b/include/swift/SIL/SILNodes.def index 352589fde1a81..d26054e66aed4 100644 --- a/include/swift/SIL/SILNodes.def +++ b/include/swift/SIL/SILNodes.def @@ -500,6 +500,8 @@ ABSTRACT_VALUE_AND_INST(SingleValueInstruction, ValueBase, SILInstruction) ConversionInst, None, DoesNotRelease) SINGLE_VALUE_INST(UncheckedBitwiseCastInst, unchecked_bitwise_cast, ConversionInst, None, DoesNotRelease) + SINGLE_VALUE_INST(UncheckedValueCastInst, unchecked_value_cast, + ConversionInst, None, DoesNotRelease) SINGLE_VALUE_INST(RefToRawPointerInst, ref_to_raw_pointer, ConversionInst, None, DoesNotRelease) SINGLE_VALUE_INST(RawPointerToRefInst, raw_pointer_to_ref, diff --git a/include/swift/SILOptimizer/Utils/CFGOptUtils.h b/include/swift/SILOptimizer/Utils/CFGOptUtils.h index 120d19ec9eb9e..35de0e33cc6f8 100644 --- a/include/swift/SILOptimizer/Utils/CFGOptUtils.h +++ b/include/swift/SILOptimizer/Utils/CFGOptUtils.h @@ -107,6 +107,10 @@ bool splitCriticalEdgesFrom(SILBasicBlock *fromBB, DominanceInfo *domInfo = nullptr, SILLoopInfo *loopInfo = nullptr); +/// Splits all critical edges that have `toBB` as a destination. +bool splitCriticalEdgesTo(SILBasicBlock *toBB, DominanceInfo *domInfo = nullptr, + SILLoopInfo *loopInfo = nullptr); + /// Splits the edges between two basic blocks. /// /// Updates dominance information and loop information if not null. 
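As a quick illustration of the new instruction and the builder API above, here is a minimal OSSA sketch; the types ``$A`` and ``$B`` are hypothetical and assumed to be layout compatible::

    sil [ossa] @cast_value : $@convention(thin) (@owned A) -> @owned B {
    bb0(%0 : @owned $A):
      // Forwards the @owned ownership of %0 into %1.
      %1 = unchecked_value_cast %0 : $A to $B
      return %1 : $B
    }

Once ownership is stripped, the same cast is expected to lower to ``%1 = unchecked_bitwise_cast %0 : $A to $B``, at which point the layout-compatibility guarantee documented in docs/SIL.rst above no longer applies.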
diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index 8f489458e455d..ad2d0dfc79c44 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -1026,6 +1026,9 @@ class IRGenSILFunction : void visitUncheckedAddrCastInst(UncheckedAddrCastInst *i); void visitUncheckedTrivialBitCastInst(UncheckedTrivialBitCastInst *i); void visitUncheckedBitwiseCastInst(UncheckedBitwiseCastInst *i); + void visitUncheckedValueCastInst(UncheckedValueCastInst *i) { + llvm_unreachable("Should never be seen in Lowered SIL"); + } void visitRefToRawPointerInst(RefToRawPointerInst *i); void visitRawPointerToRefInst(RawPointerToRefInst *i); void visitThinToThickFunctionInst(ThinToThickFunctionInst *i); diff --git a/lib/IRGen/LoadableByAddress.cpp b/lib/IRGen/LoadableByAddress.cpp index 01abf45d6e0e7..8ec81868638ce 100644 --- a/lib/IRGen/LoadableByAddress.cpp +++ b/lib/IRGen/LoadableByAddress.cpp @@ -1544,7 +1544,7 @@ void LoadableStorageAllocation:: storageType); if (pass.containsDifferentFunctionSignature(pass.F->getLoweredFunctionType(), storageType)) { - auto *castInstr = argBuilder.createUncheckedBitCast( + auto *castInstr = argBuilder.createUncheckedReinterpretCast( RegularLocation(const_cast<ValueDecl *>(arg->getDecl())), arg, newSILType); arg->replaceAllUsesWith(castInstr); @@ -1912,8 +1912,8 @@ static void castTupleInstr(SingleValueInstruction *instr, IRGenModule &Mod, switch (instr->getKind()) { // Add cast to the new sil function type: case SILInstructionKind::TupleExtractInst: { - castInstr = castBuilder.createUncheckedBitCast(instr->getLoc(), instr, - newSILType.getObjectType()); + castInstr = castBuilder.createUncheckedReinterpretCast( + instr->getLoc(), instr, newSILType.getObjectType()); break; } case SILInstructionKind::TupleElementAddrInst: { @@ -2471,8 +2471,8 @@ getOperandTypeWithCastIfNecessary(SILInstruction *containingInstr, SILValue op, } assert(currSILType.isObject() && "Expected an object type"); if (newSILType != currSILType) { - auto castInstr = builder.createUncheckedBitCast(containingInstr->getLoc(), - op, newSILType); + auto castInstr = builder.createUncheckedReinterpretCast( + containingInstr->getLoc(), op, newSILType); return castInstr; } } @@ -2653,8 +2653,8 @@ bool LoadableByAddress::recreateUncheckedEnumDataInstr( auto *takeEnum = enumBuilder.createUncheckedEnumData( enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(), caseTy); - newInstr = enumBuilder.createUncheckedBitCast(enumInstr->getLoc(), takeEnum, - newType); + newInstr = enumBuilder.createUncheckedReinterpretCast(enumInstr->getLoc(), + takeEnum, newType); } else { newInstr = enumBuilder.createUncheckedEnumData( enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(), @@ -2708,7 +2708,7 @@ bool LoadableByAddress::fixStoreToBlockStorageInstr( if (destType.getObjectType() != srcType) { // Add cast to destType SILBuilderWithScope castBuilder(instr); - auto *castInstr = castBuilder.createUncheckedBitCast( + auto *castInstr = castBuilder.createUncheckedReinterpretCast( instr->getLoc(), src, destType.getObjectType()); instr->setOperand(StoreInst::Src, castInstr); } diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp index 0246ff0d18219..26aecc97afdf5 100644 --- a/lib/SIL/IR/OperandOwnership.cpp +++ b/lib/SIL/IR/OperandOwnership.cpp @@ -350,6 +350,7 @@ FORWARD_ANY_OWNERSHIP_INST(DestructureTuple) FORWARD_ANY_OWNERSHIP_INST(InitExistentialRef) FORWARD_ANY_OWNERSHIP_INST(DifferentiableFunction) FORWARD_ANY_OWNERSHIP_INST(LinearFunction) 
+FORWARD_ANY_OWNERSHIP_INST(UncheckedValueCast) #undef FORWARD_ANY_OWNERSHIP_INST // An instruction that forwards a constant ownership or trivial ownership. diff --git a/lib/SIL/IR/SILBuilder.cpp b/lib/SIL/IR/SILBuilder.cpp index 37b3ce61d0d79..037bfc9b3e6ec 100644 --- a/lib/SIL/IR/SILBuilder.cpp +++ b/lib/SIL/IR/SILBuilder.cpp @@ -141,7 +141,8 @@ SILBuilder::createClassifyBridgeObject(SILLocation Loc, SILValue value) { // Create the appropriate cast instruction based on result type. SingleValueInstruction * -SILBuilder::createUncheckedBitCast(SILLocation Loc, SILValue Op, SILType Ty) { +SILBuilder::createUncheckedReinterpretCast(SILLocation Loc, SILValue Op, + SILType Ty) { assert(isLoadableOrOpaque(Ty)); if (Ty.isTrivial(getFunction())) return insert(UncheckedTrivialBitCastInst::create( @@ -156,6 +157,26 @@ SILBuilder::createUncheckedBitCast(SILLocation Loc, SILValue Op, SILType Ty) { getSILDebugLocation(Loc), Op, Ty, getFunction(), C.OpenedArchetypes)); } +// Create the appropriate cast instruction based on result type. +SingleValueInstruction * +SILBuilder::createUncheckedBitCast(SILLocation Loc, SILValue Op, SILType Ty) { + // Without ownership, delegate to unchecked reinterpret cast. + if (!hasOwnership()) + return createUncheckedReinterpretCast(Loc, Op, Ty); + + assert(isLoadableOrOpaque(Ty)); + if (Ty.isTrivial(getFunction())) + return insert(UncheckedTrivialBitCastInst::create( + getSILDebugLocation(Loc), Op, Ty, getFunction(), C.OpenedArchetypes)); + + if (SILType::canRefCast(Op->getType(), Ty, getModule())) + return createUncheckedRefCast(Loc, Op, Ty); + + // The destination type is nontrivial, and may be smaller than the source + // type, so RC identity cannot be assumed. + return createUncheckedValueCast(Loc, Op, Ty); +} + BranchInst *SILBuilder::createBranch(SILLocation Loc, SILBasicBlock *TargetBlock, OperandValueArrayRef Args) { diff --git a/lib/SIL/IR/SILInstructions.cpp b/lib/SIL/IR/SILInstructions.cpp index 8ad354575fca6..85e21ea609f21 100644 --- a/lib/SIL/IR/SILInstructions.cpp +++ b/lib/SIL/IR/SILInstructions.cpp @@ -2217,6 +2217,21 @@ UncheckedRefCastInst::create(SILDebugLocation DebugLoc, SILValue Operand, TypeDependentOperands, Ty); } +UncheckedValueCastInst * +UncheckedValueCastInst::create(SILDebugLocation DebugLoc, SILValue Operand, + SILType Ty, SILFunction &F, + SILOpenedArchetypesState &OpenedArchetypes) { + SILModule &Mod = F.getModule(); + SmallVector<SILValue, 8> TypeDependentOperands; + collectTypeDependentOperands(TypeDependentOperands, OpenedArchetypes, F, + Ty.getASTType()); + unsigned size = + totalSizeToAlloc<swift::Operand>(1 + TypeDependentOperands.size()); + void *Buffer = Mod.allocateInst(size, alignof(UncheckedValueCastInst)); + return ::new (Buffer) + UncheckedValueCastInst(DebugLoc, Operand, TypeDependentOperands, Ty); +} + UncheckedAddrCastInst * UncheckedAddrCastInst::create(SILDebugLocation DebugLoc, SILValue Operand, SILType Ty, SILFunction &F, diff --git a/lib/SIL/IR/SILPrinter.cpp b/lib/SIL/IR/SILPrinter.cpp index 763c0122779b0..2225c047abd3a 100644 --- a/lib/SIL/IR/SILPrinter.cpp +++ b/lib/SIL/IR/SILPrinter.cpp @@ -1620,6 +1620,9 @@ class SILPrinter : public SILInstructionVisitor<SILPrinter> { void visitUncheckedBitwiseCastInst(UncheckedBitwiseCastInst *CI) { printUncheckedConversionInst(CI, CI->getOperand()); } + void visitUncheckedValueCastInst(UncheckedValueCastInst *CI) { + printUncheckedConversionInst(CI, CI->getOperand()); + } void visitRefToRawPointerInst(RefToRawPointerInst *CI) { printUncheckedConversionInst(CI, CI->getOperand()); } diff --git 
a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index 4acf9d278c6f5..be815d3cd89de 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -254,6 +254,7 @@ FORWARDING_OWNERSHIP_INST(Tuple) FORWARDING_OWNERSHIP_INST(UncheckedRefCast) FORWARDING_OWNERSHIP_INST(UnconditionalCheckedCast) FORWARDING_OWNERSHIP_INST(Upcast) +FORWARDING_OWNERSHIP_INST(UncheckedValueCast) FORWARDING_OWNERSHIP_INST(UncheckedEnumData) FORWARDING_OWNERSHIP_INST(SelectEnum) FORWARDING_OWNERSHIP_INST(Enum) diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index 8a9df9d667734..b237e70782399 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -3238,6 +3238,7 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, case SILInstructionKind::UncheckedAddrCastInst: case SILInstructionKind::UncheckedTrivialBitCastInst: case SILInstructionKind::UncheckedBitwiseCastInst: + case SILInstructionKind::UncheckedValueCastInst: case SILInstructionKind::UpcastInst: case SILInstructionKind::AddressToPointerInst: case SILInstructionKind::BridgeObjectToRefInst: @@ -3307,6 +3308,9 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, case SILInstructionKind::UncheckedBitwiseCastInst: ResultVal = B.createUncheckedBitwiseCast(InstLoc, Val, Ty); break; + case SILInstructionKind::UncheckedValueCastInst: + ResultVal = B.createUncheckedValueCast(InstLoc, Val, Ty); + break; case SILInstructionKind::UpcastInst: ResultVal = B.createUpcast(InstLoc, Val, Ty); break; diff --git a/lib/SIL/Utils/OwnershipUtils.cpp b/lib/SIL/Utils/OwnershipUtils.cpp index d696a9b1db227..8ece70a783b50 100644 --- a/lib/SIL/Utils/OwnershipUtils.cpp +++ b/lib/SIL/Utils/OwnershipUtils.cpp @@ -34,6 +34,7 @@ bool swift::isOwnershipForwardingValueKind(SILNodeKind kind) { case SILNodeKind::LinearFunctionInst: case SILNodeKind::OpenExistentialRefInst: case SILNodeKind::UpcastInst: + case SILNodeKind::UncheckedValueCastInst: case SILNodeKind::UncheckedRefCastInst: case SILNodeKind::ConvertFunctionInst: case SILNodeKind::RefToBridgeObjectInst: diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp index cca9546938961..c8ad63ff8be84 100644 --- a/lib/SIL/Verifier/SILVerifier.cpp +++ b/lib/SIL/Verifier/SILVerifier.cpp @@ -1904,6 +1904,12 @@ class SILVerifier : public SILVerifierBase<SILVerifier> { "Inst with qualified ownership in a function that is not qualified"); } + void checkUncheckedValueCastInst(UncheckedValueCastInst *) { + require( + F.hasOwnership(), + "Inst with qualified ownership in a function that is not qualified"); + } + template <class AI> void checkAccessEnforcement(AI *AccessInst) { if (AccessInst->getModule().getStage() != SILStage::Raw) { diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index 5829066d560c6..255bdeb4bafa9 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -1340,8 +1340,8 @@ class SILGenApply : public Lowering::ExprVisitor<SILGenApply> { && loweredResultTy.getASTType()->hasDynamicSelfType()) { assert(selfMetaTy); selfValue = SGF.emitManagedRValueWithCleanup( - SGF.B.createUncheckedBitCast(loc, selfValue.forward(SGF), - loweredResultTy)); + SGF.B.createUncheckedReinterpretCast(loc, selfValue.forward(SGF), + loweredResultTy)); } else { selfValue = SGF.emitManagedRValueWithCleanup( SGF.B.createUpcast(loc, selfValue.forward(SGF), loweredResultTy)); diff --git a/lib/SILGen/SILGenBuilder.cpp b/lib/SILGen/SILGenBuilder.cpp index bf0e64a90cc79..b5ab0668810a6 100644 --- a/lib/SILGen/SILGenBuilder.cpp +++ 
b/lib/SILGen/SILGenBuilder.cpp @@ -610,7 +610,7 @@ ManagedValue SILGenBuilder::createUncheckedBitCast(SILLocation loc, ManagedValue value, SILType type) { CleanupCloner cloner(*this, value); - SILValue cast = createUncheckedBitCast(loc, value.getValue(), type); + SILValue cast = createUncheckedReinterpretCast(loc, value.getValue(), type); // Currently createUncheckedBitCast only produces these // instructions. We assert here to make sure if this changes, this code is diff --git a/lib/SILGen/SILGenBuilder.h b/lib/SILGen/SILGenBuilder.h index b5b6c3567d213..1267b37d9e8af 100644 --- a/lib/SILGen/SILGenBuilder.h +++ b/lib/SILGen/SILGenBuilder.h @@ -279,7 +279,7 @@ class SILGenBuilder : public SILBuilder { ManagedValue createUncheckedAddrCast(SILLocation loc, ManagedValue op, SILType resultTy); - using SILBuilder::createUncheckedBitCast; + using SILBuilder::createUncheckedReinterpretCast; ManagedValue createUncheckedBitCast(SILLocation loc, ManagedValue original, SILType type); diff --git a/lib/SILGen/SILGenExpr.cpp b/lib/SILGen/SILGenExpr.cpp index dfd28c4511918..1523b5fc9b737 100644 --- a/lib/SILGen/SILGenExpr.cpp +++ b/lib/SILGen/SILGenExpr.cpp @@ -2066,8 +2066,8 @@ RValue RValueEmitter::visitUnderlyingToOpaqueExpr(UnderlyingToOpaqueExpr *E, if (value.getType() == opaqueTL.getLoweredType()) return RValue(SGF, E, value); - auto cast = SGF.B.createUncheckedBitCast(E, value.forward(SGF), - opaqueTL.getLoweredType()); + auto cast = SGF.B.createUncheckedReinterpretCast(E, value.forward(SGF), + opaqueTL.getLoweredType()); value = SGF.emitManagedRValueWithCleanup(cast); return RValue(SGF, E, value); diff --git a/lib/SILOptimizer/IPO/EagerSpecializer.cpp b/lib/SILOptimizer/IPO/EagerSpecializer.cpp index 7f47b83c5fcd9..f647a161b1164 100644 --- a/lib/SILOptimizer/IPO/EagerSpecializer.cpp +++ b/lib/SILOptimizer/IPO/EagerSpecializer.cpp @@ -36,8 +36,9 @@ #include "swift/AST/Type.h" #include "swift/SIL/SILFunction.h" #include "swift/SILOptimizer/PassManager/Transforms.h" -#include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" +#include "swift/SILOptimizer/Utils/CFGOptUtils.h" #include "swift/SILOptimizer/Utils/Generics.h" +#include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" #include "llvm/Support/Debug.h" using namespace swift; @@ -47,6 +48,16 @@ llvm::cl::opt<bool> EagerSpecializeFlag( "enable-eager-specializer", llvm::cl::init(true), llvm::cl::desc("Run the eager-specializer pass.")); +static void +cleanupCallArguments(SILBuilder &builder, SILLocation loc, + ArrayRef<SILValue> values, + ArrayRef<unsigned> valueIndicesThatNeedEndBorrow) { + for (int index : valueIndicesThatNeedEndBorrow) { + auto *lbi = cast<LoadBorrowInst>(values[index]); + builder.createEndBorrow(loc, lbi); + } +} + /// Returns true if the given return or throw block can be used as a merge point /// for new return or error values. static bool isTrivialReturnBlock(SILBasicBlock *RetBB) { @@ -127,12 +138,16 @@ static void addReturnValueImpl(SILBasicBlock *RetBB, SILBasicBlock *NewRetBB, Builder.createBranch(Loc, MergedBB, {OldRetVal}); } } + // Create a CFG edge from NewRetBB to MergedBB. Builder.setInsertionPoint(NewRetBB); SmallVector<SILValue, 2> BBArgs; if (!NewRetVal->getType().isVoid()) BBArgs.push_back(NewRetVal); Builder.createBranch(Loc, MergedBB, BBArgs); + + // Then split any critical edges we created to the merged block. + splitCriticalEdgesTo(MergedBB); } /// Adds a CFG edge from the unterminated NewRetBB to a merged "return" block. @@ -154,14 +169,10 @@ static void addThrowValue(SILBasicBlock *NewThrowBB, SILValue NewErrorVal) { /// /// TODO: Move this to Utils. 
static SILValue -emitApplyWithRethrow(SILBuilder &Builder, - SILLocation Loc, - SILValue FuncRef, - CanSILFunctionType CanSILFuncTy, - SubstitutionMap Subs, +emitApplyWithRethrow(SILBuilder &Builder, SILLocation Loc, SILValue FuncRef, + CanSILFunctionType CanSILFuncTy, SubstitutionMap Subs, ArrayRef<SILValue> CallArgs, - void (*EmitCleanup)(SILBuilder&, SILLocation)) { - + ArrayRef<unsigned> CallArgIndicesThatNeedEndBorrow) { auto &F = Builder.getFunction(); SILFunctionConventions fnConv(CanSILFuncTy, Builder.getModule()); @@ -176,30 +187,31 @@ emitApplyWithRethrow(SILBuilder &Builder, SILValue Error = ErrorBB->createPhiArgument( fnConv.getSILErrorType(F.getTypeExpansionContext()), ValueOwnershipKind::Owned); - - EmitCleanup(Builder, Loc); + cleanupCallArguments(Builder, Loc, CallArgs, + CallArgIndicesThatNeedEndBorrow); addThrowValue(ErrorBB, Error); } + // Advance Builder to the fall-thru path and return a SILArgument holding the // result value. Builder.clearInsertionPoint(); Builder.emitBlock(NormalBB); - return Builder.getInsertionBB()->createPhiArgument( + SILValue finalArgument = Builder.getInsertionBB()->createPhiArgument( fnConv.getSILResultType(F.getTypeExpansionContext()), ValueOwnershipKind::Owned); + cleanupCallArguments(Builder, Loc, CallArgs, CallArgIndicesThatNeedEndBorrow); + return finalArgument; } /// Emits code to invoke the specified specialized CalleeFunc using the /// provided SILBuilder. /// /// TODO: Move this to Utils. -static SILValue -emitInvocation(SILBuilder &Builder, - const ReabstractionInfo &ReInfo, - SILLocation Loc, - SILFunction *CalleeFunc, - ArrayRef<SILValue> CallArgs, - void (*EmitCleanup)(SILBuilder&, SILLocation)) { +static SILValue emitInvocation(SILBuilder &Builder, + const ReabstractionInfo &ReInfo, SILLocation Loc, + SILFunction *CalleeFunc, + ArrayRef<SILValue> CallArgs, + ArrayRef<unsigned> ArgsNeedEndBorrow) { auto *FuncRefInst = Builder.createFunctionRef(Loc, CalleeFunc); auto CanSILFuncTy = CalleeFunc->getLoweredFunctionType(); @@ -256,14 +268,15 @@ emitInvocation(SILBuilder &Builder, // or de-facto? if (!CanSILFuncTy->hasErrorResult() || CalleeFunc->findThrowBB() == CalleeFunc->end()) { - return Builder.createApply(CalleeFunc->getLocation(), FuncRefInst, - Subs, CallArgs, isNonThrowing); + auto *AI = Builder.createApply(CalleeFunc->getLocation(), FuncRefInst, Subs, + CallArgs, isNonThrowing); + cleanupCallArguments(Builder, Loc, CallArgs, ArgsNeedEndBorrow); + return AI; } - return emitApplyWithRethrow(Builder, CalleeFunc->getLocation(), - FuncRefInst, CalleeSubstFnTy, Subs, - CallArgs, - EmitCleanup); + return emitApplyWithRethrow(Builder, CalleeFunc->getLocation(), FuncRefInst, + CalleeSubstFnTy, Subs, CallArgs, + ArgsNeedEndBorrow); } /// Returns the thick metatype for the given SILType. @@ -323,8 +336,11 @@ class EagerDispatch { SILValue emitArgumentCast(CanSILFunctionType CalleeSubstFnTy, SILFunctionArgument *OrigArg, unsigned Idx); - SILValue emitArgumentConversion(SmallVectorImpl<SILValue> &CallArgs); + SILValue + emitArgumentConversion(SmallVectorImpl<SILValue> &CallArgs, + SmallVectorImpl<unsigned> &ArgAtIndexNeedsEndBorrow); }; + } // end anonymous namespace /// Inserts type checks in the original generic function for dispatching to the @@ -335,6 +351,7 @@ void EagerDispatch::emitDispatchTo(SILFunction *NewFunc) { auto ReturnBB = GenericFunc->findReturnBB(); if (ReturnBB != GenericFunc->end()) OldReturnBB = &*ReturnBB; + // 1. Emit a cascading sequence of type checks blocks. // First split the entry BB, moving all instructions to the FailedTypeCheckBB. 
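To make the new end-borrow bookkeeping concrete, here is a rough sketch of the SIL shape that ``emitArgumentConversion`` plus ``cleanupCallArguments`` aim to produce for a single guaranteed, indirect-to-direct converted argument; all value, block, and function names here are illustrative, not taken from this patch::

    %v = load_borrow %argAddr : $*C
    try_apply %specialized(%v) : $@convention(thin) (@guaranteed C) -> (@owned R, @error Error), normal bb1, error bb2

  bb1(%r : @owned $R):
    end_borrow %v : $C   // cleanupCallArguments on the fall-through path
    ...

  bb2(%e : @owned $Error):
    end_borrow %v : $C   // cleanupCallArguments on the rethrow path
    ...

Owned parameters instead take the ``load [take]`` path and need no end_borrow, which is why only the indices of borrowed arguments are recorded in ``ArgAtIndexNeedsEndBorrow``.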
@@ -384,23 +401,24 @@ void EagerDispatch::emitDispatchTo(SILFunction *NewFunc) { // 2. Convert call arguments, casting and adjusting for calling convention. SmallVector<SILValue, 8> CallArgs; - SILValue StoreResultTo = emitArgumentConversion(CallArgs); + SmallVector<unsigned, 8> ArgAtIndexNeedsEndBorrow; + SILValue StoreResultTo = + emitArgumentConversion(CallArgs, ArgAtIndexNeedsEndBorrow); // 3. Emit an invocation of the specialized function. // Emit any rethrow with no cleanup since all args have been forwarded and // nothing has been locally allocated or copied. - auto NoCleanup = [](SILBuilder&, SILLocation){}; - SILValue Result = - emitInvocation(Builder, ReInfo, Loc, NewFunc, CallArgs, NoCleanup); + SILValue Result = emitInvocation(Builder, ReInfo, Loc, NewFunc, CallArgs, + ArgAtIndexNeedsEndBorrow); // 4. Handle the return value. auto VoidTy = Builder.getModule().Types.getEmptyTupleType(); if (StoreResultTo) { // Store the direct result to the original result address. - Builder.createStore(Loc, Result, StoreResultTo, - StoreOwnershipQualifier::Unqualified); + Builder.emitStoreValueOperation(Loc, Result, StoreResultTo, + StoreOwnershipQualifier::Init); // And return Void. Result = Builder.createTuple(Loc, VoidTy, { }); } @@ -416,7 +434,10 @@ void EagerDispatch::emitDispatchTo(SILFunction *NewFunc) { auto resultTy = GenericFunc->getConventions().getSILResultType( Builder.getTypeExpansionContext()); auto GenResultTy = GenericFunc->mapTypeIntoContext(resultTy); - auto CastResult = Builder.createUncheckedBitCast(Loc, Result, GenResultTy); + + SILValue CastResult = + Builder.createUncheckedBitCast(Loc, Result, GenResultTy); + addReturnValue(Builder.getInsertionBB(), OldReturnBB, CastResult); } } @@ -620,8 +641,9 @@ SILValue EagerDispatch::emitArgumentCast(CanSILFunctionType CalleeSubstFnTy, /// /// Returns the SILValue to store the result into if the specialized function /// has a direct result. -SILValue EagerDispatch:: -emitArgumentConversion(SmallVectorImpl<SILValue> &CallArgs) { +SILValue EagerDispatch::emitArgumentConversion( + SmallVectorImpl<SILValue> &CallArgs, + SmallVectorImpl<unsigned> &ArgAtIndexNeedsEndBorrow) { auto OrigArgs = GenericFunc->begin()->getSILFunctionArguments(); assert(OrigArgs.size() == substConv.getNumSILArguments() && "signature mismatch"); @@ -661,6 +683,7 @@ emitArgumentConversion(SmallVectorImpl<SILValue> &CallArgs) { CallArgs.push_back(CastArg); continue; } + if (ArgIdx < substConv.getSILArgIndexOfFirstParam()) { // Handle result arguments. unsigned formalIdx = @@ -672,29 +695,43 @@ emitArgumentConversion(SmallVectorImpl<SILValue> &CallArgs) { StoreResultTo = CastArg; continue; } + CallArgs.push_back(CastArg); + continue; + } + + // Handle arguments for formal parameters. + unsigned paramIdx = ArgIdx - substConv.getSILArgIndexOfFirstParam(); + if (!ReInfo.isParamConverted(paramIdx)) { + CallArgs.push_back(CastArg); + continue; + } + + // An argument is converted from indirect to direct. Instead of the + // address we pass the loaded value. + // + // FIXME: If type of CastArg is an archetype, but it is loadable because + // of a layout constraint on the caller side, we have a problem here. + // We need to load the value on the caller side, but this archetype is + // not statically known to be loadable on the caller side (though we + // have proven dynamically that it has a fixed size). + // + // We can try to load it as an int value of width N, but then it is not + // clear how to convert it into a value of the archetype type, which is + // expected. 
Maybe we should pass it as @in parameter and make it + // loadable on the caller's side? + auto argConv = substConv.getSILArgumentConvention(ArgIdx); + SILValue Val; + if (!argConv.isGuaranteedConvention()) { + Val = Builder.emitLoadValueOperation(Loc, CastArg, LoadOwnershipQualifier::Take); } else { - // Handle arguments for formal parameters. - unsigned paramIdx = ArgIdx - substConv.getSILArgIndexOfFirstParam(); - if (ReInfo.isParamConverted(paramIdx)) { - // An argument is converted from indirect to direct. Instead of the - // address we pass the loaded value. - // FIXME: If type of CastArg is an archetype, but it is loadable because - // of a layout constraint on the caller side, we have a problem here - // We need to load the value on the caller side, but this archetype is - // not statically known to be loadable on the caller side (though we - // have proven dynamically that it has a fixed size). - // We can try to load it as an int value of width N, but then it is not - // clear how to convert it into a value of the archetype type, which is - // expected. May be we should pass it as @in parameter and make it - // loadable on the caller's side? - SILValue Val = Builder.createLoad(Loc, CastArg, - LoadOwnershipQualifier::Unqualified); - CallArgs.push_back(Val); - continue; - } + Val = Builder.emitLoadBorrowOperation(Loc, CastArg); + if (Val.getOwnershipKind() == ValueOwnershipKind::Guaranteed) + ArgAtIndexNeedsEndBorrow.push_back(CallArgs.size()); } - CallArgs.push_back(CastArg); + CallArgs.push_back(Val); } + return StoreResultTo; } @@ -745,11 +782,9 @@ void EagerSpecializerTransform::run() { // Process functions in any order. for (auto &F : *getModule()) { - // TODO: we should support ownership here but first we'll have to support - // ownership in GenericFuncSpecializer. - if (!F.shouldOptimize() || F.hasOwnership()) { + if (!F.shouldOptimize()) { LLVM_DEBUG(llvm::dbgs() << " Cannot specialize function " << F.getName() - << " because it has ownership or is marked to be " + << " because it is marked to be " "excluded from optimizations.\n"); continue; } @@ -787,6 +822,7 @@ void EagerSpecializerTransform::run() { [&](const SILSpecializeAttr *SA, SILFunction *NewFunc, const ReabstractionInfo &ReInfo) { if (NewFunc) { + NewFunc->verify(); Changed = true; EagerDispatch(&F, ReInfo).emitDispatchTo(NewFunc); } @@ -800,6 +836,7 @@ void EagerSpecializerTransform::run() { // As specializations are created, the attributes should be removed. F.clearSpecializeAttrs(); + F.verify(); } } diff --git a/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp b/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp index 33d574faddb73..769ee6c2cef75 100644 --- a/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp +++ b/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp @@ -93,6 +93,16 @@ struct OwnershipModelEliminatorVisitor bool visitSwitchEnumInst(SwitchEnumInst *SWI); bool visitDestructureStructInst(DestructureStructInst *DSI); bool visitDestructureTupleInst(DestructureTupleInst *DTI); + + // We lower this to unchecked_bitwise_cast, losing our assumption of layout + // compatibility. 
+ bool visitUncheckedValueCastInst(UncheckedValueCastInst *UVCI) { + auto *NewVal = B.createUncheckedBitwiseCast( + UVCI->getLoc(), UVCI->getOperand(), UVCI->getType()); + UVCI->replaceAllUsesWith(NewVal); + UVCI->eraseFromParent(); + return true; + } }; } // end anonymous namespace diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp index e71bba15b5f6c..9aa6581052f3f 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerApplyVisitors.cpp @@ -190,7 +190,8 @@ SILCombiner::optimizeApplyOfConvertFunctionInst(FullApplySite AI, auto UAC = Builder.createUncheckedAddrCast(AI.getLoc(), Op, NewOpType); Args.push_back(UAC); } else if (OldOpType.getASTType() != NewOpType.getASTType()) { - auto URC = Builder.createUncheckedBitCast(AI.getLoc(), Op, NewOpType); + auto URC = + Builder.createUncheckedReinterpretCast(AI.getLoc(), Op, NewOpType); Args.push_back(URC); } else { Args.push_back(Op); @@ -381,8 +382,8 @@ bool SILCombiner::tryOptimizeKeypathOffsetOf(ApplyInst *AI, return false; // Convert the projected address back to an optional integer. - SILValue offset = Builder.createUncheckedBitCast(loc, offsetPtr, - SILType::getPrimitiveObjectType(builtinIntTy)); + SILValue offset = Builder.createUncheckedReinterpretCast( + loc, offsetPtr, SILType::getPrimitiveObjectType(builtinIntTy)); SILValue offsetInt = Builder.createStruct(loc, intType, { offset }); result = Builder.createOptionalSome(loc, offsetInt, AI->getType()); } else { diff --git a/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp b/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp index ecd0e7db862ff..120cab28900ff 100644 --- a/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp +++ b/lib/SILOptimizer/Transforms/SpeculativeDevirtualizer.cpp @@ -562,9 +562,9 @@ static bool tryToSpeculateTarget(FullApplySite AI, ClassHierarchyAnalysis *CHA, if (LastCCBI && SubTypeValue == LastCCBI->getOperand()) { // Remove last checked_cast_br, because it will always succeed. SILBuilderWithScope B(LastCCBI); - auto CastedValue = B.createUncheckedBitCast(LastCCBI->getLoc(), - LastCCBI->getOperand(), - LastCCBI->getTargetLoweredType()); + auto CastedValue = B.createUncheckedReinterpretCast( + LastCCBI->getLoc(), LastCCBI->getOperand(), + LastCCBI->getTargetLoweredType()); B.createBranch(LastCCBI->getLoc(), LastCCBI->getSuccessBB(), {CastedValue}); LastCCBI->eraseFromParent(); ORE.emit(RB); diff --git a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp index 1d17283837cf9..88b831e20f2ba 100644 --- a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp +++ b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp @@ -177,6 +177,7 @@ static bool hasOpaqueArchetype(TypeExpansionContext context, case SILInstructionKind::UncheckedAddrCastInst: case SILInstructionKind::UncheckedTrivialBitCastInst: case SILInstructionKind::UncheckedBitwiseCastInst: + case SILInstructionKind::UncheckedValueCastInst: case SILInstructionKind::RefToRawPointerInst: case SILInstructionKind::RawPointerToRefInst: #define LOADABLE_REF_STORAGE(Name, ...) 
\ diff --git a/lib/SILOptimizer/Utils/CFGOptUtils.cpp b/lib/SILOptimizer/Utils/CFGOptUtils.cpp index d40cb378b8a45..213ebe5f15cf9 100644 --- a/lib/SILOptimizer/Utils/CFGOptUtils.cpp +++ b/lib/SILOptimizer/Utils/CFGOptUtils.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "swift/SILOptimizer/Utils/CFGOptUtils.h" +#include "swift/Basic/STLExtras.h" #include "swift/Demangling/ManglingMacros.h" #include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/Dominance.h" @@ -551,6 +552,20 @@ bool swift::splitCriticalEdgesFrom(SILBasicBlock *fromBB, return changed; } +bool swift::splitCriticalEdgesTo(SILBasicBlock *toBB, DominanceInfo *domInfo, + SILLoopInfo *loopInfo) { + bool changed = false; + unsigned numPreds = std::distance(toBB->pred_begin(), toBB->pred_end()); + + for (unsigned idx = 0; idx != numPreds; ++idx) { + SILBasicBlock *fromBB = *std::next(toBB->pred_begin(), idx); + auto *newBB = splitIfCriticalEdge(fromBB, toBB); + changed |= (newBB != nullptr); + } + + return changed; +} + bool swift::hasCriticalEdges(SILFunction &f, bool onlyNonCondBr) { for (SILBasicBlock &bb : f) { // Only consider critical edges for terminators that don't support block diff --git a/lib/SILOptimizer/Utils/InstOptUtils.cpp b/lib/SILOptimizer/Utils/InstOptUtils.cpp index 334cb559e142d..11a5bf7e79af4 100644 --- a/lib/SILOptimizer/Utils/InstOptUtils.cpp +++ b/lib/SILOptimizer/Utils/InstOptUtils.cpp @@ -853,7 +853,8 @@ swift::castValueToABICompatibleType(SILBuilder *builder, SILLocation loc, } // Cast between two metatypes and that's it. - return {builder->createUncheckedBitCast(loc, value, destTy), false}; + return {builder->createUncheckedReinterpretCast(loc, value, destTy), + false}; } } } diff --git a/lib/SILOptimizer/Utils/SILInliner.cpp b/lib/SILOptimizer/Utils/SILInliner.cpp index c8fc5e583298e..b08f9832e0237 100644 --- a/lib/SILOptimizer/Utils/SILInliner.cpp +++ b/lib/SILOptimizer/Utils/SILInliner.cpp @@ -714,6 +714,7 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) { case SILInstructionKind::UncheckedAddrCastInst: case SILInstructionKind::UncheckedTrivialBitCastInst: case SILInstructionKind::UncheckedBitwiseCastInst: + case SILInstructionKind::UncheckedValueCastInst: case SILInstructionKind::RawPointerToRefInst: case SILInstructionKind::RefToRawPointerInst: diff --git a/lib/Serialization/DeserializeSIL.cpp b/lib/Serialization/DeserializeSIL.cpp index 2ed57c9f0794b..86565d2ee9314 100644 --- a/lib/Serialization/DeserializeSIL.cpp +++ b/lib/Serialization/DeserializeSIL.cpp @@ -1263,6 +1263,7 @@ bool SILDeserializer::readSILInstruction(SILFunction *Fn, ONEOPERAND_ONETYPE_INST(UncheckedAddrCast) ONEOPERAND_ONETYPE_INST(UncheckedTrivialBitCast) ONEOPERAND_ONETYPE_INST(UncheckedBitwiseCast) + ONEOPERAND_ONETYPE_INST(UncheckedValueCast) ONEOPERAND_ONETYPE_INST(BridgeObjectToRef) ONEOPERAND_ONETYPE_INST(BridgeObjectToWord) ONEOPERAND_ONETYPE_INST(Upcast) diff --git a/lib/Serialization/ModuleFormat.h b/lib/Serialization/ModuleFormat.h index 88c11081e8905..9bf7c6b8e4647 100644 --- a/lib/Serialization/ModuleFormat.h +++ b/lib/Serialization/ModuleFormat.h @@ -55,7 +55,7 @@ const uint16_t SWIFTMODULE_VERSION_MAJOR = 0; /// describe what change you made. The content of this comment isn't important; /// it just ensures a conflict if two people change the module format. /// Don't worry about adhering to the 80-column limit for this line. 
-const uint16_t SWIFTMODULE_VERSION_MINOR = 562; // base_addr_for_offset instruction +const uint16_t SWIFTMODULE_VERSION_MINOR = 563; // unchecked_value_cast /// A standard hash seed used for all string hashes in a serialized module. /// diff --git a/lib/Serialization/SerializeSIL.cpp b/lib/Serialization/SerializeSIL.cpp index af20a44e2c83a..285db801310ff 100644 --- a/lib/Serialization/SerializeSIL.cpp +++ b/lib/Serialization/SerializeSIL.cpp @@ -1557,6 +1557,7 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { case SILInstructionKind::UncheckedAddrCastInst: case SILInstructionKind::UncheckedTrivialBitCastInst: case SILInstructionKind::UncheckedBitwiseCastInst: + case SILInstructionKind::UncheckedValueCastInst: case SILInstructionKind::BridgeObjectToRefInst: case SILInstructionKind::BridgeObjectToWordInst: case SILInstructionKind::UpcastInst: diff --git a/test/SIL/ownership-verifier/use_verifier.sil b/test/SIL/ownership-verifier/use_verifier.sil index 25e9865360868..e0ea189cea663 100644 --- a/test/SIL/ownership-verifier/use_verifier.sil +++ b/test/SIL/ownership-verifier/use_verifier.sil @@ -1307,3 +1307,24 @@ bb0(%0 : @guaranteed $ClassProtConformingRef, %1 : @owned $ClassProtConformingRe %5 = tuple(%3 : $ClassProt, %4 : $ClassProt) return %5 : $(ClassProt, ClassProt) } + +// Make sure that a generic struct that has an AnyObject constraint can be cast +// successfully. + +struct GenericS { + var t1: T + var t2: T +} + +sil [ossa] @unchecked_value_cast_forwarding : $@convention(thin) (@owned GenericS) -> @owned GenericS { +bb0(%0 : @owned $GenericS): + %1 = unchecked_value_cast %0 : $GenericS to $GenericS + return %1 : $GenericS +} + +sil [ossa] @unchecked_value_cast_forwarding_2 : $@convention(thin) (@guaranteed GenericS) -> @owned GenericS { +bb0(%0 : @guaranteed $GenericS): + %1 = unchecked_value_cast %0 : $GenericS to $GenericS + %2 = copy_value %1 : $GenericS + return %2 : $GenericS +} diff --git a/test/SILGen/specialize_attr.swift b/test/SILGen/specialize_attr.swift index b1816042ed18a..33185d824ba9c 100644 --- a/test/SILGen/specialize_attr.swift +++ b/test/SILGen/specialize_attr.swift @@ -1,10 +1,19 @@ - // RUN: %target-swift-emit-silgen -module-name specialize_attr -emit-verbose-sil %s | %FileCheck %s +// RUN: %target-swift-emit-silgen -module-name specialize_attr -emit-verbose-sil %s | %FileCheck %s +// RUN: %target-swift-emit-sil -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT %s -// CHECK-LABEL: @_specialize(exported: false, kind: full, where T == Int, U == Float) +// CHECK: @_specialize(exported: false, kind: full, where T == Int, U == Float) +// CHECK-NEXT: @_specialize(exported: false, kind: full, where T == Klass1, U == FakeString) // CHECK-NEXT: func specializeThis(_ t: T, u: U) @_specialize(where T == Int, U == Float) -func specializeThis(_ t: T, u: U) {} +@_specialize(where T == Klass1, U == FakeString) +public func specializeThis(_ t: T, u: U) {} + +// CHECK: @_specialize(exported: false, kind: full, where T == Klass1, U == FakeString) +// CHECK-NEXT: func specializeThis2(_ t: __owned T, u: __owned U) -> (T, U) +@_specialize(where T == Klass1, U == FakeString) +public func specializeThis2(_ t: __owned T, u: __owned U) -> (T, U) { + (t, u) +} public protocol PP { associatedtype PElt } public protocol QQ { associatedtype QElt } public struct SS : QQ { public typealias QElt = Int } -public struct GG {} +public class Klass1 {} +public class FakeStringData {} +public struct FakeString { + var f: FakeStringData? = nil +} + +public struct RRNonTrivial : PP { + public typealias PElt = Klass1 + var elt: Klass1? 
= nil +} +public struct SSNonTrivial : QQ { + public typealias QElt = FakeString + var elt: FakeString? = nil +} + +public struct GG { + var t: T +} // CHECK-LABEL: public class CC where T : PP { -// CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RR, U == SS) -// CHECK-NEXT: @inline(never) public func foo(_ u: U, g: GG) -> (U, GG) where U : QQ public class CC { + // CHECK-NEXT: var k: Klass1? + // To make non-trivial + var k: Klass1? = nil + + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RR, U == SS) + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RRNonTrivial, U == SSNonTrivial) + // CHECK-NEXT: @inline(never) public func foo(_ u: U, g: GG) -> (U, GG) where U : QQ @inline(never) @_specialize(where T == RR, U == SS) + @_specialize(where T == RRNonTrivial, U == SSNonTrivial) public func foo(_ u: U, g: GG) -> (U, GG) { return (u, g) } + + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RR, U == SS) + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RRNonTrivial, U == SSNonTrivial) + // CHECK-NEXT: @inline(never) public func foo2(_ u: __owned U, g: __owned GG) -> (U, GG) where U : QQ + @inline(never) + @_specialize(where T == RR, U == SS) + @_specialize(where T == RRNonTrivial, U == SSNonTrivial) + public func foo2(_ u: __owned U, g: __owned GG) -> (U, GG) { + return (u, g) + } } -// CHECK-LABEL: sil hidden [_specialize exported: false, kind: full, where T == Int, U == Float] [ossa] @$s15specialize_attr0A4This_1uyx_q_tr0_lF : $@convention(thin) (@in_guaranteed T, @in_guaranteed U) -> () { +// CHECK-LABEL: public struct CC2 where T : PP { +public struct CC2 { + // CHECK-NEXT: var k: Klass1? + // To make non-trivial + var k: Klass1? = nil -// CHECK-LABEL: sil [noinline] [_specialize exported: false, kind: full, where T == RR, U == SS] [ossa] @$s15specialize_attr2CCC3foo_1gqd___AA2GGVyxGtqd___AHtAA2QQRd__lF : $@convention(method) (@in_guaranteed U, GG, @guaranteed CC) -> (@out U, GG) { + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RR, U == SS) + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RRNonTrivial, U == SSNonTrivial) + // CHECK-NEXT: @inline(never) public mutating func foo(_ u: U, g: GG) -> (U, GG) where U : QQ + @inline(never) + @_specialize(where T == RR, U == SS) + @_specialize(where T == RRNonTrivial, U == SSNonTrivial) + mutating public func foo(_ u: U, g: GG) -> (U, GG) { + return (u, g) + } + + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RR, U == SS) + // CHECK-NEXT: @_specialize(exported: false, kind: full, where T == RRNonTrivial, U == SSNonTrivial) + // CHECK-NEXT: @inline(never) public mutating func foo2(_ u: __owned U, g: __owned GG) -> (U, GG) where U : QQ + @inline(never) + @_specialize(where T == RR, U == SS) + @_specialize(where T == RRNonTrivial, U == SSNonTrivial) + mutating public func foo2(_ u: __owned U, g: __owned GG) -> (U, GG) { + return (u, g) + } +} + +// CHECK-LABEL: sil [_specialize exported: false, kind: full, where T == Klass1, U == FakeString] [_specialize exported: false, kind: full, where T == Int, U == Float] [ossa] @$s15specialize_attr0A4This_1uyx_q_tr0_lF : $@convention(thin) (@in_guaranteed T, @in_guaranteed U) -> () { + +// CHECK-OPT-LABEL: sil shared [noinline] @$s15specialize_attr2CCC3foo_1gqd___AA2GGVyxGtqd___AHtAA2QQRd__lFAA12RRNonTrivialV_AA05SSNonH0VTg5 : $@convention(method) (@guaranteed SSNonTrivial, @guaranteed GG, @guaranteed CC) -> (@owned SSNonTrivial, @out GG) { + +// 
CHECK-OPT-LABEL: sil shared [noinline] @$s15specialize_attr2CCC4foo2_1gqd___AA2GGVyxGtqd__n_AHntAA2QQRd__lFAA2RRV_AA2SSVTg5 : $@convention(method) (SS, GG, @guaranteed CC) -> (SS, @out GG) { + +// CHECK-OPT-LABEL: sil [noinline] @$s15specialize_attr2CCC4foo2_1gqd___AA2GGVyxGtqd__n_AHntAA2QQRd__lF : $@convention(method) (@in U, @in GG, @guaranteed CC) -> (@out U, @out GG) { + +// CHECK-LABEL: sil [noinline] [_specialize exported: false, kind: full, where T == RRNonTrivial, U == SSNonTrivial] [_specialize exported: false, kind: full, where T == RR, U == SS] [ossa] @$s15specialize_attr2CCC3foo_1gqd___AA2GGVyxGtqd___AHtAA2QQRd__lF : $@convention(method) (@in_guaranteed U, @in_guaranteed GG, @guaranteed CC) -> (@out U, @out GG) { // ----------------------------------------------------------------------------- // Test user-specialized subscript accessors. @@ -54,10 +129,13 @@ public class ASubscriptable : TestSubscriptable { public subscript(i: Int) -> Element { @_specialize(where Element == Int) + @_specialize(where Element == Klass1) get { return storage[i] } + @_specialize(where Element == Int) + @_specialize(where Element == Klass1) set(rhs) { storage[i] = rhs } @@ -65,10 +143,10 @@ public class ASubscriptable : TestSubscriptable { } // ASubscriptable.subscript.getter with _specialize -// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr14ASubscriptableCyxSicig : $@convention(method) (Int, @guaranteed ASubscriptable) -> @out Element { +// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Klass1] [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr14ASubscriptableCyxSicig : $@convention(method) (Int, @guaranteed ASubscriptable) -> @out Element { // ASubscriptable.subscript.setter with _specialize -// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr14ASubscriptableCyxSicis : $@convention(method) (@in Element, Int, @guaranteed ASubscriptable) -> () { +// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Klass1] [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr14ASubscriptableCyxSicis : $@convention(method) (@in Element, Int, @guaranteed ASubscriptable) -> () { // ASubscriptable.subscript.modify with no attribute // CHECK-LABEL: sil [transparent] [serialized] [ossa] @$s15specialize_attr14ASubscriptableCyxSiciM : $@yield_once @convention(method) (Int, @guaranteed ASubscriptable) -> @yields @inout Element { @@ -82,10 +160,13 @@ public class Addressable : TestSubscriptable { public subscript(i: Int) -> Element { @_specialize(where Element == Int) + @_specialize(where Element == Klass1) unsafeAddress { return UnsafePointer(storage + i) } + @_specialize(where Element == Int) + @_specialize(where Element == Klass1) unsafeMutableAddress { return UnsafeMutablePointer(storage + i) } @@ -93,10 +174,10 @@ public class Addressable : TestSubscriptable { } // Addressable.subscript.unsafeAddressor with _specialize -// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr11AddressableCyxSicilu : $@convention(method) (Int, @guaranteed Addressable) -> UnsafePointer { +// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Klass1] [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr11AddressableCyxSicilu : $@convention(method) (Int, @guaranteed 
Addressable) -> UnsafePointer { // Addressable.subscript.unsafeMutableAddressor with _specialize -// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr11AddressableCyxSiciau : $@convention(method) (Int, @guaranteed Addressable) -> UnsafeMutablePointer { +// CHECK-LABEL: sil [_specialize exported: false, kind: full, where Element == Klass1] [_specialize exported: false, kind: full, where Element == Int] [ossa] @$s15specialize_attr11AddressableCyxSiciau : $@convention(method) (Int, @guaranteed Addressable) -> UnsafeMutablePointer { // Addressable.subscript.getter with no attribute // CHECK-LABEL: sil [transparent] [serialized] [ossa] @$s15specialize_attr11AddressableCyxSicig : $@convention(method) (Int, @guaranteed Addressable) -> @out Element { diff --git a/test/SILOptimizer/eager_specialize_ossa.sil b/test/SILOptimizer/eager_specialize_ossa.sil new file mode 100644 index 0000000000000..7a6bd9a5f97ba --- /dev/null +++ b/test/SILOptimizer/eager_specialize_ossa.sil @@ -0,0 +1,1062 @@ +// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s -o %t.sil && %target-swift-frontend -module-name=eager_specialize -emit-ir %t.sil | %FileCheck --check-prefix=CHECK-IRGEN --check-prefix=CHECK-IRGEN-%target-cpu %s +// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer -sil-inline-generics=true -inline %s | %FileCheck --check-prefix=CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE %s + +sil_stage canonical + +import Builtin +import Swift +import SwiftShims + +public protocol AnElt { +} + +public protocol HasElt { + associatedtype Elt + init(e: Elt) +} + +struct X : AnElt { + @_hasStorage var i: Int { get set } + init(i: Int) +} + +struct S : HasElt { + typealias Elt = X + @_hasStorage var e: X { get set } + init(e: Elt) +} + +class Klass {} + +class EltTrivialKlass : AnElt { + var i: Int { get set } + init(i: Int) {} +} + +class ContainerKlass : HasElt { + typealias Elt = EltTrivialKlass + var e: Elt { get set } + required init(e: Elt) +} + +public struct G { + var container: Container? 
= nil + public func getContainer(e: Container.Elt) -> Container + init() +} + +public struct GTrivial { + public func getContainer(e: Container.Elt) -> Container + init() +} + +// CHECK: @_specialize(exported: false, kind: full, where T == S) +// CHECK: public func getGenericContainer(g: G, e: T.Elt) -> T where T : HasElt, T.Elt : AnElt +@_specialize(where T == S) +public func getGenericContainer(g: G, e: T.Elt) -> T where T.Elt : AnElt + +// CHECK: @_specialize(exported: false, kind: full, where T == S) +// CHECK: @_specialize(exported: false, kind: full, where T == ContainerKlass) +// CHECK: public func getGenericContainerTrivial(g: GTrivial, e: T.Elt) -> T where T : HasElt, T.Elt : AnElt +@_specialize(where T == S) +@_specialize(where T == ContainerKlass) +public func getGenericContainerTrivial(g: GTrivial, e: T.Elt) -> T where T.Elt : AnElt + +// CHECK: @_specialize(exported: false, kind: full, where T == ContainerKlass) +// CHECK: public func getGenericContainer2_owned(g: G, e: T.Elt) -> T where T : HasElt, T.Elt : AnElt +@_specialize(where T == ContainerKlass) +public func getGenericContainer2_owned(g: G, e: T.Elt) -> T where T.Elt : AnElt + +@_specialize(where T == ContainerKlass) +public func getGenericContainer2_guaranteed(g: G, e: T.Elt) -> T where T.Elt : AnElt + +enum ArithmeticError : Error { + case DivByZero + func hash(into hasher: inout Hasher) + var _code: Int { get } +} + +// CHECK: @_specialize(exported: false, kind: full, where T == Int) +// CHECK: public func divideNum(num: T, den: T) throws -> T where T : SignedInteger, T : _ExpressibleByBuiltinIntegerLiteral +@_specialize(where T == Int) +public func divideNum(num: T, den: T) throws -> T + +@inline(never) @_optimize(none) func foo(t: T) -> Int64 + +// CHECK: @_specialize(exported: false, kind: full, where T == Int64) +// CHECK: @_specialize(exported: false, kind: full, where T == Float) +// CHECK: @_specialize(exported: false, kind: full, where T == Klass) +// CHECK: public func voidReturn(t: T) +@_specialize(where T == Int64) +@_specialize(where T == Float) +@_specialize(where T == Klass) +public func voidReturn(t: T) + +// CHECK: @_specialize(exported: false, kind: full, where T == Int64) +// CHECK: @_specialize(exported: false, kind: full, where T == Float) +// CHECK: @_specialize(exported: false, kind: full, where T == Klass) +// CHECK: public func nonvoidReturn(t: T) -> Int64 +@_specialize(where T == Int64) +@_specialize(where T == Float) +@_specialize(where T == Klass) +public func nonvoidReturn(t: T) -> Int64 + +// --- test: protocol conformance, substitution for dependent types +// non-layout dependent generic arguments, emitUncheckedBitCast (non +// address-type) + +// Helper +// +// G.getContainer(A.Elt) -> A +sil [ossa] @$s16eager_specialize1GV12getContaineryx3EltQzF : $@convention(method) (@in Container.Elt, @in_guaranteed G) -> @out Container { +bb0(%0 : $*Container, %1 : $*Container.Elt, %2 : $*G): + %4 = witness_method $Container, #HasElt.init!allocator : $@convention(witness_method: HasElt) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @thick τ_0_0.Type) -> @out τ_0_0 + %5 = metatype $@thick Container.Type + %6 = apply %4(%0, %1, %5) : $@convention(witness_method: HasElt) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @thick τ_0_0.Type) -> @out τ_0_0 + %7 = tuple () + return %7 : $() +} + +// getGenericContainer (G, e : A.Elt) -> A +sil [_specialize where T == S] [ossa] @$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) (@in_guaranteed G, @in 
T.Elt) -> @out T {
+bb0(%0 : $*T, %1 : $*G<T>, %2 : $*T.Elt):
+  // function_ref G.getContainer(A.Elt) -> A
+  %5 = function_ref @$s16eager_specialize1GV12getContaineryx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  %6 = apply %5(%0, %2, %1) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  %7 = tuple ()
+  return %7 : $()
+}
+
+// Specialization getGenericContainer
+//
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5 : $@convention(thin) (G<S>, X) -> S {
+// CHECK: bb0(%0 : $G<S>, %1 : $X):
+// CHECK: return %{{.*}} : $S
+// CHECK: } // end sil function '$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5'
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+//
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (@in_guaranteed G<T>, @in T.Elt) -> @out T {
+// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : $*G<T>, [[ARG2:%.*]] : $*T.Elt):
+// CHECK: [[META_LHS:%.*]] = metatype $@thick T.Type
+// CHECK: [[META_RHS:%.*]] = metatype $@thick S.Type
+// CHECK: [[META_LHS_WORD:%.*]] = unchecked_bitwise_cast [[META_LHS]] : $@thick T.Type to $Builtin.Word
+// CHECK: [[META_RHS_WORD:%.*]] = unchecked_bitwise_cast [[META_RHS]] : $@thick S.Type to $Builtin.Word
+// CHECK: [[CMP:%.*]] = builtin "cmp_eq_Word"([[META_LHS_WORD]] : $Builtin.Word, [[META_RHS_WORD]] : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br [[CMP]], bb3, bb1
+
+// CHECK: bb1:
+// CHECK: [[ORIGINAL_FN:%.*]] = function_ref @$s16eager_specialize1GV12getContaineryx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+// CHECK: %10 = apply [[ORIGINAL_FN]]([[ARG0]], [[ARG2]], [[ARG1]]) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+// CHECK: br bb2
+
+// CHECK: bb2:
+// CHECK: tuple ()
+// CHECK: return {{%.*}} : $()
+
+// CHECK: bb3:
+// CHECK: [[ARG0_CAST:%.*]] = unchecked_addr_cast [[ARG0]] : $*T to $*S
+// CHECK: [[ARG1_CAST:%.*]] = unchecked_addr_cast [[ARG1]] : $*G<T> to $*G<S>
+// CHECK: [[ARG1_VAL:%.*]] = load [trivial] [[ARG1_CAST]]
+// CHECK: [[ARG2_CAST:%.*]] = unchecked_addr_cast [[ARG2]] : $*T.Elt to $*X
+// CHECK: [[ARG2_VAL:%.*]] = load [trivial] [[ARG2_CAST]] : $*X
+  // function_ref specialized getGenericContainer (G<A>, e : A.Elt) -> A
+// CHECK: [[SPEC_FUNC:%.*]] = function_ref @$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5 : $@convention(thin) (G<S>, X) -> S
+// CHECK: [[RESULT:%.*]] = apply [[SPEC_FUNC]]([[ARG1_VAL]], [[ARG2_VAL]]) : $@convention(thin) (G<S>, X) -> S
+// CHECK: store [[RESULT]] to [trivial] [[ARG0_CAST]] : $*S
+// CHECK: [[TUP:%.*]] = tuple ()
+// CHECK: unchecked_trivial_bit_cast [[TUP]] : $() to $()
+// CHECK: br bb2
+// CHECK: } // end sil function '$s16eager_specialize19getGenericContainer_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF'
+
+// Helper
+sil [ossa] @$s16eager_specialize1GV19getContainerTrivialyx3EltQzF : $@convention(method) <Container where Container : HasElt> (@in Container.Elt, GTrivial<Container>) -> @out Container {
+bb0(%0 : $*Container, %1 : $*Container.Elt, %2 : $GTrivial<Container>):
+  %4 = witness_method $Container, #HasElt.init!allocator : $@convention(witness_method: HasElt) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @thick τ_0_0.Type) -> @out τ_0_0
+  %5 = metatype $@thick Container.Type
+  %6 = apply %4(%0, %1, %5) : $@convention(witness_method: HasElt) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @thick τ_0_0.Type) -> @out τ_0_0
+  %7 = tuple ()
+  return %7 : $()
+}
+
+sil [_specialize where T == ContainerKlass] [_specialize where T == S] [ossa] @$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (GTrivial<T>, @in T.Elt) -> @out T {
+bb0(%0 : $*T, %1 : $GTrivial<T>, %2 : $*T.Elt):
+  // function_ref G.getContainer(A.Elt) -> A
+  %5 = function_ref @$s16eager_specialize1GV19getContainerTrivialyx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, GTrivial<τ_0_0>) -> @out τ_0_0
+  %6 = apply %5(%0, %2, %1) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, GTrivial<τ_0_0>) -> @out τ_0_0
+  %7 = tuple ()
+  return %7 : $()
+}
+
+// Specialization getGenericContainerTrivial
+//
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main0E5KlassC_Tg5 : $@convention(thin) (GTrivial<ContainerKlass>, @owned EltTrivialKlass) -> @owned ContainerKlass {
+// CHECK: return {{%.*}} : $ContainerKlass
+// CHECK: } // end sil function '$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main0E5KlassC_Tg5'
+//
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5 : $@convention(thin) (GTrivial<S>, X) -> S {
+// CHECK: bb0(%0 : $GTrivial<S>, %1 : $X):
+// CHECK: return %{{.*}} : $S
+// CHECK: } // end sil function '$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5'
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+//
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (GTrivial<T>, @in T.Elt) -> @out T {
+// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : $GTrivial<T>, [[ARG2:%.*]] : $*T.Elt):
+// CHECK: [[META_LHS:%.*]] = metatype $@thick T.Type
+// CHECK: [[META_RHS:%.*]] = metatype $@thick S.Type
+// CHECK: [[META_LHS_WORD:%.*]] = unchecked_bitwise_cast [[META_LHS]] : $@thick T.Type to $Builtin.Word
+// CHECK: [[META_RHS_WORD:%.*]] = unchecked_bitwise_cast [[META_RHS]] : $@thick S.Type to $Builtin.Word
+// CHECK: [[CMP:%.*]] = builtin "cmp_eq_Word"([[META_LHS_WORD]] : $Builtin.Word, [[META_RHS_WORD]] : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br [[CMP]], bb5, bb1
+
+// CHECK: bb2:
+// CHECK: [[ORIGINAL_FN:%.*]] = function_ref @$s16eager_specialize1GV19getContainerTrivialyx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, GTrivial<τ_0_0>) -> @out τ_0_0
+// CHECK: apply [[ORIGINAL_FN]]([[ARG0]], [[ARG2]], [[ARG1]]) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, GTrivial<τ_0_0>) -> @out τ_0_0
+// CHECK: br bb3
+
+// CHECK: bb3:
+// CHECK: tuple ()
+// CHECK: return {{%.*}} : $()
+
+// CHECK: bb4:
+// CHECK: [[ARG0_CAST:%.*]] = unchecked_addr_cast [[ARG0]] : $*T to $*S
+// CHECK: [[ARG1_CAST:%.*]] = unchecked_trivial_bit_cast [[ARG1]] : $GTrivial<T> to $GTrivial<S>
+// CHECK: [[ARG2_CAST:%.*]] = unchecked_addr_cast [[ARG2]] : $*T.Elt to $*X
+// CHECK: [[ARG2_VAL:%.*]] = load [trivial] [[ARG2_CAST]] : $*X
+  // function_ref specialized getGenericContainer (G<A>, e : A.Elt) -> A
+// CHECK: [[SPEC_FUNC:%.*]] = function_ref @$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF4main1SV_Tg5 : $@convention(thin) (GTrivial<S>, X) -> S
+// CHECK: [[RESULT:%.*]] = apply [[SPEC_FUNC]]([[ARG1_CAST]], [[ARG2_VAL]]) : $@convention(thin) (GTrivial<S>, X) -> S
+// CHECK: store [[RESULT]] to [trivial] [[ARG0_CAST]] : $*S
+// CHECK: [[TUP:%.*]] = tuple ()
+// CHECK: unchecked_trivial_bit_cast [[TUP]] : $() to $()
+// CHECK: br bb3
+// CHECK: } // end sil function '$s16eager_specialize26getGenericContainerTrivial_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF'
+
+// Now we check getGenericContainer_owned.
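+
+// A rough Swift-level analogue of the function under test below (hypothetical:
+// this test is written directly in SIL; G, HasElt, and ContainerKlass are the
+// types this test file declares):
+//
+//   @_specialize(where T == ContainerKlass)
+//   func getGenericContainer_owned<T: HasElt>(_ g: G<T>, e: T.Elt) -> T {
+//     return g.getContainer(e)
+//   }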
+
+sil [_specialize where T == ContainerKlass] [ossa] @$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (@in_guaranteed G<T>, @in T.Elt) -> @out T {
+bb0(%0 : $*T, %1 : $*G<T>, %2 : $*T.Elt):
+  // function_ref G.getContainer(A.Elt) -> A
+  %5 = function_ref @$s16eager_specialize1GV12getContaineryx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  %6 = apply %5(%0, %2, %1) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  %7 = tuple ()
+  return %7 : $()
+}
+
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA8HasownedRzAA7AnownedAHRQlF4main0E5KlassC_Tg5 : $@convention(thin) (@guaranteed G<ContainerKlass>, @owned EltTrivialKlass) -> @owned ContainerKlass {
+// CHECK: } // end sil function '$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA8HasownedRzAA7AnownedAHRQlF4main0E5KlassC_Tg5'
+
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (@in_guaranteed G<T>, @in T.Elt) -> @out T {
+// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : $*G<T>, [[ARG2:%.*]] : $*T.Elt):
+// CHECK: bb3:
+// CHECK: [[ARG0_CAST:%.*]] = unchecked_addr_cast [[ARG0]]
+// CHECK: [[ARG1_CAST:%.*]] = unchecked_addr_cast [[ARG1]]
+// CHECK: [[ARG1_LOAD_BORROW:%.*]] = load_borrow [[ARG1_CAST]]
+// CHECK: [[ARG2_CAST:%.*]] = unchecked_addr_cast [[ARG2]]
+// CHECK: [[ARG2_LOAD:%.*]] = load [take] [[ARG2_CAST]]
+// CHECK: [[FUNC:%.*]] = function_ref @$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA8HasownedRzAA7AnownedAHRQlF4main0E5KlassC_Tg5 : $@convention(thin) (@guaranteed G<ContainerKlass>, @owned EltTrivialKlass) -> @owned ContainerKlass
+// CHECK: [[RESULT:%.*]] = apply [[FUNC]]([[ARG1_LOAD_BORROW]], [[ARG2_LOAD]]) : $@convention(thin) (@guaranteed G<ContainerKlass>, @owned EltTrivialKlass) -> @owned ContainerKlass
+// CHECK: store [[RESULT]] to [init] [[ARG0_CAST]]
+// CHECK: } // end sil function '$s16eager_specialize25getGenericContainer_owned_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF'
+
+sil [_specialize where T == ContainerKlass] [ossa] @$s16eager_specialize30getGenericContainer_guaranteed_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (@in_guaranteed G<T>, @in_guaranteed T.Elt) -> @out T {
+bb0(%0 : $*T, %1 : $*G<T>, %2 : $*T.Elt):
+  %a = alloc_stack $T.Elt
+  copy_addr %2 to [initialization] %a : $*T.Elt
+  // function_ref G.getContainer(A.Elt) -> A
+  %5 = function_ref @$s16eager_specialize1GV12getContaineryx3EltQzF : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  %6 = apply %5(%0, %a, %1) : $@convention(method) <τ_0_0 where τ_0_0 : HasElt> (@in τ_0_0.Elt, @in_guaranteed G<τ_0_0>) -> @out τ_0_0
+  dealloc_stack %a : $*T.Elt
+  %7 = tuple ()
+  return %7 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize30getGenericContainer_guaranteed_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF : $@convention(thin) <T where T : HasElt, T.Elt : AnElt> (@in_guaranteed G<T>, @in_guaranteed T.Elt) -> @out T {
+// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : $*G<T>, [[ARG2:%.*]] : $*T.Elt):
+// CHECK: bb3:
+// CHECK: [[ARG0_CAST:%.*]] = unchecked_addr_cast [[ARG0]]
+// CHECK: [[ARG1_CAST:%.*]] = unchecked_addr_cast [[ARG1]]
+// CHECK: [[ARG1_LOAD_BORROW:%.*]] = load_borrow [[ARG1_CAST]]
+// CHECK: [[ARG2_CAST:%.*]] = unchecked_addr_cast [[ARG2]]
+// CHECK: [[ARG2_LOAD:%.*]] = load_borrow [[ARG2_CAST]]
+// CHECK: [[FUNC:%.*]] = function_ref @$s16eager_specialize30getGenericContainer_guaranteed_1exAA1GVyxG_3EltQztAA13HasguaranteedRzAA12AnguaranteedAHRQlF4main0E5KlassC_Tg5 : $@convention(thin) (@guaranteed G<ContainerKlass>, @guaranteed EltTrivialKlass) -> @owned ContainerKlass
+// CHECK: [[RESULT:%.*]] = apply [[FUNC]]([[ARG1_LOAD_BORROW]], [[ARG2_LOAD]]) : $@convention(thin) (@guaranteed G<ContainerKlass>, @guaranteed EltTrivialKlass) -> @owned ContainerKlass
+// CHECK: store [[RESULT]] to [init] [[ARG0_CAST]]
+// CHECK: } // end sil function '$s16eager_specialize30getGenericContainer_guaranteed_1exAA1GVyxG_3EltQztAA03HasF0RzAA02AnF0AHRQlF'
+
+// --- test: rethrow
+
+// Helper
+//
+// static != infix (A, A) -> Bool
+sil public_external [serialized] [ossa] @$ss2neoiySbx_xts9EquatableRzlFZ : $@convention(thin) <T where T : Equatable> (@in T, @in T) -> Bool {
+bb0(%0 : $*T, %1 : $*T):
+  %4 = witness_method $T, #Equatable."==" : $@convention(witness_method: Equatable) <τ_0_0 where τ_0_0 : Equatable> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> Bool
+  %5 = metatype $@thick T.Type
+  %6 = apply %4(%0, %1, %5) : $@convention(witness_method: Equatable) <τ_0_0 where τ_0_0 : Equatable> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> Bool
+  %7 = struct_extract %6 : $Bool, #Bool._value
+  %8 = integer_literal $Builtin.Int1, -1
+  %9 = builtin "xor_Int1"(%7 : $Builtin.Int1, %8 : $Builtin.Int1) : $Builtin.Int1
+  %10 = struct $Bool (%9 : $Builtin.Int1)
+  return %10 : $Bool
+}
+
+// divideNum (A, den : A) throws -> A
+sil [_specialize where T == Int] [ossa] @$s16eager_specialize9divideNum_3denxx_xtKs13SignedIntegerRzlF : $@convention(thin) <T where T : SignedInteger> (@in T, @in T) -> (@out T, @error Error) {
+bb0(%0 : $*T, %1 : $*T, %2 : $*T):
+  // function_ref static != infix (A, A) -> Bool
+  %5 = function_ref @$ss2neoiySbx_xts9EquatableRzlFZ : $@convention(thin) <τ_0_0 where τ_0_0 : Equatable> (@in τ_0_0, @in τ_0_0) -> Bool
+  %6 = alloc_stack $T
+  copy_addr %2 to [initialization] %6 : $*T
+  %8 = witness_method $T, #_ExpressibleByBuiltinIntegerLiteral.init!allocator : $@convention(witness_method: _ExpressibleByBuiltinIntegerLiteral) <τ_0_0 where τ_0_0 : _ExpressibleByBuiltinIntegerLiteral> (Builtin.IntLiteral, @thick τ_0_0.Type) -> @out τ_0_0
+  %9 = metatype $@thick T.Type
+  %10 = integer_literal $Builtin.IntLiteral, 0
+  %11 = alloc_stack $T
+  %12 = apply %8(%11, %10, %9) : $@convention(witness_method: _ExpressibleByBuiltinIntegerLiteral) <τ_0_0 where τ_0_0 : _ExpressibleByBuiltinIntegerLiteral> (Builtin.IntLiteral, @thick τ_0_0.Type) -> @out τ_0_0
+  %13 = apply %5(%6, %11) : $@convention(thin) <τ_0_0 where τ_0_0 : Equatable> (@in τ_0_0, @in τ_0_0) -> Bool
+  %14 = struct_extract %13 : $Bool, #Bool._value
+  dealloc_stack %11 : $*T
+  dealloc_stack %6 : $*T
+  cond_br %14, bb2, bb1
+
+bb1:
+  destroy_addr %2 : $*T
+  destroy_addr %1 : $*T
+  %24 = alloc_existential_box $Error, $ArithmeticError
+  %25 = project_existential_box $ArithmeticError in %24 : $Error
+  %26 = enum $ArithmeticError, #ArithmeticError.DivByZero!enumelt
+  store %26 to [trivial] %25 : $*ArithmeticError
+  throw %24 : $Error
+
+bb2:
+  %18 = witness_method $T, #BinaryInteger."/" : $@convention(witness_method: BinaryInteger) <τ_0_0 where τ_0_0 : BinaryInteger> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> @out τ_0_0
+  %19 = apply %18(%0, %1, %2, %9) : $@convention(witness_method: BinaryInteger) <τ_0_0 where τ_0_0 : BinaryInteger> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> @out τ_0_0
+  %20 = tuple ()
+  return %20 : $()
+}
+
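+// A hypothetical Swift-level analogue of divideNum (the test is written
+// directly in SIL; ArithmeticError is the error type the SIL body assumes):
+//
+//   @_specialize(where T == Int)
+//   func divideNum<T: SignedInteger>(_ num: T, den: T) throws -> T {
+//     if den == 0 { throw ArithmeticError.DivByZero }
+//     return num / den
+//   }
+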
+// specialized divideNum (A, den : A) throws -> A
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize9divideNum_3denxx_xtKSZRzlFSi_Tg5 : $@convention(thin) (Int, Int) -> (Int, @error Error) {
+// CHECK: bb0(%0 : $Int, %1 : $Int):
+// CHECK: return %{{.*}}
+// CHECK: throw %{{.*}}
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+//
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize9divideNum_3denxx_xtKs13SignedIntegerRzlF : $@convention(thin) <T where T : SignedInteger> (@in T, @in T) -> (@out T, @error Error) {
+// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T):
+// CHECK: %3 = metatype $@thick T.Type
+// CHECK: %4 = metatype $@thick Int.Type
+// CHECK: %5 = unchecked_bitwise_cast %3 : $@thick T.Type to $Builtin.Word
+// CHECK: %6 = unchecked_bitwise_cast %4 : $@thick Int.Type to $Builtin.Word
+// CHECK: %7 = builtin "cmp_eq_Word"(%5 : $Builtin.Word, %6 : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br %7, bb6, bb1
+
+// CHECK: bb1:
+// CHECK: // function_ref static != infix(_:_:)
+// CHECK: cond_br %{{.*}}, bb4, bb2
+
+// CHECK: bb2:
+// CHECK: br bb3(%{{.*}} : $Error)
+
+// CHECK: bb3(%{{.*}} : @owned $Error):
+// CHECK: throw %{{.*}} : $Error
+
+// CHECK: bb4:
+// CHECK: %{{.*}} = witness_method $T, #BinaryInteger."/" : {{.*}} : $@convention(witness_method: BinaryInteger) <τ_0_0 where τ_0_0 : BinaryInteger> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> @out τ_0_0
+// CHECK: apply %{{.*}}({{.*}}) : $@convention(witness_method: BinaryInteger) <τ_0_0 where τ_0_0 : BinaryInteger> (@in τ_0_0, @in τ_0_0, @thick τ_0_0.Type) -> @out τ_0_0
+// CHECK: br bb5
+
+// CHECK: bb5:
+// CHECK: %{{.*}} = tuple ()
+// CHECK: return %{{.*}} : $()
+
+// CHECK: bb6:
+// CHECK: %{{.*}} = unchecked_addr_cast %0 : $*T to $*Int
+// CHECK: %{{.*}} = unchecked_addr_cast %1 : $*T to $*Int
+// CHECK: %{{.*}} = load [trivial] %{{.*}} : $*Int
+// CHECK: %{{.*}} = unchecked_addr_cast %2 : $*T to $*Int
+// CHECK: %{{.*}} = load [trivial] %{{.*}} : $*Int
+// CHECK: // function_ref specialized divideNum(_:den:)
+// CHECK: %{{.*}} = function_ref @$s16eager_specialize9divideNum_3denxx_xtKSZRzlFSi_Tg5 : $@convention(thin) (Int, Int) -> (Int, @error Error)
+// CHECK: try_apply %{{.*}}(%{{.*}}, %{{.*}}) : $@convention(thin) (Int, Int) -> (Int, @error Error), normal bb8, error bb7
+
+// CHECK: bb7(%{{.*}} : @owned $Error):
+// CHECK: br bb3(%{{.*}} : $Error)
+
+// CHECK: bb8(%{{.*}} : $Int):
+// CHECK: store %{{.*}} to [trivial] %{{.*}} : $*Int
+// CHECK: %{{.*}} = tuple ()
+// CHECK: %{{.*}} = unchecked_trivial_bit_cast %{{.*}} : $() to $()
+// CHECK: br bb5
+
+// --- test: multiple void and non-void return values
+
+// foo (A) -> Int64
+sil hidden [noinline] [Onone] [ossa] @$s16eager_specialize3fooys5Int64VxlF : $@convention(thin) <T> (@in T) -> Int64 {
+// %0 // users: %1, %4
+bb0(%0 : $*T):
+  %2 = integer_literal $Builtin.Int64, 3
+  %3 = struct $Int64 (%2 : $Builtin.Int64)
+  destroy_addr %0 : $*T
+  return %3 : $Int64
+}
+
+// voidReturn (A) -> ()
+sil [_specialize where T == Float] [_specialize where T == Int64] [ossa] @$s16eager_specialize10voidReturnyyxlF : $@convention(thin) <T> (@in T) -> () {
+bb0(%0 : $*T):
+  // function_ref foo (A) -> Int64
+  %2 = function_ref @$s16eager_specialize3fooys5Int64VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+  %3 = apply %2(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+  %4 = tuple ()
+  return %4 : $()
+}
+
+// CHECK-LABEL: // specialized voidReturn(_:)
+// CHECK: sil shared [ossa] @$s16eager_specialize10voidReturnyyxlFSf_Tg5 : $@convention(thin) (Float) -> () {
+// %0 // user: %2
+// CHECK: bb0(%0 : $Float):
+// CHECK: return %5 : $()
+
+// CHECK-LABEL: // specialized voidReturn(_:)
+// CHECK: sil shared [ossa] @$s16eager_specialize10voidReturnyyxlFs5Int64V_Tg5 : $@convention(thin) (Int64) -> () {
+// CHECK: bb0(%0 : $Int64):
+// CHECK: return %5 : $()
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+//
+// CHECK-LABEL: // voidReturn(_:)
+// CHECK: sil [ossa] @$s16eager_specialize10voidReturnyyxlF : $@convention(thin) <T> (@in T) -> () {
+// CHECK: bb0(%0 : $*T):
+// CHECK: builtin "cmp_eq_Word"
+// CHECK: cond_br %5, bb5, bb1
+
+// CHECK: bb1:
+// CHECK: builtin "cmp_eq_Word"
+// CHECK: cond_br %11, bb4, bb2
+
+// CHECK: bb2:
+// CHECK: function_ref @$s16eager_specialize3fooys5Int64VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+// CHECK: apply %13(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+// CHECK: br bb3
+
+// CHECK: bb3:
+// CHECK: tuple ()
+// CHECK: return
+
+// CHECK: bb4:
+// CHECK: function_ref @$s16eager_specialize10voidReturnyyxlFSf_Tg5 : $@convention(thin) (Float) -> ()
+// CHECK: br bb3
+
+// CHECK: bb5:
+// CHECK: br bb3
+
+// nonvoidReturn(A) -> Int64
+sil [_specialize where T == Float] [_specialize where T == Int64] [ossa] @$s16eager_specialize13nonvoidReturnys5Int64VxlF : $@convention(thin) <T> (@in T) -> Int64 {
+// %0 // users: %1, %3
+bb0(%0 : $*T):
+  // function_ref foo(A) -> Int64
+  %2 = function_ref @$s16eager_specialize3fooys5Int64VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+  %3 = apply %2(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+  return %3 : $Int64
+}
+
+// CHECK-LABEL: // specialized nonvoidReturn(_:)
+// CHECK: sil shared [ossa] @$s16eager_specialize13nonvoidReturnys5Int64VxlFSf_Tg5 : $@convention(thin) (Float) -> Int64 {
+// CHECK: bb0(%0 : $Float):
+// CHECK: return %4 : $Int64
+// CHECK: } // end sil function '$s16eager_specialize13nonvoidReturnys5Int64VxlFSf_Tg5'
+
+// CHECK-LABEL: // specialized nonvoidReturn(_:)
+// CHECK: sil shared [ossa] @$s16eager_specialize13nonvoidReturnys5Int64VxlFAD_Tg5 : $@convention(thin) (Int64) -> Int64 {
+// CHECK: bb0(%0 : $Int64):
+// CHECK: return %4 : $Int64
+// CHECK: } // end sil function '$s16eager_specialize13nonvoidReturnys5Int64VxlFAD_Tg5'
+
+// CHECK-LABEL: // nonvoidReturn(_:)
+// CHECK: sil [ossa] @$s16eager_specialize13nonvoidReturnys5Int64VxlF : $@convention(thin) <T> (@in T) -> Int64 {
+// CHECK: bb0(%0 : $*T):
+// CHECK: builtin "cmp_eq_Word"
+// CHECK: cond_br %{{.*}}, bb5, bb1
+
+// CHECK: bb1:
+// CHECK: builtin "cmp_eq_Word"
+// CHECK: cond_br %{{.*}}, bb4, bb2
+
+// CHECK: bb2:
+// CHECK: // function_ref foo(_:)
+// CHECK: function_ref @$s16eager_specialize3fooys5Int64VxlF : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int64
+// CHECK: apply %13
+// CHECK: br bb3(%{{.*}} : $Int64)
+
+// CHECK: bb3(%{{.*}} : $Int64):
+// CHECK: return %{{.*}} : $Int64
+
+// CHECK: bb4:
+// CHECK: br bb3(%{{.*}} : $Int64)
+
+// CHECK: bb5:
+// CHECK: br bb3(%{{.*}} : $Int64)
+
+////////////////////////////////////////////////////////////////////
+// Check the ability to specialize for _Trivial(64) and _Trivial(32)
+////////////////////////////////////////////////////////////////////
+
+// copyValueAndReturn (A, s : inout A) -> A
+sil [noinline] [_specialize where S : _Trivial(32)] [_specialize where S : _Trivial(64)] [ossa] @$s16eager_specialize18copyValueAndReturn_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S {
+bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+  copy_addr %2 to [initialization] %0 : $*S
+  destroy_addr %1 : $*S
+  %7 = tuple ()
+  return %7 : $()
+} // end sil function '$s16eager_specialize18copyValueAndReturn_1sxx_xztlF'
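+
+// A hypothetical Swift-level analogue of the function above (the test is
+// SIL-only; '_Trivial(N)' is a layout constraint meaning "trivial value type
+// of exactly N bits"):
+//
+//   @_specialize(where S: _Trivial(32))
+//   @_specialize(where S: _Trivial(64))
+//   func copyValueAndReturn<S>(_ x: S, s: inout S) -> S { return s }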
+
+
+// Check specialized for 32 bits
+// specialized copyValueAndReturn(A, s : inout A) -> A
+// CHECK-LABEL: sil shared [noinline] [ossa] @$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze31_lIetilr_Tp5 : $@convention(thin) <S where S : _Trivial(32)> (@in S, @inout S) -> @out S
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: %5 = tuple ()
+// CHECK: return %5 : $()
+// CHECK: } // end sil function '$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze31_lIetilr_Tp5'
+
+// Check specialized for 64 bits
+// specialized copyValueAndReturn(A, s : inout A) -> A
+// CHECK-LABEL: sil shared [noinline] [ossa] @$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze63_lIetilr_Tp5 : $@convention(thin) <S where S : _Trivial(64)> (@in S, @inout S) -> @out S
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: %5 = tuple ()
+// CHECK: return %5 : $()
+// CHECK: } // end sil function '$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze63_lIetilr_Tp5'
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+//
+// CHECK-LABEL: sil [noinline] [ossa] @$s16eager_specialize18copyValueAndReturn_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S
+// Check if size == 8 bytes, i.e. 64 bits
+// CHECK: [[META_S:%.*]] = metatype $@thick S.Type
+// CHECK: [[SIZE_S:%.*]] = builtin "sizeof"([[META_S]] : $@thick S.Type) : $Builtin.Word
+// CHECK: [[SIZE_CHECK:%.*]] = integer_literal $Builtin.Word, 8
+// CHECK: [[CMP:%.*]] = builtin "cmp_eq_Word"([[SIZE_S]] : $Builtin.Word, [[SIZE_CHECK]] : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br [[CMP]], bb6, bb1
+
+// Check if size == 4 bytes, i.e. 32 bits
+// CHECK: bb1:
+// CHECK: [[META:%.*]] = metatype $@thick S.Type
+// CHECK: [[SIZE_S:%.*]] = builtin "sizeof"([[META]] : $@thick S.Type) : $Builtin.Word
+// CHECK: [[EXPECTED_SIZE:%.*]] = integer_literal $Builtin.Word, 4
+// CHECK: [[CMP:%.*]] = builtin "cmp_eq_Word"([[SIZE_S]] : $Builtin.Word, [[EXPECTED_SIZE]] : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br [[CMP]], bb4, bb2
+
+// None of the constraint checks was successful, perform a generic copy.
+// CHECK: bb2:
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: br bb3
+
+// CHECK: bb3:
+// CHECK: %16 = tuple ()
+// CHECK: return %16 : $()
+
+// Check if it is a trivial type
+// CHECK: bb4:
+// CHECK: %18 = builtin "ispod"(%8 : $@thick S.Type) : $Builtin.Int1
+// CHECK: cond_br %18, bb5, bb2
+
+// Invoke the specialized function for 32 bits
+// CHECK: bb5:
+// CHECK: %20 = unchecked_addr_cast %0 : $*S to $*S
+// CHECK: %21 = unchecked_addr_cast %1 : $*S to $*S
+// CHECK: %22 = unchecked_addr_cast %2 : $*S to $*S
+  // function_ref specialized copyValueAndReturn (A, s : inout A) -> A
+// CHECK: %23 = function_ref @$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze31_lIetilr_Tp5 : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial(32)> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: apply %23(%20, %21, %22) : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial(32)> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %25 = tuple ()
+// CHECK: unchecked_trivial_bit_cast %25 : $() to $()
+// CHECK: br bb3
+
+// Check if it is a trivial type
+// CHECK: bb6:
+// CHECK: %28 = builtin "ispod"(%3 : $@thick S.Type) : $Builtin.Int1
+// CHECK: cond_br %28, bb7, bb1
+
+// Invoke the specialized function for 64 bits
+// CHECK: bb7:
+// CHECK: %30 = unchecked_addr_cast %0 : $*S to $*S
+// CHECK: %31 = unchecked_addr_cast %1 : $*S to $*S
+// CHECK: %32 = unchecked_addr_cast %2 : $*S to $*S
+  // function_ref specialized copyValueAndReturn (A, s : inout A) -> A
+// CHECK: %33 = function_ref @$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze63_lIetilr_Tp5 : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial(64)> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: apply %33(%30, %31, %32) : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial(64)> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %35 = tuple ()
+// CHECK: unchecked_trivial_bit_cast %35 : $() to $()
+// CHECK: br bb3
+// CHECK: } // end sil function '$s16eager_specialize18copyValueAndReturn_1sxx_xztlF'
+
+////////////////////////////////////////////////////////////////////
+// Check the ability to specialize for _Trivial
+////////////////////////////////////////////////////////////////////
+
+// copyValueAndReturn2 (A, s : inout A) -> A
+sil [noinline] [_specialize where S : _Trivial] [ossa] @$s16eager_specialize19copyValueAndReturn2_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S {
+bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+  copy_addr %2 to [initialization] %0 : $*S
+  destroy_addr %1 : $*S
+  %7 = tuple ()
+  return %7 : $()
+} // end sil function '$s16eager_specialize19copyValueAndReturn2_1sxx_xztlF'
+
+// Check the specialization for _Trivial
+// specialized copyValueAndReturn2 (A, s : inout A) -> A
+// CHECK-LABEL: sil shared [noinline] [ossa] @$s16eager_specialize19copyValueAndReturn2_1sxx_xztlFxxxRlzTlIetilr_Tp5 : $@convention(thin) <S where S : _Trivial> (@in S, @inout S) -> @out S
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: %5 = tuple ()
+// CHECK: return %5 : $()
+// CHECK: } // end sil function '$s16eager_specialize19copyValueAndReturn2_1sxx_xztlFxxxRlzTlIetilr_Tp5'
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+// copyValueAndReturn2 (A, s : inout A) -> A
+// CHECK-LABEL: sil [noinline] [ossa] @$s16eager_specialize19copyValueAndReturn2_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: %3 = metatype $@thick S.Type
+// CHECK: %4 = builtin "ispod"(%3 : $@thick S.Type) : $Builtin.Int1
+// CHECK: cond_br %4, bb3, bb1
+
+// None of the constraint checks was successful, perform a generic copy.
+// CHECK: bb1:
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: br bb2
+
+// CHECK: bb2:
+// CHECK: %9 = tuple ()
+// CHECK: return %9 : $()
+
+// Invoke the specialized function for trivial types
+// CHECK: bb3:
+// CHECK: %11 = unchecked_addr_cast %0 : $*S to $*S
+// CHECK: %12 = unchecked_addr_cast %1 : $*S to $*S
+// CHECK: %13 = unchecked_addr_cast %2 : $*S to $*S
+  // function_ref specialized copyValueAndReturn2 (A, s : inout A) -> A
+// CHECK: %14 = function_ref @$s16eager_specialize19copyValueAndReturn2_1sxx_xztlFxxxRlzTlIetilr_Tp5 : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %15 = apply %14(%11, %12, %13) : $@convention(thin) <τ_0_0 where τ_0_0 : _Trivial> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %16 = tuple ()
+// CHECK: %17 = unchecked_trivial_bit_cast %16 : $() to $()
+// CHECK: br bb2
+// CHECK: } // end sil function '$s16eager_specialize19copyValueAndReturn2_1sxx_xztlF'
+
+////////////////////////////////////////////////////////////////////
+// Check the ability to specialize for _RefCountedObject
+////////////////////////////////////////////////////////////////////
+
+// copyValueAndReturn3 (A, s : inout A) -> A
+sil [noinline] [_specialize where S : _RefCountedObject] [ossa] @$s16eager_specialize19copyValueAndReturn3_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S {
+bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+  copy_addr %2 to [initialization] %0 : $*S
+  destroy_addr %1 : $*S
+  %7 = tuple ()
+  return %7 : $()
+} // end sil function '$s16eager_specialize19copyValueAndReturn3_1sxx_xztlF'
+
+
+// Check for specialized function for _RefCountedObject
+// specialized copyValueAndReturn3 (A, s : inout A) -> A
+// CHECK-LABEL: sil shared [noinline] [ossa] @$s16eager_specialize19copyValueAndReturn3_1sxx_xztlFxxxRlzRlIetilr_Tp5 : $@convention(thin) <S where S : _RefCountedObject> (@in S, @inout S) -> @out S
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: %5 = tuple ()
+// CHECK: return %5 : $()
+// CHECK: } // end sil function '$s16eager_specialize19copyValueAndReturn3_1sxx_xztlFxxxRlzRlIetilr_Tp5'
+
+
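+// A hypothetical Swift-level analogue of copyValueAndReturn3 (the test is
+// SIL-only; '_RefCountedObject' constrains S to be a single reference-counted
+// reference):
+//
+//   @_specialize(where S: _RefCountedObject)
+//   func copyValueAndReturn3<S>(_ x: S, s: inout S) -> S { return s }
+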
+// Generic with specialized dispatch. No more [specialize] attribute.
+// copyValueAndReturn3 (A, s : inout A) -> A
+// CHECK-LABEL: sil [noinline] [ossa] @$s16eager_specialize19copyValueAndReturn3_1sxx_xztlF : $@convention(thin) <S> (@in S, @inout S) -> @out S {
+// Check if it can be a class
+// CHECK: bb0(%0 : $*S, %1 : $*S, %2 : $*S):
+// CHECK: %3 = metatype $@thick S.Type
+// CHECK: %4 = builtin "canBeClass"(%3 : $@thick S.Type) : $Builtin.Int8
+// CHECK: %5 = integer_literal $Builtin.Int8, 1
+// CHECK: %6 = builtin "cmp_eq_Int8"(%4 : $Builtin.Int8, %5 : $Builtin.Int8) : $Builtin.Int1
+// True if it is a Swift class
+// CHECK: cond_br %6, bb3, bb4
+
+// CHECK: bb1:
+// CHECK: copy_addr %2 to [initialization] %0 : $*S
+// CHECK: destroy_addr %1 : $*S
+// CHECK: br bb2
+
+// CHECK: bb2:
+// CHECK: %11 = tuple ()
+// CHECK: return %11 : $()
+
+// Invoke the specialized function for ref-counted objects
+// CHECK: bb3:
+// CHECK: %13 = unchecked_addr_cast %0 : $*S to $*S
+// CHECK: %14 = unchecked_addr_cast %1 : $*S to $*S
+// CHECK: %15 = unchecked_addr_cast %2 : $*S to $*S
+  // function_ref specialized copyValueAndReturn3 (A, s : inout A) -> A
+// CHECK: %16 = function_ref @$s16eager_specialize19copyValueAndReturn3_1sxx_xztlFxxxRlzRlIetilr_Tp5 : $@convention(thin) <τ_0_0 where τ_0_0 : _RefCountedObject> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %17 = apply %16(%13, %14, %15) : $@convention(thin) <τ_0_0 where τ_0_0 : _RefCountedObject> (@in τ_0_0, @inout τ_0_0) -> @out τ_0_0
+// CHECK: %18 = tuple ()
+// CHECK: %19 = unchecked_trivial_bit_cast %18 : $() to $()
+// CHECK: br bb2
+
+// Check if the object could be of a class or objc existential type
+// CHECK: bb4:
+// CHECK: %21 = integer_literal $Builtin.Int8, 2
+// CHECK: %22 = builtin "cmp_eq_Int8"(%4 : $Builtin.Int8, %21 : $Builtin.Int8) : $Builtin.Int1
+// CHECK: cond_br %22, bb5, bb1
+
+// CHECK: bb5:
+// CHECK: %24 = function_ref @_swift_isClassOrObjCExistentialType : $@convention(thin) <τ_0_0> (@thick τ_0_0.Type) -> Bool
+// CHECK: %25 = apply %24(%3) : $@convention(thin) <τ_0_0> (@thick τ_0_0.Type) -> Bool
+// CHECK: %26 = struct_extract %25 : $Bool, #Bool._value
+// CHECK: cond_br %26, bb3, bb1
+// CHECK: } // end sil function '$s16eager_specialize19copyValueAndReturn3_1sxx_xztlF'
+
+////////////////////////////////////////////////////////////////////
+// Check the ability to produce exported specializations, which can
+// be referenced from other object files.
+////////////////////////////////////////////////////////////////////
+
+// exportSpecializations (A) -> ()
+sil [_specialize exported: true, where T == Int64] [ossa] @$s16eager_specialize21exportSpecializationsyyxlF : $@convention(thin) <T> (@in T) -> () {
+bb0(%0 : $*T):
+  destroy_addr %0 : $*T
+  %3 = tuple ()
+  return %3 : $()
+} // end sil function '$s16eager_specialize21exportSpecializationsyyxlF'
+
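+// A hypothetical Swift-level analogue of exportSpecializations (the test is
+// SIL-only); 'exported: true' makes the specialization available to other
+// object files:
+//
+//   @_specialize(exported: true, where T == Int64)
+//   public func exportSpecializations<T>(_ t: T) {}
+
+////////////////////////////////////////////////////////////////////
+// Check the ability to produce explicit partial specializations.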
+////////////////////////////////////////////////////////////////////
+
+// checkExplicitPartialSpecialization (A, B) -> ()
+sil [_specialize kind: partial, where T == Int64] [ossa] @$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lF : $@convention(thin) <T, S> (@in T, @in S) -> () {
+bb0(%0 : $*T, %1 : $*S):
+  destroy_addr %1 : $*S
+  destroy_addr %0 : $*T
+  %6 = tuple ()
+  return %6 : $()
+} // end sil function '$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lF'
+
+
+// Check for specialized function for τ_0_0 == Int64
+// specialized checkExplicitPartialSpecialization (A, B) -> ()
+// CHECK-LABEL: sil shared [ossa] @$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lFs5Int64Vq_ADRszr0_lIetyi_Tp5 : $@convention(thin) <T, S where T == Int64> (Int64, @in S) -> ()
+// CHECK: bb0(%0 : $Int64, %1 : $*S):
+// CHECK: %2 = alloc_stack $Int64
+// CHECK: store %0 to [trivial] %2 : $*Int64
+// CHECK: destroy_addr %1 : $*S
+// CHECK: destroy_addr %2 : $*Int64
+// CHECK: %6 = tuple ()
+// CHECK: dealloc_stack %2 : $*Int64
+// CHECK: return %6 : $()
+// CHECK: } // end sil function '$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lFs5Int64Vq_ADRszr0_lIetyi_Tp5'
+
+// Generic with specialized dispatch. No more [specialize] attribute.
+// checkExplicitPartialSpecialization (A, B) -> ()
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lF : $@convention(thin) <T, S> (@in T, @in S) -> ()
+// CHECK: bb0(%0 : $*T, %1 : $*S):
+// CHECK: %2 = metatype $@thick T.Type
+// CHECK: %3 = metatype $@thick Int64.Type
+// CHECK: %4 = unchecked_bitwise_cast %2 : $@thick T.Type to $Builtin.Word
+// CHECK: %5 = unchecked_bitwise_cast %3 : $@thick Int64.Type to $Builtin.Word
+// CHECK: %6 = builtin "cmp_eq_Word"(%4 : $Builtin.Word, %5 : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br %6, bb3, bb1
+
+// Type dispatch was not successful.
+// CHECK: bb1:
+// CHECK: destroy_addr %1 : $*S
+// CHECK: destroy_addr %0 : $*T
+// CHECK: br bb2
+
+// CHECK: bb2:
+// CHECK: %11 = tuple ()
+// CHECK: return %11 : $()
+
+// Invoke a partially specialized function.
+// CHECK: bb3:
+// CHECK: %13 = unchecked_addr_cast %0 : $*T to $*Int64
+// CHECK: %14 = load [trivial] %13 : $*Int64
+// CHECK: %15 = unchecked_addr_cast %1 : $*S to $*S
+  // function_ref specialized checkExplicitPartialSpecialization (A, B) -> ()
+// CHECK: %16 = function_ref @$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lFs5Int64Vq_ADRszr0_lIetyi_Tp5 : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 == Int64> (Int64, @in τ_0_1) -> ()
+// CHECK: %17 = apply %16(%14, %15) : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 == Int64> (Int64, @in τ_0_1) -> ()
+// CHECK: %18 = tuple ()
+// CHECK: %19 = unchecked_trivial_bit_cast %18 : $() to $()
+// CHECK: br bb2
+// CHECK: } // end sil function '$s16eager_specialize34checkExplicitPartialSpecializationyyx_q_tr0_lF'
+
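+// A hypothetical Swift-level analogue of checkExplicitPartialSpecialization
+// (the test is SIL-only); 'kind: partial' fixes T to Int64 while leaving S
+// generic:
+//
+//   @_specialize(kind: partial, where T == Int64)
+//   public func checkExplicitPartialSpecialization<T, S>(_ t: T, _ s: S) {}
+
+/////////////////////////////////////////////////////////////////////////
+// Check that functions with unreachable instructions can be specialized.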
+/////////////////////////////////////////////////////////////////////////
+
+protocol P {
+}
+
+struct T : P {
+  init()
+}
+
+extension P {
+  public static func f(_ x: Self) -> Self
+}
+
+sil @error : $@convention(thin) () -> Never
+
+// CHECK-LABEL: sil [ossa] @$s16eager_specialize1PPAAE1fyxxFZ : $@convention(method) <Self where Self : P> (@in Self, @thick Self.Type) -> @out Self
+// CHECK: %3 = metatype $@thick Self.Type
+// CHECK: %4 = metatype $@thick T.Type
+// CHECK: %5 = unchecked_bitwise_cast %3 : $@thick Self.Type to $Builtin.Word
+// CHECK: %6 = unchecked_bitwise_cast %4 : $@thick T.Type to $Builtin.Word
+// CHECK: %7 = builtin "cmp_eq_Word"(%5 : $Builtin.Word, %6 : $Builtin.Word) : $Builtin.Int1
+// CHECK: cond_br %7, bb2, bb1
+
+// CHECK: bb1:
+// CHECK: %9 = function_ref @error : $@convention(thin) () -> Never
+// CHECK: %10 = apply %9() : $@convention(thin) () -> Never
+// CHECK: unreachable
+
+// CHECK: bb2:
+// CHECK: %12 = unchecked_addr_cast %0 : $*Self to $*T
+// CHECK: %13 = unchecked_addr_cast %1 : $*Self to $*T
+// CHECK: %14 = load [trivial] %13 : $*T
+// CHECK: %15 = unchecked_trivial_bit_cast %2 : $@thick Self.Type to $@thick T.Type
+// CHECK: %16 = function_ref @$s16eager_specialize1PPAAE1fyxxFZ4main1TV_Tg5 : $@convention(method) (T, @thick T.Type) -> T
+// CHECK: %17 = apply %16(%14, %15) : $@convention(method) (T, @thick T.Type) -> T
+// CHECK: store %17 to [trivial] %12 : $*T
+// CHECK: %19 = tuple ()
+// CHECK: unreachable
+// CHECK: } // end sil function '$s16eager_specialize1PPAAE1fyxxFZ'
+
+sil [_specialize exported: false, kind: full, where Self == T] [ossa] @$s16eager_specialize1PPAAE1fyxxFZ : $@convention(method) <Self where Self : P> (@in Self, @thick Self.Type) -> @out Self {
+bb0(%0 : $*Self, %1 : $*Self, %2 : $@thick Self.Type):
+  // function_ref error
+  %5 = function_ref @error : $@convention(thin) () -> Never
+  %6 = apply %5() : $@convention(thin) () -> Never
+  unreachable
+} // end sil function '$s16eager_specialize1PPAAE1fyxxFZ'
+
+
+////////////////////////////////////////////////////////////////////
+// Check that IRGen generates efficient code for fixed-size Trivial
+// constraints.
+////////////////////////////////////////////////////////////////////
+
+// Check that a specialization for _Trivial(32) uses direct loads and stores
+// instead of value witness functions to load and store the value of a generic type.
+// CHECK-IRGEN-LABEL: define linkonce_odr hidden swiftcc void @"$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze31_lIetilr_Tp5"(i32* noalias nocapture sret %0, i32* noalias nocapture dereferenceable(4) %1, i32* nocapture dereferenceable(4) %2, %swift.type* %S
+// CHECK-IRGEN: entry:
+// CHECK-IRGEN: %3 = load i32, i32* %2
+// CHECK-IRGEN-NEXT: store i32 %3, i32* %0
+// CHECK-IRGEN-NEXT: ret void
+// CHECK-IRGEN-NEXT:}
+
+// Check that a specialization for _Trivial(64) uses direct loads and stores
+// instead of value witness functions to load and store the value of a generic type.
+// CHECK-IRGEN-LABEL: define linkonce_odr hidden swiftcc void @"$s16eager_specialize18copyValueAndReturn_1sxx_xztlFxxxRlze63_lIetilr_Tp5"(i64* noalias nocapture sret %0, i64* noalias nocapture dereferenceable(8) %1, i64* nocapture dereferenceable(8) %2, %swift.type* %S
+// CHECK-IRGEN: entry:
+// CHECK-IRGEN: %3 = load i64, i64* %2
+// CHECK-IRGEN-NEXT: store i64 %3, i64* %0
+// CHECK-IRGEN-NEXT: ret void
+// CHECK-IRGEN-NEXT: }
+
+// Check that a specialization for _Trivial does not call the 'destroy' value witness,
+// because it is known that the object is Trivial, i.e. contains no references.
+// CHECK-IRGEN-LABEL: define linkonce_odr hidden swiftcc void @"$s16eager_specialize19copyValueAndReturn2_1sxx_xztlFxxxRlzTlIetilr_Tp5"(%swift.opaque* noalias nocapture sret %0, %swift.opaque* noalias nocapture %1, %swift.opaque* nocapture %2, %swift.type* %S
+// CHECK-IRGEN-NEXT: entry:
+// CHECK-IRGEN: %3 = bitcast %swift.type* %S to i8***
+// CHECK-IRGEN-NEXT: %4 = getelementptr inbounds i8**, i8*** %3, i{{.*}} -1
+// CHECK-IRGEN-NEXT: %S.valueWitnesses = load i8**, i8*** %4
+// CHECK-IRGEN-NEXT: %5 = getelementptr inbounds i8*, i8** %S.valueWitnesses
+// CHECK-IRGEN-NEXT: %6 = load i8*, i8** %5
+// CHECK-IRGEN-NEXT: %initializeWithCopy = {{.*}}
+// CHECK-IRGEN-arm64e-NEXT: ptrtoint i8** %5 to i64
+// CHECK-IRGEN-arm64e-NEXT: call i64 @llvm.ptrauth.blend.i64
+// CHECK-IRGEN-NEXT: call {{.*}} %initializeWithCopy
+// CHECK-IRGEN-NEXT: ret void
+// CHECK-IRGEN-NEXT: }
+
+// Check that a specialization for _RefCountedObject just copies the fixed-size reference,
+// and calls retain/release directly, instead of calling the value witness functions.
+// The matching patterns in this test are intentionally imprecise so that they cover
+// both objc and non-objc platforms.
+// CHECK-IRGEN-LABEL: define{{.*}}@"$s16eager_specialize19copyValueAndReturn3_1sxx_xztlFxxxRlzRlIetilr_Tp5"
+// CHECK-IRGEN: entry:
+// CHECK-IRGEN-NOT: ret void
+// CHECK-IRGEN: call {{.*}}etain
+// CHECK-IRGEN-NOT: ret void
+// CHECK-IRGEN: call {{.*}}elease
+// CHECK-IRGEN: ret void
+
+////////////////////////////////////////////////////////////////////
+// Check that try_apply instructions are handled correctly by the
+// eager specializer.
+////////////////////////////////////////////////////////////////////
+
+protocol ThrowingP {
+  func action() throws -> Int64
+}
+
+extension Int64 : ThrowingP {
+  public func action() throws -> Int64
+}
+
+extension Klass : ThrowingP {
+  public func action() throws -> Int64
+}
+
+class ClassUsingThrowingP {
+  required init()
+
+  @_specialize(exported: false, kind: full, where T == Klass)
+  @_specialize(exported: false, kind: full, where T == Int64)
+  public static func f<T>(_: T) throws -> Self where T : ThrowingP
+
+  @_specialize(exported: false, kind: full, where T == Klass)
+  @_specialize(exported: false, kind: full, where T == Int64)
+  public static func g<T>(_ t: T) throws -> Int64 where T : ThrowingP
+
+  @_specialize(exported: false, kind: full, where T == Klass)
+  @_specialize(exported: false, kind: full, where T == Int64)
+  public static func h<T>(_: T) throws -> Int64 where T : ThrowingP
+
+  deinit
+}
+
+// Int64.action()
+sil @$ss5Int64V34eager_specialize_throwing_functionE6actionAByKF : $@convention(method) (Int64) -> (Int64, @error Error)
+
+// protocol witness for ThrowingP.action() in conformance Int64
+sil @$ss5Int64V34eager_specialize_throwing_function9ThrowingPA2cDP6actionAByKFTW : $@convention(witness_method: ThrowingP) (@in_guaranteed Int64) -> (Int64, @error Error)
+
+sil @$s34eager_specialize_throwing_function19ClassUsingThrowingPCACycfc : $@convention(method) (@owned ClassUsingThrowingP) -> @owned ClassUsingThrowingP
+
+sil @$s34eager_specialize_throwing_function19ClassUsingThrowingPCfd : $@convention(method) (@guaranteed ClassUsingThrowingP) -> @owned Builtin.NativeObject
+
+// ClassUsingThrowingP.__allocating_init()
+sil @$s34eager_specialize_throwing_function19ClassUsingThrowingPCACycfC : $@convention(method) (@thick ClassUsingThrowingP.Type) -> @owned ClassUsingThrowingP
+
+// ClassUsingThrowingP.__deallocating_deinit
+sil @$s34eager_specialize_throwing_function19ClassUsingThrowingPCfD : $@convention(method) (@owned ClassUsingThrowingP) -> ()
+
+// f is a function that may throw according to its type, but never actually throws.
+// Check that this function is properly specialized by the eager specializer.
+// It should dispatch to its specialized version, but use apply [nothrow] to invoke
+// the specialized version.
+
+// CHECK-LABEL: sil [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1fyACXDxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in T, @thick ClassUsingThrowingP.Type) -> (@owned ClassUsingThrowingP, @error Error)
+// CHECK: [[SPECIALIZED:%.*]] = function_ref @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1fyACXDxKAA0G1PRzlFZs5Int64V_Tg5 : $@convention(method) (Int64, @thick ClassUsingThrowingP.Type) -> (@owned ClassUsingThrowingP, @error Error)
+// CHECK: apply [nothrow] [[SPECIALIZED]]
+// CHECK: // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1fyACXDxKAA0G1PRzlFZ'
+// static ClassUsingThrowingP.f(_:)
+sil [_specialize exported: false, kind: full, where T == Int64] [_specialize exported: false, kind: full, where T == Klass] [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1fyACXDxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in T, @thick ClassUsingThrowingP.Type) -> (@owned ClassUsingThrowingP, @error Error) {
+bb0(%0 : $*T, %1 : $@thick ClassUsingThrowingP.Type):
+  destroy_addr %0 : $*T
+  %4 = unchecked_trivial_bit_cast %1 : $@thick ClassUsingThrowingP.Type to $@thick @dynamic_self ClassUsingThrowingP.Type
+  // function_ref ClassUsingThrowingP.__allocating_init()
+  %7 = function_ref @$s34eager_specialize_throwing_function19ClassUsingThrowingPCACycfC : $@convention(method) (@thick ClassUsingThrowingP.Type) -> @owned ClassUsingThrowingP
+  %8 = upcast %4 : $@thick @dynamic_self ClassUsingThrowingP.Type to $@thick ClassUsingThrowingP.Type
+  %9 = apply %7(%8) : $@convention(method) (@thick ClassUsingThrowingP.Type) -> @owned ClassUsingThrowingP
+  %10 = unchecked_ref_cast %9 : $ClassUsingThrowingP to $ClassUsingThrowingP
+  return %10 : $ClassUsingThrowingP
+} // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1fyACXDxKAA0G1PRzlFZ'
+
+// g is a function that may throw according to its type and has a try_apply inside
+// its body.
+// Check that this function is properly specialized by the eager specializer.
+// It should dispatch to its specialized version and use try_apply to invoke
+// the specialized version.
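+
+// A Swift-level sketch of g, assuming the body simply forwards to the
+// protocol requirement (the test defines g directly in SIL via witness_method
+// plus try_apply):
+//
+//   @_specialize(exported: false, kind: full, where T == Klass)
+//   @_specialize(exported: false, kind: full, where T == Int64)
+//   public static func g<T: ThrowingP>(_ t: T) throws -> Int64 {
+//     return try t.action()
+//   }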
+
+// CHECK-LABEL: sil [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1gys5Int64VxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in T, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error)
+// CHECK: [[SPECIALIZED:%.*]] = function_ref @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1gys5Int64VxKAA0G1PRzlFZAF_Tg5 : $@convention(method) (Int64, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error)
+// CHECK: try_apply [[SPECIALIZED]]
+// CHECK: // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1gys5Int64VxKAA0G1PRzlFZ'
+
+// static ClassUsingThrowingP.g(_:)
+sil [_specialize exported: false, kind: full, where T == Int64] [_specialize exported: false, kind: full, where T == Klass] [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1gys5Int64VxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in T, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error) {
+bb0(%0 : $*T, %1 : $@thick ClassUsingThrowingP.Type):
+  %5 = witness_method $T, #ThrowingP.action : (Self) -> () throws -> Int64 : $@convention(witness_method: ThrowingP) <τ_0_0 where τ_0_0 : ThrowingP> (@in_guaranteed τ_0_0) -> (Int64, @error Error)
+  try_apply %5(%0) : $@convention(witness_method: ThrowingP) <τ_0_0 where τ_0_0 : ThrowingP> (@in_guaranteed τ_0_0) -> (Int64, @error Error), normal bb1, error bb2
+
+bb1(%7 : $Int64): // Preds: bb0
+  destroy_addr %0 : $*T
+  return %7 : $Int64
+
+bb2(%10 : @owned $Error): // Preds: bb0
+  destroy_addr %0 : $*T
+  throw %10 : $Error
+} // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1gys5Int64VxKAA0G1PRzlFZ'
+
+// Make sure we specialize this appropriately. Specializing this function
+// requires the use of load_borrow when reabstracting; make sure that we put
+// the matching end_borrow in the same place.
+//
+// CHECK-LABEL: sil [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1hys5Int64VxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in_guaranteed T, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error) {
+// CHECK: [[SPECIALIZED_1:%.*]] = function_ref @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1hys5Int64VxKAA0G1PRzlFZ4main5KlassC_Tg5 : $@convention(method) (@guaranteed Klass, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error)
+// CHECK: try_apply [[SPECIALIZED_1]]
+// CHECK: } // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1hys5Int64VxKAA0G1PRzlFZ'
+
+sil [_specialize exported: false, kind: full, where T == Int64] [_specialize exported: false, kind: full, where T == Klass] [ossa] @$s34eager_specialize_throwing_function19ClassUsingThrowingPC1hys5Int64VxKAA0G1PRzlFZ : $@convention(method) <T where T : ThrowingP> (@in_guaranteed T, @thick ClassUsingThrowingP.Type) -> (Int64, @error Error) {
+bb0(%0 : $*T, %1 : $@thick ClassUsingThrowingP.Type):
+  %5 = witness_method $T, #ThrowingP.action : (Self) -> () throws -> Int64 : $@convention(witness_method: ThrowingP) <τ_0_0 where τ_0_0 : ThrowingP> (@in_guaranteed τ_0_0) -> (Int64, @error Error)
+  try_apply %5(%0) : $@convention(witness_method: ThrowingP) <τ_0_0 where τ_0_0 : ThrowingP> (@in_guaranteed τ_0_0) -> (Int64, @error Error), normal bb1, error bb2
+
+bb1(%7 : $Int64): // Preds: bb0
+  return %7 : $Int64
+
+bb2(%10 : @owned $Error): // Preds: bb0
+  throw %10 : $Error
+} // end sil function '$s34eager_specialize_throwing_function19ClassUsingThrowingPC1hys5Int64VxKAA0G1PRzlFZ'
+
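+// A hypothetical Swift-level shape for testSimpleGeneric below (the test is
+// SIL-only; the assumption here is that '_Trivial(64, 64)' constrains both the
+// size and the alignment of T to 64 bits):
+//
+//   @_specialize(exported: false, kind: full, where T: _Trivial(64, 64))
+//   func testSimpleGeneric<T>(_ t: T) -> Int64 {
+//     return Int64(MemoryLayout<T>.size)
+//   }
+
+// Check that a specialization was produced and it is not inlined.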
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE-LABEL: sil{{.*}}@{{.*}}testSimpleGeneric{{.*}}where T : _Trivial(64, 64)
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE-LABEL: sil{{.*}}@testSimpleGeneric :
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: [[METATYPE:%.*]] = metatype $@thick T.Type
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: [[SIZEOF:%.*]] = builtin "sizeof"([[METATYPE]] : $@thick T.Type)
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: [[SIZE:%.*]] = integer_literal $Builtin.Word, 8
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: builtin "cmp_eq_Word"([[SIZEOF]] : $Builtin.Word, [[SIZE]] : $Builtin.Word)
+// Invoke the specialization, but do not inline it!
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: function_ref @{{.*}}testSimpleGeneric{{.*}}
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: apply
+// CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE: // end sil function 'testSimpleGeneric'
+
+sil [_specialize exported: false, kind: full, where T: _Trivial(64, 64)] [ossa] @testSimpleGeneric : $@convention(thin) <T> (@in T) -> Builtin.Int64 {
+bb0(%0 : $*T):
+  %1 = metatype $@thick T.Type
+  %2 = builtin "sizeof"(%1 : $@thick T.Type) : $Builtin.Word
+  %8 = builtin "zextOrBitCast_Word_Int64"(%2 : $Builtin.Word) : $Builtin.Int64
+  destroy_addr %0 : $*T
+  return %8 : $Builtin.Int64
+}
+
+sil_vtable ClassUsingThrowingP {
+  #ClassUsingThrowingP.init!allocator: (ClassUsingThrowingP.Type) -> () -> ClassUsingThrowingP : @$s34eager_specialize_throwing_function19ClassUsingThrowingPCACycfC // ClassUsingThrowingP.__allocating_init()
+  #ClassUsingThrowingP.init!initializer: (ClassUsingThrowingP.Type) -> () -> ClassUsingThrowingP : @$s34eager_specialize_throwing_function19ClassUsingThrowingPCACycfc // ClassUsingThrowingP.init()
+  #ClassUsingThrowingP.deinit!deallocator: @$s34eager_specialize_throwing_function19ClassUsingThrowingPCfD // ClassUsingThrowingP.__deallocating_deinit
+}
+
+sil_vtable Klass {
+}
+
+sil_vtable EltTrivialKlass {
+}
+
+sil_vtable ContainerKlass {
+}
+
+sil_witness_table hidden Int64: ThrowingP module eager_specialize_throwing_function {
+  method #ThrowingP.action: (Self) -> () throws -> Int64 : @$ss5Int64V34eager_specialize_throwing_function9ThrowingPA2cDP6actionAByKFTW // protocol witness for ThrowingP.action() in conformance Int64
+}
+
+sil_default_witness_table hidden ThrowingP {
+  no_default
+}
diff --git a/test/SILOptimizer/ownership_model_eliminator.sil b/test/SILOptimizer/ownership_model_eliminator.sil
index 2de217a619903..5a119d1d5278e 100644
--- a/test/SILOptimizer/ownership_model_eliminator.sil
+++ b/test/SILOptimizer/ownership_model_eliminator.sil
@@ -342,3 +342,14 @@ bb0(%0a : $Builtin.Int32, %0b : $Builtin.Int32):
 %9999 = tuple()
 return %9999 : $()
 }
+
+// Just make sure that we do not crash on this function.
+// +// CHECK-LABEL: sil @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 { +// CHECK: unchecked_bitwise_cast +// CHECK: } // end sil function 'lower_unchecked_value_cast_to_unchecked_bitwise_cast' +sil [ossa] @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 { +bb0(%0a : $Builtin.Int32): + %0b = unchecked_value_cast %0a : $Builtin.Int32 to $Builtin.Int32 + return %0b : $Builtin.Int32 +} diff --git a/utils/sil-mode.el b/utils/sil-mode.el index b55cf6ccee8b0..fe29c99d9b58b 100644 --- a/utils/sil-mode.el +++ b/utils/sil-mode.el @@ -150,6 +150,7 @@ "unchecked_ref_cast" "unchecked_trivial_bit_cast" "unchecked_bitwise_cast" + "unchecked_value_cast" "ref_to_raw_pointer" "raw_pointer_to_ref" "unowned_to_ref" "ref_to_unowned" "convert_function" "convert_escape_to_noescape"