diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 5e485ccb85a13..83c54144366b5 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -1103,6 +1103,11 @@ class VarDecl : public DeclaratorDecl, public Redeclarable<VarDecl> {
     LLVM_PREFERRED_TYPE(bool)
     unsigned IsCXXCondDecl : 1;
+
+    /// Whether this is a __block variable that is captured by a block
+    /// referenced in its own initializer.
+    LLVM_PREFERRED_TYPE(bool)
+    unsigned CapturedByOwnInit : 1;
   };
 
   union {
@@ -1588,10 +1593,23 @@ class VarDecl : public DeclaratorDecl, public Redeclarable<VarDecl> {
   /// escaping block.
   bool isNonEscapingByref() const;
 
-  void setEscapingByref() {
-    NonParmVarDeclBits.EscapingByref = true;
+  void setEscapingByref();
+
+  /// Indicates this is a __block variable that is captured by a block
+  /// referenced in its own initializer.
+  bool isCapturedByOwnInit() const {
+    return !isa<ParmVarDecl>(this) && NonParmVarDeclBits.CapturedByOwnInit;
   }
+  void setCapturedByOwnInit() {
+    assert(!isa<ParmVarDecl>(this));
+    NonParmVarDeclBits.CapturedByOwnInit = true;
+  }
+
+  /// Check if this is a __block variable which needs to be initialized
+  /// directly on the heap.
+  bool needsInitOnHeap() const;
+
   bool isCXXCondDecl() const {
     return isa<ParmVarDecl>(this) ?
                                  false : NonParmVarDeclBits.IsCXXCondDecl;
   }
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 5a32463763aa6..c1b214fa28f07 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -2358,6 +2358,12 @@ def note_var_fixit_add_initialization : Note<
 def note_uninit_fixit_remove_cond : Note<
   "remove the %select{'%1' if its condition|condition if it}0 "
   "is always %select{false|true}2">;
+def warn_implicit_block_var_alloc : Warning<
+  "variable %q0 will be initialized in a heap allocation">,
+  InGroup<DiagGroup<"implicit-block-var-alloc">>, DefaultIgnore;
+def note_because_captured_by_block : Note<
+  "because it is captured by a block used in its own initializer">;
+
 def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
 def err_list_init_in_parens : Error<
   "cannot initialize %select{non-class|reference}0 type %1 with a "
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 41fbfe281ef65..3a584849160a8 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -2680,6 +2680,19 @@ bool VarDecl::isNonEscapingByref() const {
   return hasAttr<BlocksAttr>() && !NonParmVarDeclBits.EscapingByref;
 }
 
+void VarDecl::setEscapingByref() {
+  assert(!isa<ParmVarDecl>(this) && hasAttr<BlocksAttr>());
+  NonParmVarDeclBits.EscapingByref = true;
+}
+
+bool VarDecl::needsInitOnHeap() const {
+  // We need direct initialization on the heap for self-capturing __block
+  // variables of types that cause CodeGenFunction::getEvaluationKind to return
+  // TEK_Aggregate. The only such types that can be captured are records.
+ return isCapturedByOwnInit() && + getType().getAtomicUnqualifiedType()->isRecordType(); +} + bool VarDecl::hasDependentAlignment() const { QualType T = getType(); return T->isDependentType() || T->isUndeducedType() || diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index bf50f2025de57..eb9e7d0f16107 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -2336,7 +2336,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo, // Create a scope with an artificial location for the body of this function. auto AL = ApplyDebugLocation::CreateArtificial(CGF); - if (generator.needsCopy()) { + if (generator.needsCopy() && !byrefInfo.ForInitOnHeap) { // dst->x Address destField = CGF.GetAddrOfLocalVar(&Dst); destField = Address(CGF.Builder.CreateLoad(destField), byrefInfo.Type, @@ -2405,9 +2405,32 @@ generateByrefDisposeHelper(CodeGenFunction &CGF, Address addr = CGF.GetAddrOfLocalVar(&Src); addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.Type, byrefInfo.ByrefAlignment); - addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object"); + llvm::BasicBlock *skipBB = nullptr; + if (byrefInfo.ForInitOnHeap) { + // For objects initialized on the heap, a separate field tracks whether + // the object has completed initialization. If it's still false here, + // then initialization threw an exception, so we don't want to run the + // destructor. + skipBB = CGF.createBasicBlock("skip"); + llvm::BasicBlock *runBB = CGF.createBasicBlock("run"); + + Address initializedAddr = CGF.Builder.CreateStructGEP( + addr, byrefInfo.IndexOfInitializedFlag, "byref.initialized"); + // This is safe to load non-atomically because the store of true (if any) + // happens while the creating function still holds a reference. 
+ llvm::Value *initialized = + CGF.Builder.CreateFlagLoad(initializedAddr.emitRawPointer(CGF)); + + CGF.Builder.CreateCondBr(initialized, runBB, skipBB); + CGF.EmitBlock(runBB); + } + + addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object"); generator.emitDispose(CGF, addr); + + if (skipBB) + CGF.EmitBlock(skipBB); } CGF.FinishFunction(); @@ -2503,15 +2526,14 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, // ARC __strong __block variables need to be retained. case Qualifiers::OCL_Strong: - // Block pointers need to be copied, and there's no direct - // transfer possible. if (type->isBlockPointerType()) { + // Block pointers need to be copied, and there's no direct + // transfer possible. return ::buildByrefHelpers(CGM, byrefInfo, ARCStrongBlockByrefHelpers(valueAlignment)); - - // Otherwise, we transfer ownership of the retain from the stack - // to the heap. } else { + // Otherwise, we transfer ownership of the retain from the stack + // to the heap. return ::buildByrefHelpers(CGM, byrefInfo, ARCStrongByrefHelpers(valueAlignment)); } @@ -2623,6 +2645,14 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) { size += CharUnits::fromQuantity(PointerSizeInBytes); } + unsigned indexOfInitializedFlag = UINT_MAX; + if (D->needsInitOnHeap()) { + /// uint8_t initialized; + types.push_back(Int8Ty); + size += CharUnits::fromQuantity(1); + indexOfInitializedFlag = types.size() - 1; + } + // T x; llvm::Type *varTy = ConvertTypeForMem(Ty); @@ -2652,12 +2682,56 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) { info.FieldIndex = types.size() - 1; info.FieldOffset = varOffset; info.ByrefAlignment = std::max(varAlign, getPointerAlign()); + info.ForInitOnHeap = D->needsInitOnHeap(); + info.IndexOfInitializedFlag = indexOfInitializedFlag; auto pair = BlockByrefInfos.insert({D, info}); assert(pair.second && "info was inserted recursively?"); return pair.first->second; } +/// Allocate a byref on the 
heap (but don't initialize it yet). +void CodeGenFunction::emitByrefHeapAlloc(llvm::Value *stackByref, + QualType declType) { + // The object itself is initialized directly on the heap. But for ABI + // backwards compatibility reasons, we have to set up a fake byref struct on + // the stack (with the structural components initialized but not the object + // itself), then call _Block_object_assign to move it to the heap (which is + // safe because we forced a no-op copy helper), then call + // _Block_object_dispose to release the extra ref from _Block_object_assign. + BlockFieldFlags flags = BLOCK_FIELD_IS_BYREF; + RawAddress heapByref = + CreateDefaultAlignTempAlloca(stackByref->getType(), "initOnHeap.ptr"); + + llvm::Value *args[] = {heapByref.getPointer(), stackByref, + llvm::ConstantInt::get(Int32Ty, flags.getBitMask())}; + EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args); + + BuildBlockRelease(stackByref, flags, false); + + // We need to add a cleanup as soon as the allocation happens, to ensure the + // allocation is freed if initialization throws. + if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) { + enterByrefCleanup(NormalAndEHCleanup, heapByref, BLOCK_FIELD_IS_BYREF, + /*LoadBlockVarAddr*/ true, + cxxDestructorCanThrow(declType)); + } +} + +/// Set the initialized flag of a heap-initialized byref. +void CodeGenFunction::emitByrefMarkInitialized( + const AutoVarEmission &emission) { + auto &info = getBlockByrefInfo(emission.Variable); + Address forwardingAddr = + Builder.CreateStructGEP(emission.Addr, 1, "forwarding"); + Address heapByref(Builder.CreateLoad(forwardingAddr), info.Type, + info.ByrefAlignment); + + Address initializedAddr = Builder.CreateStructGEP( + heapByref, info.IndexOfInitializedFlag, "byref.initialized"); + Builder.CreateFlagStore(true, initializedAddr.emitRawPointer(*this)); +} + /// Initialize the structural components of a __block variable, i.e. /// everything but the actual object. 
void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { @@ -2699,8 +2773,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { storeHeaderField(V, getPointerSize(), "byref.isa"); // Store the address of the variable into its own forwarding pointer. - storeHeaderField(addr.emitRawPointer(*this), getPointerSize(), - "byref.forwarding"); + llvm::Value *pointer = addr.emitRawPointer(*this); + storeHeaderField(pointer, getPointerSize(), "byref.forwarding"); // Blocks ABI: // c) the flags field is set to either 0 if no helper functions are @@ -2764,6 +2838,12 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { auto layoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type); storeHeaderField(layoutInfo, getPointerSize(), "byref.layout"); } + + if (emission.NeedsInitOnHeap) { + V = llvm::ConstantInt::get(Int8Ty, 0); + storeHeaderField(V, CharUnits::fromQuantity(1), "byref.initialized"); + emitByrefHeapAlloc(pointer, type); + } } void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags, diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h index 8d10c4f69b202..9b0d2087da31c 100644 --- a/clang/lib/CodeGen/CGBlocks.h +++ b/clang/lib/CodeGen/CGBlocks.h @@ -136,9 +136,14 @@ inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) { class BlockByrefInfo { public: llvm::StructType *Type; + /// Index of the field for the variable itself. unsigned FieldIndex; CharUnits ByrefAlignment; CharUnits FieldOffset; + /// Whether this will be initialized directly on the heap. + bool ForInitOnHeap; + /// If ForInitOnHeap is true, index of the 'initialized' field. 
+  unsigned IndexOfInitializedFlag;
 };
 
 /// Represents a type of copy/destroy operation that should be performed for an
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 4a213990d1e36..f833de95fe3f2 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1450,6 +1450,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
 
   bool isEscapingByRef = D.isEscapingByref();
   emission.IsEscapingByRef = isEscapingByRef;
+  emission.IsCapturedByOwnInit = D.isCapturedByOwnInit();
+  emission.NeedsInitOnHeap = D.needsInitOnHeap();
 
   CharUnits alignment = getContext().getDeclAlign(&D);
 
@@ -1692,68 +1694,6 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
   return emission;
 }
 
-static bool isCapturedBy(const VarDecl &, const Expr *);
-
-/// Determines whether the given __block variable is potentially
-/// captured by the given statement.
-static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
-  if (const Expr *E = dyn_cast<Expr>(S))
-    return isCapturedBy(Var, E);
-  for (const Stmt *SubStmt : S->children())
-    if (isCapturedBy(Var, SubStmt))
-      return true;
-  return false;
-}
-
-/// Determines whether the given __block variable is potentially
-/// captured by the given expression.
-static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
-  // Skip the most common kinds of expressions that make
-  // hierarchy-walking expensive.
-  E = E->IgnoreParenCasts();
-
-  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
-    const BlockDecl *Block = BE->getBlockDecl();
-    for (const auto &I : Block->captures()) {
-      if (I.getVariable() == &Var)
-        return true;
-    }
-
-    // No need to walk into the subexpressions.
-    return false;
-  }
-
-  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
-    const CompoundStmt *CS = SE->getSubStmt();
-    for (const auto *BI : CS->body())
-      if (const auto *BIE = dyn_cast<Expr>(BI)) {
-        if (isCapturedBy(Var, BIE))
-          return true;
-      }
-      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
-        // special case declarations
-        for (const auto *I : DS->decls()) {
-          if (const auto *VD = dyn_cast<VarDecl>((I))) {
-            const Expr *Init = VD->getInit();
-            if (Init && isCapturedBy(Var, Init))
-              return true;
-          }
-        }
-      }
-      else
-        // FIXME. Make safe assumption assuming arbitrary statements cause capturing.
-        // Later, provide code to poke into statements for capture analysis.
-        return true;
-    return false;
-  }
-
-  for (const Stmt *SubStmt : E->children())
-    if (isCapturedBy(Var, SubStmt))
-      return true;
-
-  return false;
-}
-
 /// Determine whether the given initializer is trivial in the sense
 /// that it requires no code to be generated.
 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
@@ -1918,8 +1858,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
   // Check whether this is a byref variable that's potentially
   // captured and moved by its own initializer. If so, we'll need to
   // emit the initializer first, then copy into the variable.
-  bool capturedByInit =
-      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
+  bool capturedByInit = emission.IsCapturedByOwnInit;
 
   bool locIsByrefHeader = !capturedByInit;
   const Address Loc =
@@ -1951,8 +1890,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
   llvm::Constant *constant = nullptr;
   if (emission.IsConstantAggregate ||
       D.mightBeUsableInConstantExpressions(getContext())) {
-    assert(!capturedByInit && "constant init contains a capturing block?");
     constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
+    assert(!(constant && capturedByInit) &&
+           "constant init contains a capturing block?");
     if (constant && !constant->isZeroValue() &&
         (trivialAutoVarInit !=
          LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
@@ -1975,6 +1915,10 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
     initializeWhatIsTechnicallyUninitialized(Loc);
   LValue lv = MakeAddrLValue(Loc, type);
   lv.setNonGC(true);
+  if (emission.NeedsInitOnHeap) {
+    drillIntoBlockVariable(*this, lv, &D);
+    capturedByInit = false; // We don't need to drill again.
+  }
   return EmitExprAsInit(Init, &D, lv, capturedByInit);
 }
 
@@ -2023,6 +1967,9 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
     return;
   }
   case TEK_Aggregate:
+    assert(
+        !capturedByInit &&
+        "Capture-by-initializer should have been handled by NeedsInitOnHeap!");
     if (type->isAtomicType()) {
       EmitAtomicInit(const_cast<Expr *>(init), lvalue);
     } else {
@@ -2031,7 +1978,6 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
         Overlap = AggValueSlot::DoesNotOverlap;
       else if (auto *FD = dyn_cast<FieldDecl>(D))
         Overlap = getOverlapForFieldInit(FD);
-      // TODO: how can we delay here if D is captured by its initializer?
       EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                                 AggValueSlot::IsDestructed,
                                                 AggValueSlot::DoesNotNeedGCBarriers,
@@ -2122,9 +2068,30 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
 
   const VarDecl &D = *emission.Variable;
 
-  // Check the type for a cleanup.
-  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
-    emitAutoVarTypeCleanup(emission, dtorKind);
+  if (emission.NeedsInitOnHeap) {
+    // If this is a __block variable that we initialized directly on the heap,
+    // then the stack object is uninitialized and doesn't need destruction.
+    // But we do need to tell the destroy helper that the real byref is
+    // successfully initialized.
+    emitByrefMarkInitialized(emission);
+  } else {
+    // Check the type for a cleanup.
+    if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
+      emitAutoVarTypeCleanup(emission, dtorKind);
+
+    // If this is a block variable, call _Block_object_destroy (on the
+    // unforwarded address). Don't enter this cleanup if we're in pure-GC
+    // mode.
+    if (emission.IsEscapingByRef &&
+        CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+      BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
+      if (D.getType().isObjCGCWeak())
+        Flags |= BLOCK_FIELD_IS_WEAK;
+      enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
+                        /*LoadBlockVarAddr*/ false,
+                        cxxDestructorCanThrow(D.getType()));
+    }
+  }
 
   // In GC mode, honor objc_precise_lifetime.
   if (getLangOpts().getGC() != LangOptions::NonGC &&
@@ -2142,19 +2109,6 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
     const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
     EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
   }
-
-  // If this is a block variable, call _Block_object_destroy
-  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
-  // mode.
- if (emission.IsEscapingByRef && - CGM.getLangOpts().getGC() != LangOptions::GCOnly) { - BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF; - if (emission.Variable->getType().isObjCGCWeak()) - Flags |= BLOCK_FIELD_IS_WEAK; - enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags, - /*LoadBlockVarAddr*/ false, - cxxDestructorCanThrow(emission.Variable->getType())); - } } CodeGenFunction::Destroyer * diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 5f3ee7eb943f9..e7ed5281057d5 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -2313,6 +2313,8 @@ class CodeGenFunction : public CodeGenTypeCache { class AutoVarEmission; + void emitByrefHeapAlloc(llvm::Value *fakeByref, QualType declType); + void emitByrefMarkInitialized(const AutoVarEmission &emission); void emitByrefStructureInit(const AutoVarEmission &emission); /// Enter a cleanup to destroy a __block variable. Note that this @@ -3356,6 +3358,14 @@ class CodeGenFunction : public CodeGenTypeCache { /// escaping block. bool IsEscapingByRef; + /// True if the variable is a __block variable that is captured by a block + // referenced in its own initializer. + bool IsCapturedByOwnInit; + + /// True if the variable is a __block variable that needs to be initialized + /// directly on the heap. + bool NeedsInitOnHeap; + /// True if the variable is of aggregate type and has a constant /// initializer. 
bool IsConstantAggregate; @@ -3374,7 +3384,8 @@ class CodeGenFunction : public CodeGenTypeCache { AutoVarEmission(const VarDecl &variable) : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr), - IsEscapingByRef(false), IsConstantAggregate(false), + IsEscapingByRef(false), IsCapturedByOwnInit(false), + NeedsInitOnHeap(false), IsConstantAggregate(false), SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {} bool wasEmittedAsGlobal() const { return !Addr.isValid(); } diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp index 2c5774da3f666..15fcc287d68da 100644 --- a/clang/lib/Sema/Sema.cpp +++ b/clang/lib/Sema/Sema.cpp @@ -2177,6 +2177,13 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) { SourceLocation Loc = VD->getLocation(); Expr *VarRef = new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); + // Note: If needsInitOnHeap is true, the move/copy constructor will not + // actually be called, so in theory we don't have to require that it exists. + // But needsInitOnHeap is based on a conservative estimate of __block + // variables that might be retained in their own initializers; it could be + // refined in the future, which could cause variables to suddenly start + // requiring move constructors again. To avoid the backwards compatibility + // hazard, require a move/copy constructor regardless of needsInitOnHeap. ExprResult Result; auto IE = InitializedEntity::InitializeBlock(Loc, T); if (S.getLangOpts().CPlusPlus23) { @@ -2204,6 +2211,62 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) { } } +namespace { +// Find blocks within a __block variable's initializer that capture that __block +// variable. 
+class CapturedByOwnInitVisitor
+    : public EvaluatedExprVisitor<CapturedByOwnInitVisitor> {
+  typedef EvaluatedExprVisitor<CapturedByOwnInitVisitor> Inherited;
+  VarDecl *const VD;
+
+public:
+  const BlockExpr *FoundBE;
+  CapturedByOwnInitVisitor(Sema &S, VarDecl *VD)
+      : Inherited(S.getASTContext()), VD(VD), FoundBE(nullptr) {}
+
+  void VisitBlockExpr(const BlockExpr *BE) {
+    if (FoundBE)
+      return; // No more work to do if already found.
+    for (const BlockDecl::Capture &BC : BE->getBlockDecl()->captures()) {
+      if (BC.getVariable() == VD) {
+        FoundBE = BE;
+        return;
+      }
+    }
+  }
+};
+} // namespace
+
+static void checkCapturedByOwnInit(VarDecl *VD, Sema &S) {
+  Expr *I = VD->getInit();
+  if (!I)
+    return;
+
+  // Check whether the variable is captured by a block literal within its own
+  // initializer.
+  CapturedByOwnInitVisitor V(S, VD);
+  V.Visit(I);
+  if (!V.FoundBE)
+    return;
+
+  if (VD->hasConstantInitialization()) {
+    // Block literals with captures can't be constant-evaluated. But we can
+    // still reach here if the initializer syntactically contains a block
+    // literal that's never actually evaluated. And if it's never evaluated,
+    // then we don't need to care about the capture.
+    return;
+  }
+
+  // Mark as self-capturing.
+  VD->setCapturedByOwnInit();
+  // If this is a record type, then it will need an implicit heap
+  // allocation, so warn.
+  if (VD->needsInitOnHeap()) {
+    S.Diag(VD->getLocation(), diag::warn_implicit_block_var_alloc) << VD;
+    S.Diag(V.FoundBE->getCaretLocation(), diag::note_because_captured_by_block);
+  }
+}
+
 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
   // Set the EscapingByref flag of __block variables captured by
   // escaping blocks.
@@ -2233,6 +2296,9 @@ static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
       // __block variables might require us to capture a copy-initializer.
if (!VD->isEscapingByref()) continue; + + checkCapturedByOwnInit(VD, S); + // It's currently invalid to ever have a __block variable with an // array type; should we diagnose that here? // Regardless, we don't want to ignore array nesting when diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp index a6254b70560c3..d1ea9d4dbde48 100644 --- a/clang/lib/Serialization/ASTReaderDecl.cpp +++ b/clang/lib/Serialization/ASTReaderDecl.cpp @@ -1629,6 +1629,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) { VarDeclBits.getNextBit(); VD->NonParmVarDeclBits.EscapingByref = VarDeclBits.getNextBit(); + VD->NonParmVarDeclBits.CapturedByOwnInit = VarDeclBits.getNextBit(); HasDeducedType = VarDeclBits.getNextBit(); VD->NonParmVarDeclBits.ImplicitParamKind = VarDeclBits.getNextBits(/*Width*/ 3); diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp index c2f1d1b44241c..dc519fb9852ff 100644 --- a/clang/lib/Serialization/ASTWriterDecl.cpp +++ b/clang/lib/Serialization/ASTWriterDecl.cpp @@ -1153,6 +1153,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) { VarDeclBits.addBit(D->isPreviousDeclInSameBlockScope()); VarDeclBits.addBit(D->isEscapingByref()); + VarDeclBits.addBit(D->isCapturedByOwnInit()); HasDeducedType = D->getType()->getContainedDeducedType(); VarDeclBits.addBit(HasDeducedType); @@ -2513,13 +2514,14 @@ void ASTWriter::WriteDeclAbbrevs() { // VarDecl Abv->Add(BitCodeAbbrevOp( BitCodeAbbrevOp::Fixed, - 21)); // Packed Var Decl bits: Linkage, ModulesCodegen, + 22)); // Packed Var Decl bits: Linkage, ModulesCodegen, // SClass, TSCSpec, InitStyle, // isARCPseudoStrong, IsThisDeclarationADemotedDefinition, // isExceptionVariable, isNRVOVariable, isCXXForRangeDecl, // isInline, isInlineSpecified, isConstexpr, // isInitCapture, isPrevDeclInSameScope, - // EscapingByref, HasDeducedType, ImplicitParamKind, isObjCForDecl + // EscapingByref, CapturedByOwnInit, 
HasDeducedType, + // ImplicitParamKind, isObjCForDecl Abv->Add(BitCodeAbbrevOp(0)); // VarKind (local enum) // Type Source Info Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); diff --git a/clang/test/CodeGenCXX/block-capture.cpp b/clang/test/CodeGenCXX/block-capture.cpp index c235d97bf5fbc..0b193354d878d 100644 --- a/clang/test/CodeGenCXX/block-capture.cpp +++ b/clang/test/CodeGenCXX/block-capture.cpp @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -x c++ -std=c++11 -fblocks -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple arm64e-apple-ios -x c++ -std=c++20 -fblocks -Wimplicit-block-var-alloc -fexceptions \ +// RUN: -verify -emit-llvm %s -o - | \ +// RUN: FileCheck --implicit-check-not "call{{.*}}_ZN3BigD" %s // CHECK: %struct.__block_byref_baz = type { ptr, ptr, i32, i32, i32 } // CHECK: [[baz:%[0-9a-z_]*]] = alloca %struct.__block_byref_baz @@ -6,8 +8,184 @@ // CHECK: store ptr [[baz]], ptr [[bazref]] // CHECK: call void @_Block_object_dispose(ptr [[baz]] -int main() { +int a() { __block int baz = [&]() { return 0; }(); - ^{ (void)baz; }; + (void)^{ + (void)baz; + }; return 0; } + +class Big { +public: + Big(const Big &); + ~Big(); + +private: + Big(); + int s[100]; +}; +Big getBig(Big * (^)()); + +void myCleanup(void *); + +// CHECK: define void @_Z11heapInitBigv +// CHECK: store i8 0, ptr {{.*}}initialized +// CHECK: call void @_Block_object_assign(ptr {{.*}}, ptr {{.*}}, i32 8) +// CHECK: call void @_Block_object_dispose(ptr {{.*}}, i32 8) +// CHECK: %B11 = getelementptr inbounds %struct.__block_byref_B1, ptr %{{.*}}, i32 0, i32 +// CHECK: invoke void @_Z6getBigU13block_pointerFP3BigvE({{[^,]*}} %B11, +// +// CHECK: invoke.cont: +// CHECK: store i1 true, ptr {{.*}}initialized +// CHECK: call void @_Block_object_dispose +// (no call to destructor, enforced by --implicit-check-not) +// +// CHECK: define internal void @__Block_byref_object_copy_ +// (no call to copy constructor, enforced by --implicit-check-not) +// +// CHECK: define internal void 
@__Block_byref_object_dispose_ +// CHECK: load {{.*}}initialized +// CHECK: br {{.*}}label %skip +// CHECK: call {{.*}} @_ZN3BigD1Ev( +// CHECK: skip: +// +void heapInitBig() { + // expected-warning@+1{{variable 'B1' will be initialized in a heap allocation}} + __block Big B1 = ({ // Make sure this works with statement expressions. + getBig( + // expected-note@+1{{because it is captured by a block used in its own initializer}} + ^{ + return &B1; + }); + }); +} + +struct Small { + int x[2]; +}; +extern Small getSmall(const void * (^)()); +extern Small getSmall(const void * (^)(), const void * (^)()); +extern Small getSmall(__SIZE_TYPE__); +extern int getInt(const void *(^)()); + +// CHECK: define {{.*}} @_Z19heapInitSmallAtomicv +// CHECK: %S11 = getelementptr inbounds %struct.__block_byref_S1, ptr %{{.*}}, i32 0, i32 +// CHECK: [[call:%[0-9a-z_\.]*]] = invoke i64 @_Z8getSmallU13block_pointerFPKvvE( +// CHECK: [[dive:%[0-9a-z_\.]*]] = getelementptr inbounds %struct.Small, ptr %S11, i32 0, i32 0 +// CHECK: store i64 [[call]], ptr [[dive]], align 8 + +void heapInitSmallAtomic() { + // expected-warning@+1{{variable 'S1' will be initialized in a heap allocation}} + __block _Atomic(Small) S1 = getSmall( + // expected-note@+1{{because it is captured by a block used in its own initializer}} + ^const void * { + return &S1; + }); +} + +// With multiple blocks capturing the same variable, we only note the first one. +// In theory it would be more helpful to note each block, but that would mess up +// the grammar of the diagnostic. +void heapInitTwoSmall() { + // expected-warning@+1{{variable 'S2' will be initialized in a heap allocation}} + __block Small S2 = getSmall( + // expected-note@+1{{because it is captured by a block used in its own initializer}} + ^const void * { + return &S2; + }, + ^const void * { + return &S2; + }); +} + + +// Test __attribute__((cleanup)) with direct init on heap. 
We expect this to +// first call the cleanup function on the heap allocation, then call +// _Block_object_dispose (which runs the destructor and frees the allocation). +// +// CHECK: define {{.*}} @_Z19heapInitWithCleanupv +// CHECK: store i1 true, ptr {{.*}}initialized +// CHECK: [[fwd:%.*]] = load ptr, ptr %forwarding +// CHECK: [[obj:%.*]] = getelementptr inbounds %struct.__block_byref_B2, ptr [[fwd]], i32 0, i32 8 +// CHECK: invoke void @_Z9myCleanupPv({{.*}}[[obj]]) +// CHECK-NOT: ret +// CHECK: call void @_Block_object_dispose(ptr % +void heapInitWithCleanup() { + // expected-warning@+1{{variable 'B2' will be initialized in a heap allocation}} + __attribute__((cleanup(myCleanup))) __block Big B2 = getBig( + // expected-note@+1{{because it is captured by a block used in its own initializer}} + ^{ + return &B2; + }); +} + +// Same as above but without direct init on heap; therefore, the stack copy +// needs to be destroyed in addition to the above operations. This must happen +// after the cleanup call (and also happens after _Block_object_dispose, but +// doesn't need to). +// +// CHECK: define {{.*}} @_Z22nonHeapInitWithCleanupv() +// CHECK: [[fwd:%.*]] = load ptr, ptr %forwarding +// CHECK: [[obj:%.*]] = getelementptr inbounds %struct.__block_byref_B3, ptr [[fwd]], i32 0, i32 +// CHECK: invoke void @_Z9myCleanupPv({{.*}}[[obj]]) +// +// CHECK: invoke.cont: +// CHECK: call void @_Block_object_dispose +// CHECK: call {{.*}} @_ZN3BigD1Ev( +// +// CHECK: lpad: +// CHECK: call void @_Block_object_dispose +// CHECK: call {{.*}} @_ZN3BigD1Ev( +// +// CHECK: define internal void @__Block_byref_object_dispose_ +// CHECK: call {{.*}} @_ZN3BigD1Ev( +void nonHeapInitWithCleanup() { + __attribute__((cleanup(myCleanup))) __block Big B3 = getBig(nullptr); + (void)^{ return &B3; }; +} + +// This used to cause an ICE because the type of the variable makes it eligible +// for constant initialization (but the actual initializer doesn't). 
+// +// It's also not actually a self-reference, so we should not get the warning. +// The block is not capturing itself; it's only capturing a pointer to itself +// (thus, e.g., calling Block_copy would not make it safe to use after the +// function returns). You might expect C++ lifetime extension to affect this +// but it doesn't. +void constRef() { + __block const Small &S = getSmall(^const void * { + return &S; + }); +} + +// This is also not actually a self-reference because the block literal +// is in an unevaluated expression. We should not get the warning. +void unevaluated() { + __block Small S = getSmall(sizeof(^const void * { + return &S; + })); +} + +// These are not self-references because the initializers are +// const-evaluated and the block literal happens to never get executed +// (despite not being in an "unevaluated expression" type-system-wise). +void constInits() { + __block constexpr const int I = true ? 1000 : getInt(^const void *{ + return &I; + }); + __block constexpr Small S = true ? Small{2000, 3000} : getSmall(^const void *{ + return &S; + }); + __block const Small &S2 = true ? Small{4000, 5000} : getSmall(^const void *{ + return &S2; + }); +} + +// This is definitely not a self-reference. +void irrelevantVariable() { + __block Small Irrelevant; + __block Small NotCaptured = getSmall(^const void * { + return &Irrelevant; + }); +}