diff --git a/.ci/generate-buildkite-pipeline-premerge b/.ci/generate-buildkite-pipeline-premerge index 2e503c867403b..81e9246de9b58 100755 --- a/.ci/generate-buildkite-pipeline-premerge +++ b/.ci/generate-buildkite-pipeline-premerge @@ -108,7 +108,7 @@ function add-dependencies() { compiler-rt|libc|openmp) echo clang lld ;; - flang|lldb) + flang|lldb|libclc) for p in llvm clang; do echo $p done diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml index a0428336d300f..9cf64417d3cb2 100644 --- a/.github/new-prs-labeler.yml +++ b/.github/new-prs-labeler.yml @@ -1,3 +1,9 @@ +ClangIR: + - clang/include/clang/CIR/**/* + - clang/lib/CIR/**/* + - clang/tools/cir-*/**/* + - clang/test/CIR/**/* + clang:dataflow: - clang/include/clang/Analysis/FlowSensitive/**/* - clang/lib/Analysis/FlowSensitive/**/* @@ -938,3 +944,6 @@ openmp:libomptarget: bazel: - utils/bazel/** + +offload: + - offload/** diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 81961a1803c41..5068ae6f90cf9 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -1,4 +1,8 @@ name: "Check code formatting" + +permissions: + contents: read + on: pull_request: branches: diff --git a/bolt/docs/BAT.md b/bolt/docs/BAT.md index f23ef1abf8761..7ffb5d7c00816 100644 --- a/bolt/docs/BAT.md +++ b/bolt/docs/BAT.md @@ -81,9 +81,10 @@ Hot indices are delta encoded, implicitly starting at zero. | `FuncHash` | 8b | Function hash for input function | Hot | | `NumBlocks` | ULEB128 | Number of basic blocks in the original function | Hot | | `NumSecEntryPoints` | ULEB128 | Number of secondary entry points in the original function | Hot | +| `ColdInputSkew` | ULEB128 | Skew to apply to all input offsets | Cold | | `NumEntries` | ULEB128 | Number of address translation entries for a function | Both | -| `EqualElems` | ULEB128 | Number of equal offsets in the beginning of a function | Hot | -| `BranchEntries` | Bitmask, `alignTo(EqualElems, 8)` bits | If `EqualElems` is non-zero, bitmask denoting entries with `BRANCHENTRY` bit | Hot | +| `EqualElems` | ULEB128 | Number of equal offsets in the beginning of a function | Both | +| `BranchEntries` | Bitmask, `alignTo(EqualElems, 8)` bits | If `EqualElems` is non-zero, bitmask denoting entries with `BRANCHENTRY` bit | Both | Function header is followed by *Address Translation Table* with `NumEntries` total entries, and *Secondary Entry Points* table with `NumSecEntryPoints` @@ -99,8 +100,8 @@ entry is encoded. Input offsets implicitly start at zero. | `BBHash` | Optional, 8b | Basic block hash in input binary | BB | | `BBIdx` | Optional, Delta, ULEB128 | Basic block index in input binary | BB | -For hot fragments, the table omits the first `EqualElems` input offsets -where the input offset equals output offset. +The table omits the first `EqualElems` input offsets where the input offset +equals output offset. `BRANCHENTRY` bit denotes whether a given offset pair is a control flow source (branch or call instruction). 
If not set, it signifies a control flow target diff --git a/bolt/include/bolt/Core/BinaryData.h b/bolt/include/bolt/Core/BinaryData.h index 495163f1b61aa..8a67b3e73b802 100644 --- a/bolt/include/bolt/Core/BinaryData.h +++ b/bolt/include/bolt/Core/BinaryData.h @@ -107,7 +107,6 @@ class BinaryData { std::vector &getSymbols() { return Symbols; } bool hasName(StringRef Name) const; - bool hasNameRegex(StringRef Name) const; bool nameStartsWith(StringRef Prefix) const; bool hasSymbol(const MCSymbol *Symbol) const { diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h index bc047fefa3151..26d2d01f86267 100644 --- a/bolt/include/bolt/Core/BinaryFunction.h +++ b/bolt/include/bolt/Core/BinaryFunction.h @@ -1402,7 +1402,8 @@ class BinaryFunction { /// Return true if the function has CFI instructions bool hasCFI() const { - return !FrameInstructions.empty() || !CIEFrameInstructions.empty(); + return !FrameInstructions.empty() || !CIEFrameInstructions.empty() || + IsInjected; } /// Return unique number associated with the function. diff --git a/bolt/include/bolt/Profile/BoltAddressTranslation.h b/bolt/include/bolt/Profile/BoltAddressTranslation.h index 6a0a477f27583..68b993ee363cc 100644 --- a/bolt/include/bolt/Profile/BoltAddressTranslation.h +++ b/bolt/include/bolt/Profile/BoltAddressTranslation.h @@ -119,11 +119,6 @@ class BoltAddressTranslation { /// True if a given \p Address is a function with translation table entry. bool isBATFunction(uint64_t Address) const { return Maps.count(Address); } - /// Returns branch offsets grouped by containing basic block in a given - /// function. - std::unordered_map> - getBFBranches(uint64_t FuncOutputAddress) const; - /// For a given \p Symbol in the output binary and known \p InputOffset /// return a corresponding pair of parent BinaryFunction and secondary entry /// point in it. @@ -154,9 +149,9 @@ class BoltAddressTranslation { /// entries in function address translation map. APInt calculateBranchEntriesBitMask(MapTy &Map, size_t EqualElems); - /// Calculate the number of equal offsets (output = input) in the beginning - /// of the function. - size_t getNumEqualOffsets(const MapTy &Map) const; + /// Calculate the number of equal offsets (output = input - skew) in the + /// beginning of the function. + size_t getNumEqualOffsets(const MapTy &Map, uint32_t Skew) const; std::map Maps; @@ -193,7 +188,7 @@ class BoltAddressTranslation { EntryTy(unsigned Index, size_t Hash) : Index(Index), Hash(Hash) {} }; - std::unordered_map Map; + std::map Map; const EntryTy &getEntry(uint32_t BBInputOffset) const { auto It = Map.find(BBInputOffset); assert(It != Map.end()); @@ -218,6 +213,10 @@ class BoltAddressTranslation { } size_t getNumBasicBlocks() const { return Map.size(); } + + auto begin() const { return Map.begin(); } + auto end() const { return Map.end(); } + auto upper_bound(uint32_t Offset) const { return Map.upper_bound(Offset); } }; /// Map function output address to its hash and basic blocks hash map. diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h index 4fbe524b1c385..84f76caae9dbb 100644 --- a/bolt/include/bolt/Profile/DataAggregator.h +++ b/bolt/include/bolt/Profile/DataAggregator.h @@ -225,6 +225,10 @@ class DataAggregator : public DataReader { /// Aggregation statistics uint64_t NumInvalidTraces{0}; uint64_t NumLongRangeTraces{0}; + /// Specifies how many samples were recorded in cold areas if we are dealing + /// with profiling data collected in a bolted binary. 
For LBRs, incremented + /// for the source of the branch to avoid counting cold activity twice (one + /// for source and another for destination). uint64_t NumColdSamples{0}; /// Looks into system PATH for Linux Perf and set up the aggregator to use it @@ -245,14 +249,12 @@ class DataAggregator : public DataReader { /// disassembled BinaryFunctions BinaryFunction *getBinaryFunctionContainingAddress(uint64_t Address) const; + /// Perform BAT translation for a given \p Func and return the parent + /// BinaryFunction or nullptr. + BinaryFunction *getBATParentFunction(const BinaryFunction &Func) const; + /// Retrieve the location name to be used for samples recorded in \p Func. - /// If doing BAT translation, link cold parts to the hot part names (used by - /// the original binary). \p Count specifies how many samples were recorded - /// at that location, so we can tally total activity in cold areas if we are - /// dealing with profiling data collected in a bolted binary. For LBRs, - /// \p Count should only be used for the source of the branch to avoid - /// counting cold activity twice (one for source and another for destination). - StringRef getLocationName(BinaryFunction &Func, uint64_t Count); + StringRef getLocationName(const BinaryFunction &Func) const; /// Semantic actions - parser hooks to interpret parsed perf samples /// Register a sample (non-LBR mode), i.e. a new hit at \p Address @@ -467,9 +469,6 @@ class DataAggregator : public DataReader { std::error_code writeBATYAML(BinaryContext &BC, StringRef OutputFilename) const; - /// Fixup profile collected on BOLTed binary, namely handle split functions. - void fixupBATProfile(BinaryContext &BC); - /// Filter out binaries based on PID void filterBinaryMMapInfo(); diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h index 826677cd63b22..af832b4c7c84c 100644 --- a/bolt/include/bolt/Rewrite/RewriteInstance.h +++ b/bolt/include/bolt/Rewrite/RewriteInstance.h @@ -368,13 +368,6 @@ class RewriteInstance { /// rewritten binary. void patchBuildID(); - /// Return file offset corresponding to a given virtual address. - uint64_t getFileOffsetFor(uint64_t Address) { - assert(Address >= NewTextSegmentAddress && - "address in not in the new text segment"); - return Address - NewTextSegmentAddress + NewTextSegmentOffset; - } - /// Return file offset corresponding to a virtual \p Address. /// Return 0 if the address has no mapping in the file, including being /// part of .bss section. @@ -398,9 +391,6 @@ class RewriteInstance { /// Return true if the section holds debug information. static bool isDebugSection(StringRef SectionName); - /// Return true if the section holds linux kernel symbol information. - static bool isKSymtabSection(StringRef SectionName); - /// Adds Debug section to overwrite. static void addToDebugSectionsToOverwrite(const char *Section) { DebugSectionsToOverwrite.emplace_back(Section); diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp index 47eae964e816c..ad2eb18caf109 100644 --- a/bolt/lib/Core/BinaryContext.cpp +++ b/bolt/lib/Core/BinaryContext.cpp @@ -555,6 +555,9 @@ bool BinaryContext::analyzeJumpTable(const uint64_t Address, const uint64_t NextJTAddress, JumpTable::AddressesType *EntriesAsAddress, bool *HasEntryInFragment) const { + // Target address of __builtin_unreachable. + const uint64_t UnreachableAddress = BF.getAddress() + BF.getSize(); + // Is one of the targets __builtin_unreachable? 
bool HasUnreachable = false; @@ -564,9 +567,15 @@ bool BinaryContext::analyzeJumpTable(const uint64_t Address, // Number of targets other than __builtin_unreachable. uint64_t NumRealEntries = 0; - auto addEntryAddress = [&](uint64_t EntryAddress) { - if (EntriesAsAddress) - EntriesAsAddress->emplace_back(EntryAddress); + // Size of the jump table without trailing __builtin_unreachable entries. + size_t TrimmedSize = 0; + + auto addEntryAddress = [&](uint64_t EntryAddress, bool Unreachable = false) { + if (!EntriesAsAddress) + return; + EntriesAsAddress->emplace_back(EntryAddress); + if (!Unreachable) + TrimmedSize = EntriesAsAddress->size(); }; ErrorOr Section = getSectionForAddress(Address); @@ -618,8 +627,8 @@ bool BinaryContext::analyzeJumpTable(const uint64_t Address, : *getPointerAtAddress(EntryAddress); // __builtin_unreachable() case. - if (Value == BF.getAddress() + BF.getSize()) { - addEntryAddress(Value); + if (Value == UnreachableAddress) { + addEntryAddress(Value, /*Unreachable*/ true); HasUnreachable = true; LLVM_DEBUG(dbgs() << formatv("OK: {0:x} __builtin_unreachable\n", Value)); continue; @@ -673,6 +682,13 @@ bool BinaryContext::analyzeJumpTable(const uint64_t Address, addEntryAddress(Value); } + // Trim direct/normal jump table to exclude trailing unreachable entries that + // can collide with a function address. + if (Type == JumpTable::JTT_NORMAL && EntriesAsAddress && + TrimmedSize != EntriesAsAddress->size() && + getBinaryFunctionAtAddress(UnreachableAddress)) + EntriesAsAddress->resize(TrimmedSize); + // It's a jump table if the number of real entries is more than 1, or there's // one real entry and one or more special targets. If there are only multiple // special targets, then it's not a jump table. @@ -1864,7 +1880,7 @@ MarkerSymType BinaryContext::getMarkerType(const SymbolRef &Symbol) const { // For aarch64 and riscv, the ABI defines mapping symbols so we identify data // in the code section (see IHI0056B). $x identifies a symbol starting code or // the end of a data chunk inside code, $d identifies start of data. - if ((!isAArch64() && !isRISCV()) || ELFSymbolRef(Symbol).getSize()) + if (isX86() || ELFSymbolRef(Symbol).getSize()) return MarkerSymType::NONE; Expected NameOrError = Symbol.getName(); diff --git a/bolt/lib/Core/BinaryData.cpp b/bolt/lib/Core/BinaryData.cpp index 0068a93580042..e9ddf08d8695f 100644 --- a/bolt/lib/Core/BinaryData.cpp +++ b/bolt/lib/Core/BinaryData.cpp @@ -55,14 +55,6 @@ bool BinaryData::hasName(StringRef Name) const { return false; } -bool BinaryData::hasNameRegex(StringRef NameRegex) const { - Regex MatchName(NameRegex); - for (const MCSymbol *Symbol : Symbols) - if (MatchName.match(Symbol->getName())) - return true; - return false; -} - bool BinaryData::nameStartsWith(StringRef Prefix) const { for (const MCSymbol *Symbol : Symbols) if (Symbol->getName().starts_with(Prefix)) diff --git a/bolt/lib/Core/BinaryEmitter.cpp b/bolt/lib/Core/BinaryEmitter.cpp index 97d19b75200f5..6f86ddc774544 100644 --- a/bolt/lib/Core/BinaryEmitter.cpp +++ b/bolt/lib/Core/BinaryEmitter.cpp @@ -512,7 +512,7 @@ void BinaryEmitter::emitFunctionBody(BinaryFunction &BF, FunctionFragment &FF, // Emit sized NOPs via MCAsmBackend::writeNopData() interface on x86. // This is a workaround for invalid NOPs handling by asm/disasm layer. 
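The BinaryContext::analyzeJumpTable change above tracks TrimmedSize so that trailing __builtin_unreachable entries (which point one past the end of the function) can be dropped from normal jump tables when another function starts at that very address. A minimal sketch of the trimming idea, using plain standard types instead of the BOLT classes (names here are illustrative):

    #include <cstdint>
    #include <vector>

    // Keep entries only up to the last real target. Trailing entries equal to
    // UnreachableAddress (the function's one-past-the-end address) are dropped
    // so they cannot be mistaken for pointers to a function that happens to
    // start at that same address.
    void trimTrailingUnreachable(std::vector<uint64_t> &Entries,
                                 uint64_t UnreachableAddress) {
      size_t TrimmedSize = 0;
      for (size_t I = 0; I < Entries.size(); ++I)
        if (Entries[I] != UnreachableAddress)
          TrimmedSize = I + 1;
      Entries.resize(TrimmedSize);
    }

In the patch itself the trim is applied only to JTT_NORMAL tables and only when a function is actually registered at UnreachableAddress.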
- if (BC.MIB->isNoop(Instr) && BC.isX86()) { + if (BC.isX86() && BC.MIB->isNoop(Instr)) { if (std::optional Size = BC.MIB->getSize(Instr)) { SmallString<15> Code; raw_svector_ostream VecOS(Code); diff --git a/bolt/lib/Core/Relocation.cpp b/bolt/lib/Core/Relocation.cpp index d16b7a94787c6..4e888a5b147ac 100644 --- a/bolt/lib/Core/Relocation.cpp +++ b/bolt/lib/Core/Relocation.cpp @@ -1064,21 +1064,19 @@ MCBinaryExpr::Opcode Relocation::getComposeOpcodeFor(uint64_t Type) { } } -#define ELF_RELOC(name, value) #name, - void Relocation::print(raw_ostream &OS) const { - static const char *X86RelocNames[] = { -#include "llvm/BinaryFormat/ELFRelocs/x86_64.def" - }; - static const char *AArch64RelocNames[] = { -#include "llvm/BinaryFormat/ELFRelocs/AArch64.def" - }; switch (Arch) { default: OS << "RType:" << Twine::utohexstr(Type); break; case Triple::aarch64: + static const char *const AArch64RelocNames[] = { +#define ELF_RELOC(name, value) #name, +#include "llvm/BinaryFormat/ELFRelocs/AArch64.def" +#undef ELF_RELOC + }; + assert(Type < ArrayRef(AArch64RelocNames).size()); OS << AArch64RelocNames[Type]; break; @@ -1088,16 +1086,22 @@ void Relocation::print(raw_ostream &OS) const { switch (Type) { default: llvm_unreachable("illegal RISC-V relocation"); -#undef ELF_RELOC #define ELF_RELOC(name, value) \ case value: \ OS << #name; \ break; #include "llvm/BinaryFormat/ELFRelocs/RISCV.def" +#undef ELF_RELOC } break; case Triple::x86_64: + static const char *const X86RelocNames[] = { +#define ELF_RELOC(name, value) #name, +#include "llvm/BinaryFormat/ELFRelocs/x86_64.def" +#undef ELF_RELOC + }; + assert(Type < ArrayRef(X86RelocNames).size()); OS << X86RelocNames[Type]; break; } diff --git a/bolt/lib/Profile/BoltAddressTranslation.cpp b/bolt/lib/Profile/BoltAddressTranslation.cpp index 59d499f97be72..7cfb9c132c2c6 100644 --- a/bolt/lib/Profile/BoltAddressTranslation.cpp +++ b/bolt/lib/Profile/BoltAddressTranslation.cpp @@ -153,12 +153,13 @@ APInt BoltAddressTranslation::calculateBranchEntriesBitMask(MapTy &Map, return BitMask; } -size_t BoltAddressTranslation::getNumEqualOffsets(const MapTy &Map) const { +size_t BoltAddressTranslation::getNumEqualOffsets(const MapTy &Map, + uint32_t Skew) const { size_t EqualOffsets = 0; for (const std::pair &KeyVal : Map) { const uint32_t OutputOffset = KeyVal.first; const uint32_t InputOffset = KeyVal.second >> 1; - if (OutputOffset == InputOffset) + if (OutputOffset == InputOffset - Skew) ++EqualOffsets; else break; @@ -196,12 +197,17 @@ void BoltAddressTranslation::writeMaps(std::map &Maps, SecondaryEntryPointsMap.count(Address) ? SecondaryEntryPointsMap[Address].size() : 0; + uint32_t Skew = 0; if (Cold) { auto HotEntryIt = Maps.find(ColdPartSource[Address]); assert(HotEntryIt != Maps.end()); size_t HotIndex = std::distance(Maps.begin(), HotEntryIt); encodeULEB128(HotIndex - PrevIndex, OS); PrevIndex = HotIndex; + // Skew of all input offsets for cold fragments is simply the first input + // offset. + Skew = Map.begin()->second >> 1; + encodeULEB128(Skew, OS); } else { // Function hash size_t BFHash = getBFHash(HotInputAddress); @@ -217,24 +223,21 @@ void BoltAddressTranslation::writeMaps(std::map &Maps, << '\n'); } encodeULEB128(NumEntries, OS); - // For hot fragments only: encode the number of equal offsets - // (output = input) in the beginning of the function. Only encode one offset - // in these cases. - const size_t EqualElems = Cold ? 
0 : getNumEqualOffsets(Map); - if (!Cold) { - encodeULEB128(EqualElems, OS); - if (EqualElems) { - const size_t BranchEntriesBytes = alignTo(EqualElems, 8) / 8; - APInt BranchEntries = calculateBranchEntriesBitMask(Map, EqualElems); - OS.write(reinterpret_cast(BranchEntries.getRawData()), - BranchEntriesBytes); - LLVM_DEBUG({ - dbgs() << "BranchEntries: "; - SmallString<8> BitMaskStr; - BranchEntries.toString(BitMaskStr, 2, false); - dbgs() << BitMaskStr << '\n'; - }); - } + // Encode the number of equal offsets (output = input - skew) in the + // beginning of the function. Only encode one offset in these cases. + const size_t EqualElems = getNumEqualOffsets(Map, Skew); + encodeULEB128(EqualElems, OS); + if (EqualElems) { + const size_t BranchEntriesBytes = alignTo(EqualElems, 8) / 8; + APInt BranchEntries = calculateBranchEntriesBitMask(Map, EqualElems); + OS.write(reinterpret_cast(BranchEntries.getRawData()), + BranchEntriesBytes); + LLVM_DEBUG({ + dbgs() << "BranchEntries: "; + SmallString<8> BitMaskStr; + BranchEntries.toString(BitMaskStr, 2, false); + dbgs() << BitMaskStr << '\n'; + }); } const BBHashMapTy &BBHashMap = getBBHashMap(HotInputAddress); size_t Index = 0; @@ -315,10 +318,12 @@ void BoltAddressTranslation::parseMaps(std::vector &HotFuncs, uint64_t HotAddress = Cold ? 0 : Address; PrevAddress = Address; uint32_t SecondaryEntryPoints = 0; + uint64_t ColdInputSkew = 0; if (Cold) { HotIndex += DE.getULEB128(&Offset, &Err); HotAddress = HotFuncs[HotIndex]; ColdPartSource.emplace(Address, HotAddress); + ColdInputSkew = DE.getULEB128(&Offset, &Err); } else { HotFuncs.push_back(Address); // Function hash @@ -339,28 +344,25 @@ void BoltAddressTranslation::parseMaps(std::vector &HotFuncs, getULEB128Size(SecondaryEntryPoints))); } const uint32_t NumEntries = DE.getULEB128(&Offset, &Err); - // Equal offsets, hot fragments only. - size_t EqualElems = 0; + // Equal offsets. 
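A self-contained sketch of the writer-side computation in writeMaps above, assuming MapTy maps output offsets to (input offset << 1) | BRANCHENTRY values as it does in the surrounding code; the encodeULEB128 calls are left out:

    #include <cstdint>
    #include <map>

    // Output offset -> (input offset << 1) | BRANCHENTRY, mirroring BAT's MapTy.
    using MapTy = std::map<uint32_t, uint32_t>;

    // For cold fragments the skew is simply the first input offset; hot
    // fragments keep a skew of zero.
    uint32_t computeSkew(const MapTy &Map, bool Cold) {
      return (Cold && !Map.empty()) ? Map.begin()->second >> 1 : 0;
    }

    // Count the leading entries with output == input - skew. Only this count
    // (plus a BRANCHENTRY bitmask) is written for them; their offsets are
    // implicit.
    size_t getNumEqualOffsets(const MapTy &Map, uint32_t Skew) {
      size_t EqualOffsets = 0;
      for (const auto &[OutputOffset, InputVal] : Map) {
        if (OutputOffset != (InputVal >> 1) - Skew)
          break;
        ++EqualOffsets;
      }
      return EqualOffsets;
    }

With the skew written for cold fragments, the EqualElems/BranchEntries fields can appear for both hot and cold fragments, which is what the BAT.md update earlier in this patch documents.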
+ const size_t EqualElems = DE.getULEB128(&Offset, &Err); APInt BEBitMask; - if (!Cold) { - EqualElems = DE.getULEB128(&Offset, &Err); - LLVM_DEBUG(dbgs() << formatv("Equal offsets: {0}, {1} bytes\n", - EqualElems, getULEB128Size(EqualElems))); - if (EqualElems) { - const size_t BranchEntriesBytes = alignTo(EqualElems, 8) / 8; - BEBitMask = APInt(alignTo(EqualElems, 8), 0); - LoadIntFromMemory( - BEBitMask, - reinterpret_cast( - DE.getBytes(&Offset, BranchEntriesBytes, &Err).data()), - BranchEntriesBytes); - LLVM_DEBUG({ - dbgs() << "BEBitMask: "; - SmallString<8> BitMaskStr; - BEBitMask.toString(BitMaskStr, 2, false); - dbgs() << BitMaskStr << ", " << BranchEntriesBytes << " bytes\n"; - }); - } + LLVM_DEBUG(dbgs() << formatv("Equal offsets: {0}, {1} bytes\n", EqualElems, + getULEB128Size(EqualElems))); + if (EqualElems) { + const size_t BranchEntriesBytes = alignTo(EqualElems, 8) / 8; + BEBitMask = APInt(alignTo(EqualElems, 8), 0); + LoadIntFromMemory( + BEBitMask, + reinterpret_cast( + DE.getBytes(&Offset, BranchEntriesBytes, &Err).data()), + BranchEntriesBytes); + LLVM_DEBUG({ + dbgs() << "BEBitMask: "; + SmallString<8> BitMaskStr; + BEBitMask.toString(BitMaskStr, 2, false); + dbgs() << BitMaskStr << ", " << BranchEntriesBytes << " bytes\n"; + }); } MapTy Map; @@ -375,7 +377,7 @@ void BoltAddressTranslation::parseMaps(std::vector &HotFuncs, PrevAddress = OutputAddress; int64_t InputDelta = 0; if (J < EqualElems) { - InputOffset = (OutputOffset << 1) | BEBitMask[J]; + InputOffset = ((OutputOffset + ColdInputSkew) << 1) | BEBitMask[J]; } else { InputDelta = DE.getSLEB128(&Offset, &Err); InputOffset += InputDelta; @@ -582,26 +584,6 @@ void BoltAddressTranslation::saveMetadata(BinaryContext &BC) { } } -std::unordered_map> -BoltAddressTranslation::getBFBranches(uint64_t OutputAddress) const { - std::unordered_map> Branches; - auto FuncIt = Maps.find(OutputAddress); - assert(FuncIt != Maps.end()); - std::vector InputOffsets; - for (const auto &KV : FuncIt->second) - InputOffsets.emplace_back(KV.second); - // Sort with LSB BRANCHENTRY bit. - llvm::sort(InputOffsets); - uint32_t BBOffset{0}; - for (uint32_t InOffset : InputOffsets) { - if (InOffset & BRANCHENTRY) - Branches[BBOffset].push_back(InOffset >> 1); - else - BBOffset = InOffset >> 1; - } - return Branches; -} - unsigned BoltAddressTranslation::getSecondaryEntryPointId(uint64_t Address, uint32_t Offset) const { diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp index 71824e2cc0e97..0b2a4e86561f3 100644 --- a/bolt/lib/Profile/DataAggregator.cpp +++ b/bolt/lib/Profile/DataAggregator.cpp @@ -604,8 +604,6 @@ Error DataAggregator::readProfile(BinaryContext &BC) { // BAT YAML is handled by DataAggregator since normal YAML output requires // CFG which is not available in BAT mode. 
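On the read side (parseMaps above), the first EqualElems input offsets are never stored; they are rebuilt from the output offset, the per-fragment ColdInputSkew, and the BRANCHENTRY bitmask. A small sketch of that reconstruction, using a plain bit vector in place of APInt:

    #include <cstdint>
    #include <vector>

    // Rebuild the map value for the J-th implicit entry:
    // (input offset << 1) | BRANCHENTRY, where input = output + skew.
    uint32_t reconstructImplicitEntry(uint32_t OutputOffset,
                                      uint32_t ColdInputSkew,
                                      const std::vector<bool> &BEBitMask,
                                      size_t J) {
      const uint32_t InputOffset = OutputOffset + ColdInputSkew;
      return (InputOffset << 1) | (BEBitMask[J] ? 1u : 0u);
    }

Hot fragments keep a skew of zero, so this reduces to the old output == input behaviour.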
if (usesBAT()) { - // Postprocess split function profile for BAT - fixupBATProfile(BC); if (opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML) if (std::error_code EC = writeBATYAML(BC, opts::OutputFilename)) report_error("cannot create output data file", EC); @@ -664,18 +662,19 @@ DataAggregator::getBinaryFunctionContainingAddress(uint64_t Address) const { /*UseMaxSize=*/true); } -StringRef DataAggregator::getLocationName(BinaryFunction &Func, - uint64_t Count) { +BinaryFunction * +DataAggregator::getBATParentFunction(const BinaryFunction &Func) const { + if (BAT) + if (const uint64_t HotAddr = BAT->fetchParentAddress(Func.getAddress())) + return getBinaryFunctionContainingAddress(HotAddr); + return nullptr; +} + +StringRef DataAggregator::getLocationName(const BinaryFunction &Func) const { if (!BAT) return Func.getOneName(); const BinaryFunction *OrigFunc = &Func; - if (const uint64_t HotAddr = BAT->fetchParentAddress(Func.getAddress())) { - NumColdSamples += Count; - BinaryFunction *HotFunc = getBinaryFunctionContainingAddress(HotAddr); - if (HotFunc) - OrigFunc = HotFunc; - } // If it is a local function, prefer the name containing the file name where // the local function was declared for (StringRef AlternativeName : OrigFunc->getNames()) { @@ -690,12 +689,17 @@ StringRef DataAggregator::getLocationName(BinaryFunction &Func, return OrigFunc->getOneName(); } -bool DataAggregator::doSample(BinaryFunction &Func, uint64_t Address, +bool DataAggregator::doSample(BinaryFunction &OrigFunc, uint64_t Address, uint64_t Count) { + BinaryFunction *ParentFunc = getBATParentFunction(OrigFunc); + BinaryFunction &Func = ParentFunc ? *ParentFunc : OrigFunc; + if (ParentFunc) + NumColdSamples += Count; + auto I = NamesToSamples.find(Func.getOneName()); if (I == NamesToSamples.end()) { bool Success; - StringRef LocName = getLocationName(Func, Count); + StringRef LocName = getLocationName(Func); std::tie(I, Success) = NamesToSamples.insert( std::make_pair(Func.getOneName(), FuncSampleData(LocName, FuncSampleData::ContainerTy()))); @@ -715,22 +719,12 @@ bool DataAggregator::doIntraBranch(BinaryFunction &Func, uint64_t From, FuncBranchData *AggrData = getBranchData(Func); if (!AggrData) { AggrData = &NamesToBranches[Func.getOneName()]; - AggrData->Name = getLocationName(Func, Count); + AggrData->Name = getLocationName(Func); setBranchData(Func, AggrData); } - From -= Func.getAddress(); - To -= Func.getAddress(); LLVM_DEBUG(dbgs() << "BOLT-DEBUG: bumpBranchCount: " << formatv("{0} @ {1:x} -> {0} @ {2:x}\n", Func, From, To)); - if (BAT) { - From = BAT->translate(Func.getAddress(), From, /*IsBranchSrc=*/true); - To = BAT->translate(Func.getAddress(), To, /*IsBranchSrc=*/false); - LLVM_DEBUG( - dbgs() << "BOLT-DEBUG: BAT translation on bumpBranchCount: " - << formatv("{0} @ {1:x} -> {0} @ {2:x}\n", Func, From, To)); - } - AggrData->bumpBranchCount(From, To, Count, Mispreds); return true; } @@ -744,30 +738,24 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc, StringRef SrcFunc; StringRef DstFunc; if (FromFunc) { - SrcFunc = getLocationName(*FromFunc, Count); + SrcFunc = getLocationName(*FromFunc); FromAggrData = getBranchData(*FromFunc); if (!FromAggrData) { FromAggrData = &NamesToBranches[FromFunc->getOneName()]; FromAggrData->Name = SrcFunc; setBranchData(*FromFunc, FromAggrData); } - From -= FromFunc->getAddress(); - if (BAT) - From = BAT->translate(FromFunc->getAddress(), From, /*IsBranchSrc=*/true); recordExit(*FromFunc, From, Mispreds, Count); } if (ToFunc) { - DstFunc = 
getLocationName(*ToFunc, 0); + DstFunc = getLocationName(*ToFunc); ToAggrData = getBranchData(*ToFunc); if (!ToAggrData) { ToAggrData = &NamesToBranches[ToFunc->getOneName()]; ToAggrData->Name = DstFunc; setBranchData(*ToFunc, ToAggrData); } - To -= ToFunc->getAddress(); - if (BAT) - To = BAT->translate(ToFunc->getAddress(), To, /*IsBranchSrc=*/false); recordEntry(*ToFunc, To, Mispreds, Count); } @@ -783,15 +771,32 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc, bool DataAggregator::doBranch(uint64_t From, uint64_t To, uint64_t Count, uint64_t Mispreds) { - BinaryFunction *FromFunc = getBinaryFunctionContainingAddress(From); - BinaryFunction *ToFunc = getBinaryFunctionContainingAddress(To); + auto handleAddress = [&](uint64_t &Addr, bool IsFrom) -> BinaryFunction * { + if (BinaryFunction *Func = getBinaryFunctionContainingAddress(Addr)) { + Addr -= Func->getAddress(); + + if (BAT) + Addr = BAT->translate(Func->getAddress(), Addr, IsFrom); + + if (BinaryFunction *ParentFunc = getBATParentFunction(*Func)) { + Func = ParentFunc; + if (IsFrom) + NumColdSamples += Count; + } + + return Func; + } + return nullptr; + }; + + BinaryFunction *FromFunc = handleAddress(From, /*IsFrom=*/true); + BinaryFunction *ToFunc = handleAddress(To, /*IsFrom=*/false); if (!FromFunc && !ToFunc) return false; // Treat recursive control transfers as inter-branches. - if (FromFunc == ToFunc && (To != ToFunc->getAddress())) { - recordBranch(*FromFunc, From - FromFunc->getAddress(), - To - FromFunc->getAddress(), Count, Mispreds); + if (FromFunc == ToFunc && To != 0) { + recordBranch(*FromFunc, From, To, Count, Mispreds); return doIntraBranch(*FromFunc, From, To, Count, Mispreds); } @@ -842,9 +847,14 @@ bool DataAggregator::doTrace(const LBREntry &First, const LBREntry &Second, << FromFunc->getPrintName() << ":" << Twine::utohexstr(First.To) << " to " << Twine::utohexstr(Second.From) << ".\n"); - for (const std::pair &Pair : *FTs) - doIntraBranch(*FromFunc, Pair.first + FromFunc->getAddress(), - Pair.second + FromFunc->getAddress(), Count, false); + BinaryFunction *ParentFunc = getBATParentFunction(*FromFunc); + for (auto [From, To] : *FTs) { + if (BAT) { + From = BAT->translate(FromFunc->getAddress(), From, /*IsBranchSrc=*/true); + To = BAT->translate(FromFunc->getAddress(), To, /*IsBranchSrc=*/false); + } + doIntraBranch(ParentFunc ? *ParentFunc : *FromFunc, From, To, Count, false); + } return true; } @@ -2273,29 +2283,6 @@ DataAggregator::writeAggregatedFile(StringRef OutputFilename) const { return std::error_code(); } -void DataAggregator::fixupBATProfile(BinaryContext &BC) { - for (auto &[FuncName, Branches] : NamesToBranches) { - BinaryData *BD = BC.getBinaryDataByName(FuncName); - assert(BD); - uint64_t FuncAddress = BD->getAddress(); - if (!BAT->isBATFunction(FuncAddress)) - continue; - // Filter out cold fragments - if (!BD->getSectionName().equals(BC.getMainCodeSectionName())) - continue; - // Convert inter-branches between hot and cold fragments into - // intra-branches. 
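The doBranch rewrite above handles both ends of an LBR entry with the same three steps; a rough sketch of that shape with stand-in types (FuncInfo and the translation callback are assumptions for illustration, not the BOLT API):

    #include <cstdint>

    // Stand-in for BinaryFunction: its start address and, for cold fragments,
    // the hot parent resolved through BAT.
    struct FuncInfo {
      uint64_t Address = 0;
      const FuncInfo *BATParent = nullptr;
    };

    using TranslateFn = uint64_t (*)(uint64_t FuncAddress, uint64_t Offset,
                                     bool IsBranchSrc);

    // 1) make the address function-relative, 2) map the output offset back to
    // the pre-BOLT input offset, 3) attribute cold fragments to their parent.
    const FuncInfo *handleEndpoint(const FuncInfo *Func, uint64_t &Addr,
                                   bool IsFrom, TranslateFn Translate) {
      if (!Func)
        return nullptr;
      Addr -= Func->Address;
      if (Translate)
        Addr = Translate(Func->Address, Addr, IsFrom);
      return Func->BATParent ? Func->BATParent : Func;
    }

Once both offsets are function-relative, the recursive-call test simplifies to To != 0, which is why the old To != ToFunc->getAddress() comparison could go.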
- for (auto &[OffsetFrom, CallToMap] : Branches.InterIndex) { - for (auto &[CallToLoc, CallToIdx] : CallToMap) { - if (CallToLoc.Name != FuncName) - continue; - Branches.IntraIndex[OffsetFrom][CallToLoc.Offset] = CallToIdx; - Branches.InterIndex[OffsetFrom].erase(CallToLoc); - } - } - } -} - std::error_code DataAggregator::writeBATYAML(BinaryContext &BC, StringRef OutputFilename) const { std::error_code EC; @@ -2345,9 +2332,6 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC, uint64_t FuncAddress = BD->getAddress(); if (!BAT->isBATFunction(FuncAddress)) continue; - // Filter out cold fragments - if (!BD->getSectionName().equals(BC.getMainCodeSectionName())) - continue; BinaryFunction *BF = BC.getBinaryFunctionAtAddress(FuncAddress); assert(BF); YamlBF.Name = FuncName.str(); @@ -2357,87 +2341,68 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC, YamlBF.NumBasicBlocks = BAT->getNumBasicBlocks(FuncAddress); const BoltAddressTranslation::BBHashMapTy &BlockMap = BAT->getBBHashMap(FuncAddress); + YamlBF.Blocks.resize(YamlBF.NumBasicBlocks); - auto addSuccProfile = [&](yaml::bolt::BinaryBasicBlockProfile &YamlBB, - uint64_t SuccOffset, unsigned SuccDataIdx) { + for (auto &&[Idx, YamlBB] : llvm::enumerate(YamlBF.Blocks)) + YamlBB.Index = Idx; + + for (auto BI = BlockMap.begin(), BE = BlockMap.end(); BI != BE; ++BI) + YamlBF.Blocks[BI->second.getBBIndex()].Hash = BI->second.getBBHash(); + + auto getSuccessorInfo = [&](uint32_t SuccOffset, unsigned SuccDataIdx) { const llvm::bolt::BranchInfo &BI = Branches.Data.at(SuccDataIdx); yaml::bolt::SuccessorInfo SI; SI.Index = BlockMap.getBBIndex(SuccOffset); SI.Count = BI.Branches; SI.Mispreds = BI.Mispreds; - YamlBB.Successors.emplace_back(SI); + return SI; }; - std::unordered_map> BFBranches = - BAT->getBFBranches(FuncAddress); - - auto addCallsProfile = [&](yaml::bolt::BinaryBasicBlockProfile &YamlBB, - uint64_t Offset) { - // Iterate over BRANCHENTRY records in the current block - for (uint32_t BranchOffset : BFBranches[Offset]) { - if (!Branches.InterIndex.contains(BranchOffset)) - continue; - for (const auto &[CallToLoc, CallToIdx] : - Branches.InterIndex.at(BranchOffset)) { - const llvm::bolt::BranchInfo &BI = Branches.Data.at(CallToIdx); - yaml::bolt::CallSiteInfo YamlCSI; - YamlCSI.DestId = 0; // designated for unknown functions - YamlCSI.EntryDiscriminator = 0; - YamlCSI.Count = BI.Branches; - YamlCSI.Mispreds = BI.Mispreds; - YamlCSI.Offset = BranchOffset - Offset; - BinaryData *CallTargetBD = BC.getBinaryDataByName(CallToLoc.Name); - if (!CallTargetBD) { - YamlBB.CallSites.emplace_back(YamlCSI); - continue; - } - uint64_t CallTargetAddress = CallTargetBD->getAddress(); - BinaryFunction *CallTargetBF = - BC.getBinaryFunctionAtAddress(CallTargetAddress); - if (!CallTargetBF) { - YamlBB.CallSites.emplace_back(YamlCSI); - continue; - } - // Calls between hot and cold fragments must be handled in - // fixupBATProfile. - assert(CallTargetBF != BF && "invalid CallTargetBF"); - YamlCSI.DestId = CallTargetBF->getFunctionNumber(); - if (CallToLoc.Offset) { - if (BAT->isBATFunction(CallTargetAddress)) { - LLVM_DEBUG(dbgs() << "BOLT-DEBUG: Unsupported secondary " - "entry point in BAT function " - << CallToLoc.Name << '\n'); - } else if (const BinaryBasicBlock *CallTargetBB = - CallTargetBF->getBasicBlockAtOffset( - CallToLoc.Offset)) { - // Only record true call information, ignoring returns (normally - // won't have a target basic block) and jumps to the landing - // pads (not an entry point). 
- if (CallTargetBB->isEntryPoint()) { - YamlCSI.EntryDiscriminator = - CallTargetBF->getEntryIDForSymbol( - CallTargetBB->getLabel()); - } - } - } - YamlBB.CallSites.emplace_back(YamlCSI); - } - } + auto getCallSiteInfo = [&](Location CallToLoc, unsigned CallToIdx, + uint32_t Offset) { + const llvm::bolt::BranchInfo &BI = Branches.Data.at(CallToIdx); + yaml::bolt::CallSiteInfo CSI; + CSI.DestId = 0; // designated for unknown functions + CSI.EntryDiscriminator = 0; + CSI.Count = BI.Branches; + CSI.Mispreds = BI.Mispreds; + CSI.Offset = Offset; + if (BinaryData *BD = BC.getBinaryDataByName(CallToLoc.Name)) + YAMLProfileWriter::setCSIDestination(BC, CSI, BD->getSymbol(), BAT, + CallToLoc.Offset); + return CSI; }; for (const auto &[FromOffset, SuccKV] : Branches.IntraIndex) { - yaml::bolt::BinaryBasicBlockProfile YamlBB; if (!BlockMap.isInputBlock(FromOffset)) continue; - YamlBB.Index = BlockMap.getBBIndex(FromOffset); - YamlBB.Hash = BlockMap.getBBHash(FromOffset); + const unsigned Index = BlockMap.getBBIndex(FromOffset); + yaml::bolt::BinaryBasicBlockProfile &YamlBB = YamlBF.Blocks[Index]; for (const auto &[SuccOffset, SuccDataIdx] : SuccKV) - addSuccProfile(YamlBB, SuccOffset, SuccDataIdx); - addCallsProfile(YamlBB, FromOffset); - if (YamlBB.ExecCount || !YamlBB.Successors.empty() || - !YamlBB.CallSites.empty()) - YamlBF.Blocks.emplace_back(YamlBB); + if (BlockMap.isInputBlock(SuccOffset)) + YamlBB.Successors.emplace_back( + getSuccessorInfo(SuccOffset, SuccDataIdx)); + } + for (const auto &[FromOffset, CallTo] : Branches.InterIndex) { + auto BlockIt = BlockMap.upper_bound(FromOffset); + --BlockIt; + const unsigned BlockOffset = BlockIt->first; + const unsigned BlockIndex = BlockIt->second.getBBIndex(); + yaml::bolt::BinaryBasicBlockProfile &YamlBB = YamlBF.Blocks[BlockIndex]; + const uint32_t Offset = FromOffset - BlockOffset; + for (const auto &[CallToLoc, CallToIdx] : CallTo) + YamlBB.CallSites.emplace_back( + getCallSiteInfo(CallToLoc, CallToIdx, Offset)); + llvm::sort(YamlBB.CallSites, [](yaml::bolt::CallSiteInfo &A, + yaml::bolt::CallSiteInfo &B) { + return A.Offset < B.Offset; + }); } + // Drop blocks without a hash, won't be useful for stale matching. 
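The writeBATYAML change above attaches each inter-branch source to its containing basic block by doing an upper_bound lookup on the block-offset map and stepping back one entry; a minimal sketch of that idiom (it assumes the map is non-empty and Offset is not below the first key, as the surrounding code does):

    #include <cstdint>
    #include <iterator>
    #include <map>

    // Find the block containing Offset: the entry with the greatest start
    // offset that is <= Offset.
    template <typename T>
    typename std::map<uint32_t, T>::const_iterator
    containingBlock(const std::map<uint32_t, T> &BlockMap, uint32_t Offset) {
      auto It = BlockMap.upper_bound(Offset); // first start offset > Offset
      return std::prev(It);                   // step back to the containing block
    }

The call-site offset recorded in the YAML is then FromOffset minus that block's start offset, i.e. relative to the block.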
+ llvm::erase_if(YamlBF.Blocks, + [](const yaml::bolt::BinaryBasicBlockProfile &YamlBB) { + return YamlBB.Hash == (yaml::Hex64)0; + }); BP.Functions.emplace_back(YamlBF); } } diff --git a/bolt/lib/Rewrite/BinaryPassManager.cpp b/bolt/lib/Rewrite/BinaryPassManager.cpp index 6c26bb7957269..be4888ccfa564 100644 --- a/bolt/lib/Rewrite/BinaryPassManager.cpp +++ b/bolt/lib/Rewrite/BinaryPassManager.cpp @@ -377,8 +377,9 @@ Error BinaryFunctionPassManager::runAllPasses(BinaryContext &BC) { Manager.registerPass(std::make_unique(PrintNormalized)); - Manager.registerPass(std::make_unique(NeverPrint), - opts::StripRepRet); + if (BC.isX86()) + Manager.registerPass(std::make_unique(NeverPrint), + opts::StripRepRet); Manager.registerPass(std::make_unique(PrintICF), opts::ICF); diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp index 0c8ee0d417233..4e0096cf988ae 100644 --- a/bolt/lib/Rewrite/RewriteInstance.cpp +++ b/bolt/lib/Rewrite/RewriteInstance.cpp @@ -556,7 +556,7 @@ Error RewriteInstance::discoverStorage() { if (Error E = SectionNameOrErr.takeError()) return E; StringRef SectionName = SectionNameOrErr.get(); - if (SectionName == ".text") { + if (SectionName == BC->getMainCodeSectionName()) { BC->OldTextSectionAddress = Section.getAddress(); BC->OldTextSectionSize = Section.getSize(); @@ -1670,7 +1670,9 @@ void RewriteInstance::disassemblePLT() { return disassemblePLTSectionAArch64(Section); if (BC->isRISCV()) return disassemblePLTSectionRISCV(Section); - return disassemblePLTSectionX86(Section, EntrySize); + if (BC->isX86()) + return disassemblePLTSectionX86(Section, EntrySize); + llvm_unreachable("Unmplemented PLT"); }; for (BinarySection &Section : BC->allocatableSections()) { @@ -1864,7 +1866,8 @@ Error RewriteInstance::readSpecialSections() { "Use -update-debug-sections to keep it.\n"; } - HasTextRelocations = (bool)BC->getUniqueSectionByName(".rela.text"); + HasTextRelocations = (bool)BC->getUniqueSectionByName( + ".rela" + std::string(BC->getMainCodeSectionName())); HasSymbolTable = (bool)BC->getUniqueSectionByName(".symtab"); EHFrameSection = BC->getUniqueSectionByName(".eh_frame"); BuildIDSection = BC->getUniqueSectionByName(".note.gnu.build-id"); @@ -2305,9 +2308,13 @@ void RewriteInstance::processRelocations() { return; for (const SectionRef &Section : InputFile->sections()) { - if (cantFail(Section.getRelocatedSection()) != InputFile->section_end() && - !BinarySection(*BC, Section).isAllocatable()) - readRelocations(Section); + section_iterator SecIter = cantFail(Section.getRelocatedSection()); + if (SecIter == InputFile->section_end()) + continue; + if (BinarySection(*BC, Section).isAllocatable()) + continue; + + readRelocations(Section); } if (NumFailedRelocations) @@ -2600,7 +2607,7 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, const bool IsToCode = ReferencedSection && ReferencedSection->isText(); // Special handling of PC-relative relocations. 
- if (!IsAArch64 && !BC->isRISCV() && Relocation::isPCRelative(RType)) { + if (BC->isX86() && Relocation::isPCRelative(RType)) { if (!IsFromCode && IsToCode) { // PC-relative relocations from data to code are tricky since the // original information is typically lost after linking, even with @@ -2854,15 +2861,14 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, BC->isRISCV()) ForceRelocation = true; - if (IsFromCode) { + if (IsFromCode) ContainingBF->addRelocation(Rel.getOffset(), ReferencedSymbol, RType, Addend, ExtractedValue); - } else if (IsToCode || ForceRelocation) { + else if (IsToCode || ForceRelocation) BC->addRelocation(Rel.getOffset(), ReferencedSymbol, RType, Addend, ExtractedValue); - } else { + else LLVM_DEBUG(dbgs() << "BOLT-DEBUG: ignoring relocation from data to data\n"); - } } void RewriteInstance::selectFunctionsToProcess() { @@ -3441,7 +3447,8 @@ void RewriteInstance::emitAndLink() { ErrorOr TextSection = BC->getUniqueSectionByName(BC->getMainCodeSectionName()); if (BC->HasRelocations && TextSection) - BC->renameSection(*TextSection, getOrgSecPrefix() + ".text"); + BC->renameSection(*TextSection, + getOrgSecPrefix() + BC->getMainCodeSectionName()); ////////////////////////////////////////////////////////////////////////////// // Assign addresses to new sections. @@ -4298,7 +4305,7 @@ RewriteInstance::getOutputSections(ELFObjectFile *File, for (auto &SectionKV : OutputSections) { ELFShdrTy &Section = SectionKV.second; - // Ignore TLS sections as they don't take any space in the file. + // Ignore NOBITS sections as they don't take any space in the file. if (Section.sh_type == ELF::SHT_NOBITS) continue; @@ -4306,10 +4313,9 @@ RewriteInstance::getOutputSections(ELFObjectFile *File, // placed in different loadable segments. if (PrevSection && PrevSection->sh_offset + PrevSection->sh_size > Section.sh_offset) { - if (opts::Verbosity > 1) { + if (opts::Verbosity > 1) BC->outs() << "BOLT-INFO: adjusting size for section " << PrevBinSec->getOutputName() << '\n'; - } PrevSection->sh_size = Section.sh_offset - PrevSection->sh_offset; } @@ -4417,6 +4423,7 @@ void RewriteInstance::patchELFSectionHeaderTable(ELFObjectFile *File) { raw_fd_ostream &OS = Out->os(); const ELFFile &Obj = File->getELFFile(); + // Mapping from old section indices to new ones std::vector NewSectionIndex; std::vector OutputSections = getOutputSections(File, NewSectionIndex); @@ -4434,10 +4441,8 @@ void RewriteInstance::patchELFSectionHeaderTable(ELFObjectFile *File) { // Write all section header entries while patching section references. 
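Below, sh_link and sh_info are rewritten through the old-to-new section index table; for SHT_REL/SHT_RELA sections sh_info is itself a section index (the section the relocations apply to). A small sketch of the remapping, assuming, as dropping the old `if (Section.sh_info)` guard implies, that index 0 maps back to 0:

    #include <cstdint>
    #include <vector>

    struct SectionHeader { // stand-in for ELFShdrTy
      uint32_t sh_type = 0;
      uint32_t sh_link = 0;
      uint32_t sh_info = 0;
    };
    constexpr uint32_t SHT_RELA = 4, SHT_REL = 9;

    void remapIndices(SectionHeader &S, const std::vector<uint32_t> &NewIndex) {
      S.sh_link = NewIndex[S.sh_link];
      if (S.sh_type == SHT_REL || S.sh_type == SHT_RELA)
        S.sh_info = NewIndex[S.sh_info]; // also a section index for relocations
    }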
for (ELFShdrTy &Section : OutputSections) { Section.sh_link = NewSectionIndex[Section.sh_link]; - if (Section.sh_type == ELF::SHT_REL || Section.sh_type == ELF::SHT_RELA) { - if (Section.sh_info) - Section.sh_info = NewSectionIndex[Section.sh_info]; - } + if (Section.sh_type == ELF::SHT_REL || Section.sh_type == ELF::SHT_RELA) + Section.sh_info = NewSectionIndex[Section.sh_info]; OS.write(reinterpret_cast(&Section), sizeof(Section)); } @@ -5762,10 +5767,3 @@ bool RewriteInstance::isDebugSection(StringRef SectionName) { return false; } - -bool RewriteInstance::isKSymtabSection(StringRef SectionName) { - if (SectionName.starts_with("__ksymtab")) - return true; - - return false; -} diff --git a/bolt/test/X86/bolt-address-translation.test b/bolt/test/X86/bolt-address-translation.test index 63234b4c1d218..e6b21c14077b4 100644 --- a/bolt/test/X86/bolt-address-translation.test +++ b/bolt/test/X86/bolt-address-translation.test @@ -37,7 +37,7 @@ # CHECK: BOLT: 3 out of 7 functions were overwritten. # CHECK: BOLT-INFO: Wrote 6 BAT maps # CHECK: BOLT-INFO: Wrote 3 function and 58 basic block hashes -# CHECK: BOLT-INFO: BAT section size (bytes): 924 +# CHECK: BOLT-INFO: BAT section size (bytes): 928 # # usqrt mappings (hot part). We match against any key (left side containing # the bolted binary offsets) because BOLT may change where it puts instructions diff --git a/bolt/test/X86/patch-entries.test b/bolt/test/X86/patch-entries.test index 54f358f273e79..4a725412dd616 100644 --- a/bolt/test/X86/patch-entries.test +++ b/bolt/test/X86/patch-entries.test @@ -7,4 +7,25 @@ REQUIRES: system-linux RUN: %clang %cflags -no-pie -g %p/Inputs/patch-entries.c -fuse-ld=lld -o %t.exe \ RUN: -Wl,-q -I%p/../Inputs -RUN: llvm-bolt -relocs %t.exe -o %t.out --update-debug-sections --force-patch +RUN: llvm-bolt -relocs %t.exe -o %t.out --update-debug-sections --force-patch \ +RUN: --enable-bat + +# Check that patched functions can be disassembled (override FDE from the +# original function) +# PREAGG: B X:0 #foo.org.0# 1 0 +RUN: link_fdata %s %t.out %t.preagg PREAGG +RUN: perf2bolt %t.out -p %t.preagg --pa -o %t.yaml --profile-format=yaml \ +RUN: -print-disasm -print-only=foo.org.0/1 2>&1 | FileCheck %s +CHECK-NOT: BOLT-WARNING: sizes differ for function foo.org.0/1 +CHECK: Binary Function "foo.org.0/1(*2)" after disassembly { + +# Check the expected eh_frame contents +RUN: llvm-nm --print-size %t.out > %t.foo +RUN: llvm-objdump %t.out --dwarf=frames >> %t.foo +RUN: FileCheck %s --input-file %t.foo --check-prefix=CHECK-FOO +CHECK-FOO: 0000000000[[#%x,FOO:]] [[#%x,OPTSIZE:]] t foo +CHECK-FOO: 0000000000[[#%x,ORG:]] [[#%x,ORGSIZE:]] t foo.org.0 +# patched FDE comes first +CHECK-FOO: FDE {{.*}} pc=00[[#%x,ORG]]...00[[#%x,ORG+ORGSIZE]] +# original FDE comes second +CHECK-FOO: FDE {{.*}} pc=00[[#%x,ORG]]...00[[#%x,ORG+OPTSIZE]] diff --git a/bolt/test/X86/yaml-secondary-entry-discriminator.s b/bolt/test/X86/yaml-secondary-entry-discriminator.s index 78e7e55aa98eb..5d6e291fd7c22 100644 --- a/bolt/test/X86/yaml-secondary-entry-discriminator.s +++ b/bolt/test/X86/yaml-secondary-entry-discriminator.s @@ -1,5 +1,5 @@ -# This reproduces a bug with BOLT setting incorrect discriminator for -# secondary entry points in YAML profile. +## This reproduces a bug with BOLT setting incorrect discriminator for +## secondary entry points in YAML profile. 
# REQUIRES: system-linux # RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o @@ -24,7 +24,7 @@ # CHECK-NEXT: hash: {{.*}} # CHECK-NEXT: calls: [ { off: 0x0, fid: 1, disc: 1, cnt: 1, mis: 1 } ] -# Make sure that the profile is attached correctly +## Make sure that the profile is attached correctly # RUN: llvm-bolt %t.exe -o %t.out --data %t.yaml --print-profile \ # RUN: --print-only=main | FileCheck %s --check-prefix=CHECK-CFG @@ -33,25 +33,77 @@ # CHECK-CFG: callq *%rax # Offset: [[#]] # CallProfile: 1 (1 misses) : # CHECK-CFG-NEXT: { secondary_entry: 1 (1 misses) } -# YAML BAT test of calling BAT secondary entry from non-BAT function -# Now force-split func and skip main (making it call secondary entries) +## YAML BAT test of calling BAT secondary entry from non-BAT function +## Now force-split func and skip main (making it call secondary entries) # RUN: llvm-bolt %t.exe -o %t.bat --data %t.fdata --funcs=func \ # RUN: --split-functions --split-strategy=all --split-all-cold --enable-bat +## Prepare pre-aggregated profile using %t.bat +# RUN: link_fdata %s %t.bat %t.preagg PREAGG +## Strip labels used for pre-aggregated profile +# RUN: llvm-strip -NLcall -NLindcall %t.bat + +## Convert pre-aggregated profile using BAT +# RUN: perf2bolt %t.bat -p %t.preagg --pa -o %t.bat.fdata -w %t.bat.yaml + +## Convert BAT fdata into YAML +# RUN: llvm-bolt %t.exe -data %t.bat.fdata -w %t.bat.fdata-yaml -o /dev/null + +## Check fdata YAML - make sure that a direct call has discriminator field +# RUN: FileCheck %s --input-file %t.bat.fdata-yaml -check-prefix CHECK-BAT-YAML + +## Check BAT YAML - make sure that a direct call has discriminator field +# RUN: FileCheck %s --input-file %t.bat.yaml --check-prefix CHECK-BAT-YAML + +## YAML BAT test of calling BAT secondary entry from BAT function +# RUN: llvm-bolt %t.exe -o %t.bat2 --data %t.fdata --funcs=main,func \ +# RUN: --split-functions --split-strategy=all --split-all-cold --enable-bat + +## Prepare pre-aggregated profile using %t.bat +# RUN: link_fdata %s %t.bat2 %t.preagg2 PREAGG2 + +## Strip labels used for pre-aggregated profile +# RUN: llvm-strip -NLcall -NLindcall %t.bat2 + +## Convert pre-aggregated profile using BAT +# RUN: perf2bolt %t.bat2 -p %t.preagg2 --pa -o %t.bat2.fdata -w %t.bat2.yaml + +## Convert BAT fdata into YAML +# RUN: llvm-bolt %t.exe -data %t.bat2.fdata -w %t.bat2.fdata-yaml -o /dev/null + +## Check fdata YAML - make sure that a direct call has discriminator field +# RUN: FileCheck %s --input-file %t.bat2.fdata-yaml -check-prefix CHECK-BAT-YAML + +## Check BAT YAML - make sure that a direct call has discriminator field +# RUN: FileCheck %s --input-file %t.bat2.yaml --check-prefix CHECK-BAT-YAML + +# CHECK-BAT-YAML: - name: main +# CHECK-BAT-YAML-NEXT: fid: [[#]] +# CHECK-BAT-YAML-NEXT: hash: 0xADF270D550151185 +# CHECK-BAT-YAML-NEXT: exec: 0 +# CHECK-BAT-YAML-NEXT: nblocks: 4 +# CHECK-BAT-YAML-NEXT: blocks: +# CHECK-BAT-YAML: - bid: 1 +# CHECK-BAT-YAML-NEXT: insns: [[#]] +# CHECK-BAT-YAML-NEXT: hash: 0x36A303CBA4360018 +# CHECK-BAT-YAML-NEXT: calls: [ { off: 0x0, fid: [[#]], disc: 1, cnt: 1 + .globl func .type func, @function func: # FDATA: 0 [unknown] 0 1 func 0 1 0 +# PREAGG: B X:0 #func# 1 1 +# PREAGG2: B X:0 #func# 1 1 .cfi_startproc pushq %rbp movq %rsp, %rbp - # Placeholder code to make splitting profitable +## Placeholder code to make splitting profitable .rept 5 testq %rax, %rax .endr .globl secondary_entry secondary_entry: - # Placeholder code to make splitting profitable +## Placeholder code to make 
splitting profitable .rept 5 testq %rax, %rax .endr @@ -71,17 +123,23 @@ main: movl $0, -4(%rbp) testq %rax, %rax jne Lindcall +.globl Lcall Lcall: call secondary_entry # FDATA: 1 main #Lcall# 1 secondary_entry 0 1 1 +# PREAGG: B #Lcall# #secondary_entry# 1 1 +# PREAGG2: B #main.cold.0# #func.cold.0# 1 1 +.globl Lindcall Lindcall: callq *%rax # FDATA: 1 main #Lindcall# 1 secondary_entry 0 1 1 +# PREAGG: B #Lindcall# #secondary_entry# 1 1 +# PREAGG2: B #main.cold.1# #func.cold.0# 1 1 xorl %eax, %eax addq $16, %rsp popq %rbp retq -# For relocations against .text +## For relocations against .text call exit .cfi_endproc .size main, .-main diff --git a/bolt/test/runtime/X86/jt-confusion.s b/bolt/test/runtime/X86/jt-confusion.s new file mode 100644 index 0000000000000..f15c83b35b6a4 --- /dev/null +++ b/bolt/test/runtime/X86/jt-confusion.s @@ -0,0 +1,164 @@ +# REQUIRES: system-linux + +# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o +# RUN: llvm-strip --strip-unneeded %t.o +# RUN: %clang %cflags -no-pie -nostartfiles -nostdlib -lc %t.o -o %t.exe -Wl,-q + +# RUN: llvm-bolt %t.exe -o %t.exe.bolt --relocs=1 --lite=0 + +# RUN: %t.exe.bolt + +## Check that BOLT's jump table detection diffrentiates between +## __builtin_unreachable() targets and function pointers. + +## The test case was built from the following two source files and +## modiffied for standalone build. main became _start, etc. +## $ $(CC) a.c -O1 -S -o a.s +## $ $(CC) b.c -O0 -S -o b.s + +## a.c: + +## typedef int (*fptr)(int); +## void check_fptr(fptr, int); +## +## int foo(int a) { +## check_fptr(foo, 0); +## switch (a) { +## default: +## __builtin_unreachable(); +## case 0: +## return 3; +## case 1: +## return 5; +## case 2: +## return 7; +## case 3: +## return 11; +## case 4: +## return 13; +## case 5: +## return 17; +## } +## return 0; +## } +## +## int main(int argc) { +## check_fptr(main, 1); +## return foo(argc); +## } +## +## const fptr funcs[2] = {foo, main}; + +## b.c.: + +## typedef int (*fptr)(int); +## extern const fptr funcs[2]; +## +## #define assert(C) { if (!(C)) (*(unsigned long long *)0) = 0; } +## void check_fptr(fptr f, int i) { +## assert(f == funcs[i]); +## } + + + .text + .globl foo + .type foo, @function +foo: +.LFB0: + .cfi_startproc + pushq %rbx + .cfi_def_cfa_offset 16 + .cfi_offset 3, -16 + movl %edi, %ebx + movl $0, %esi + movl $foo, %edi + call check_fptr + movl %ebx, %ebx + jmp *.L4(,%rbx,8) +.L8: + movl $5, %eax + jmp .L1 +.L7: + movl $7, %eax + jmp .L1 +.L6: + movl $11, %eax + jmp .L1 +.L5: + movl $13, %eax + jmp .L1 +.L3: + movl $17, %eax + jmp .L1 +.L10: + movl $3, %eax +.L1: + popq %rbx + .cfi_def_cfa_offset 8 + ret + .cfi_endproc +.LFE0: + .size foo, .-foo + .globl _start + .type _start, @function +_start: +.LFB1: + .cfi_startproc + pushq %rbx + .cfi_def_cfa_offset 16 + .cfi_offset 3, -16 + movl %edi, %ebx + movl $1, %esi + movl $_start, %edi + call check_fptr + movl $1, %edi + call foo + popq %rbx + .cfi_def_cfa_offset 8 + callq exit@PLT + .cfi_endproc +.LFE1: + .size _start, .-_start + .globl check_fptr + .type check_fptr, @function +check_fptr: +.LFB2: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + .cfi_offset 6, -16 + movq %rsp, %rbp + .cfi_def_cfa_register 6 + movq %rdi, -8(%rbp) + movl %esi, -12(%rbp) + movl -12(%rbp), %eax + cltq + movq funcs(,%rax,8), %rax + cmpq %rax, -8(%rbp) + je .L33 + movl $0, %eax + movq $0, (%rax) +.L33: + nop + popq %rbp + .cfi_def_cfa 7, 8 + ret + .cfi_endproc + + .section .rodata + .align 8 + .align 4 +.L4: + .quad .L10 + .quad .L8 
+ .quad .L7 + .quad .L6 + .quad .L5 + .quad .L3 + + .globl funcs + .type funcs, @object + .size funcs, 16 +funcs: + .quad foo + .quad _start diff --git a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp index c608fe713f9f5..e7be8134781e4 100644 --- a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp @@ -72,7 +72,7 @@ void ForwardingReferenceOverloadCheck::registerMatchers(MatchFinder *Finder) { DeclarationMatcher FindOverload = cxxConstructorDecl( - hasParameter(0, ForwardingRefParm), + hasParameter(0, ForwardingRefParm), unless(isDeleted()), unless(hasAnyParameter( // No warning: enable_if as constructor parameter. parmVarDecl(hasType(isEnableIf())))), diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp index 855c4a2efc373..9c3c7cc70c187 100644 --- a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp +++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProTypeMemberInitCheck.cpp @@ -444,7 +444,7 @@ void ProTypeMemberInitCheck::checkMissingMemberInitializer( if (!F->hasInClassInitializer() && utils::type_traits::isTriviallyDefaultConstructible(F->getType(), Context) && - !isEmpty(Context, F->getType()) && !F->isUnnamedBitfield() && + !isEmpty(Context, F->getType()) && !F->isUnnamedBitField() && !AnyMemberHasInitPerUnion) FieldsToInit.insert(F); }); diff --git a/clang-tools-extra/clang-tidy/linuxkernel/MustCheckErrsCheck.h b/clang-tools-extra/clang-tidy/linuxkernel/MustCheckErrsCheck.h index f08fed4798392..7406aaead836e 100644 --- a/clang-tools-extra/clang-tidy/linuxkernel/MustCheckErrsCheck.h +++ b/clang-tools-extra/clang-tidy/linuxkernel/MustCheckErrsCheck.h @@ -17,15 +17,8 @@ namespace clang::tidy::linuxkernel { /// linux/err.h. Also checks to see if code uses the results from functions that /// directly return a value from one of these error functions. /// -/// This is important in the Linux kernel because ERR_PTR, PTR_ERR, IS_ERR, -/// IS_ERR_OR_NULL, ERR_CAST, and PTR_ERR_OR_ZERO return values must be checked, -/// since positive pointers and negative error codes are being used in the same -/// context. These functions are marked with -/// __attribute__((warn_unused_result)), but some kernel versions do not have -/// this warning enabled for clang. -/// /// For the user-facing documentation see: -/// http://clang.llvm.org/extra/clang-tidy/checks/linuxkernel/must-use-errs.html +/// http://clang.llvm.org/extra/clang-tidy/checks/linuxkernel/must-check-errs.html class MustCheckErrsCheck : public ClangTidyCheck { public: MustCheckErrsCheck(StringRef Name, ClangTidyContext *Context) diff --git a/clang-tools-extra/clang-tidy/modernize/UseEqualsDefaultCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseEqualsDefaultCheck.cpp index 5134eb51a0322..93151024064b4 100644 --- a/clang-tools-extra/clang-tidy/modernize/UseEqualsDefaultCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/UseEqualsDefaultCheck.cpp @@ -26,7 +26,7 @@ getAllNamedFields(const CXXRecordDecl *Record) { std::set Result; for (const auto *Field : Record->fields()) { // Static data members are not in this range. 
- if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; Result.insert(Field); } diff --git a/clang-tools-extra/clang-tidy/modernize/UseStdNumbersCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseStdNumbersCheck.cpp index b299afd540b9a..1548fc454cfb3 100644 --- a/clang-tools-extra/clang-tidy/modernize/UseStdNumbersCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/UseStdNumbersCheck.cpp @@ -29,6 +29,7 @@ #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/MathExtras.h" #include +#include #include #include #include diff --git a/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.cpp b/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.cpp index 2fa7cd0baf98f..c507043c367a8 100644 --- a/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.cpp +++ b/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.cpp @@ -85,10 +85,10 @@ void UnnecessaryValueParamCheck::check(const MatchFinder::MatchResult &Result) { TraversalKindScope RAII(*Result.Context, TK_AsIs); - FunctionParmMutationAnalyzer &Analyzer = - MutationAnalyzers.try_emplace(Function, *Function, *Result.Context) - .first->second; - if (Analyzer.isMutated(Param)) + FunctionParmMutationAnalyzer *Analyzer = + FunctionParmMutationAnalyzer::getFunctionParmMutationAnalyzer( + *Function, *Result.Context, MutationAnalyzerCache); + if (Analyzer->isMutated(Param)) return; const bool IsConstQualified = @@ -169,7 +169,7 @@ void UnnecessaryValueParamCheck::storeOptions( } void UnnecessaryValueParamCheck::onEndOfTranslationUnit() { - MutationAnalyzers.clear(); + MutationAnalyzerCache.clear(); } void UnnecessaryValueParamCheck::handleMoveFix(const ParmVarDecl &Var, diff --git a/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.h b/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.h index 1872e3bc9bf29..7250bffd20b2f 100644 --- a/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.h +++ b/clang-tools-extra/clang-tidy/performance/UnnecessaryValueParamCheck.h @@ -37,8 +37,7 @@ class UnnecessaryValueParamCheck : public ClangTidyCheck { void handleMoveFix(const ParmVarDecl &Var, const DeclRefExpr &CopyArgument, const ASTContext &Context); - llvm::DenseMap - MutationAnalyzers; + ExprMutationAnalyzer::Memoized MutationAnalyzerCache; utils::IncludeInserter Inserter; const std::vector AllowedTypes; }; diff --git a/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp b/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp index 1dde049051785..4a9426ee7e8bb 100644 --- a/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp +++ b/clang-tools-extra/clang-tidy/utils/ExceptionSpecAnalyzer.cpp @@ -99,7 +99,7 @@ ExceptionSpecAnalyzer::analyzeRecord(const CXXRecordDecl *RecordDecl, } for (const auto *FDecl : RecordDecl->fields()) - if (!FDecl->isInvalidDecl() && !FDecl->isUnnamedBitfield()) { + if (!FDecl->isInvalidDecl() && !FDecl->isUnnamedBitField()) { State Result = analyzeFieldDecl(FDecl, Kind); if (Result == State::Throwing || Result == State::Unknown) return Result; diff --git a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp index 69b7d40ef628d..962a243ce94d4 100644 --- a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp +++ b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp @@ -169,14 +169,14 @@ class RenamerClangTidyCheckPPCallbacks : public PPCallbacks { return; if 
(SM.isWrittenInCommandLineFile(MacroNameTok.getLocation())) return; - Check->checkMacro(SM, MacroNameTok, Info); + Check->checkMacro(MacroNameTok, Info, SM); } /// MacroExpands calls expandMacro for macros in the main file void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD, SourceRange /*Range*/, const MacroArgs * /*Args*/) override { - Check->expandMacro(MacroNameTok, MD.getMacroInfo()); + Check->expandMacro(MacroNameTok, MD.getMacroInfo(), SM); } private: @@ -187,7 +187,7 @@ class RenamerClangTidyCheckPPCallbacks : public PPCallbacks { class RenamerClangTidyVisitor : public RecursiveASTVisitor { public: - RenamerClangTidyVisitor(RenamerClangTidyCheck *Check, const SourceManager *SM, + RenamerClangTidyVisitor(RenamerClangTidyCheck *Check, const SourceManager &SM, bool AggressiveDependentMemberLookup) : Check(Check), SM(SM), AggressiveDependentMemberLookup(AggressiveDependentMemberLookup) {} @@ -258,7 +258,7 @@ class RenamerClangTidyVisitor // Fix overridden methods if (const auto *Method = dyn_cast(Decl)) { if (const CXXMethodDecl *Overridden = getOverrideMethod(Method)) { - Check->addUsage(Overridden, Method->getLocation()); + Check->addUsage(Overridden, Method->getLocation(), SM); return true; // Don't try to add the actual decl as a Failure. } } @@ -268,7 +268,7 @@ class RenamerClangTidyVisitor if (isa(Decl)) return true; - Check->checkNamedDecl(Decl, *SM); + Check->checkNamedDecl(Decl, SM); return true; } @@ -385,7 +385,7 @@ class RenamerClangTidyVisitor private: RenamerClangTidyCheck *Check; - const SourceManager *SM; + const SourceManager &SM; const bool AggressiveDependentMemberLookup; }; @@ -415,7 +415,7 @@ void RenamerClangTidyCheck::registerPPCallbacks( void RenamerClangTidyCheck::addUsage( const RenamerClangTidyCheck::NamingCheckId &Decl, SourceRange Range, - const SourceManager *SourceMgr) { + const SourceManager &SourceMgr) { // Do nothing if the provided range is invalid. if (Range.isInvalid()) return; @@ -425,8 +425,7 @@ void RenamerClangTidyCheck::addUsage( // spelling location to different source locations, and we only want to fix // the token once, before it is expanded by the macro. SourceLocation FixLocation = Range.getBegin(); - if (SourceMgr) - FixLocation = SourceMgr->getSpellingLoc(FixLocation); + FixLocation = SourceMgr.getSpellingLoc(FixLocation); if (FixLocation.isInvalid()) return; @@ -440,15 +439,15 @@ void RenamerClangTidyCheck::addUsage( if (!Failure.shouldFix()) return; - if (SourceMgr && SourceMgr->isWrittenInScratchSpace(FixLocation)) + if (SourceMgr.isWrittenInScratchSpace(FixLocation)) Failure.FixStatus = RenamerClangTidyCheck::ShouldFixStatus::InsideMacro; - if (!utils::rangeCanBeFixed(Range, SourceMgr)) + if (!utils::rangeCanBeFixed(Range, &SourceMgr)) Failure.FixStatus = RenamerClangTidyCheck::ShouldFixStatus::InsideMacro; } void RenamerClangTidyCheck::addUsage(const NamedDecl *Decl, SourceRange Range, - const SourceManager *SourceMgr) { + const SourceManager &SourceMgr) { // Don't keep track for non-identifier names. auto *II = Decl->getIdentifier(); if (!II) @@ -489,18 +488,24 @@ void RenamerClangTidyCheck::checkNamedDecl(const NamedDecl *Decl, } Failure.Info = std::move(Info); - addUsage(Decl, Range); + addUsage(Decl, Range, SourceMgr); } void RenamerClangTidyCheck::check(const MatchFinder::MatchResult &Result) { - RenamerClangTidyVisitor Visitor(this, Result.SourceManager, + if (!Result.SourceManager) { + // In principle SourceManager is not null but going only by the definition + // of MatchResult it must be handled. 
Cannot rename anything without a + // SourceManager. + return; + } + RenamerClangTidyVisitor Visitor(this, *Result.SourceManager, AggressiveDependentMemberLookup); Visitor.TraverseAST(*Result.Context); } -void RenamerClangTidyCheck::checkMacro(const SourceManager &SourceMgr, - const Token &MacroNameTok, - const MacroInfo *MI) { +void RenamerClangTidyCheck::checkMacro(const Token &MacroNameTok, + const MacroInfo *MI, + const SourceManager &SourceMgr) { std::optional MaybeFailure = getMacroFailureInfo(MacroNameTok, SourceMgr); if (!MaybeFailure) @@ -515,11 +520,12 @@ void RenamerClangTidyCheck::checkMacro(const SourceManager &SourceMgr, Failure.FixStatus = ShouldFixStatus::FixInvalidIdentifier; Failure.Info = std::move(Info); - addUsage(ID, Range); + addUsage(ID, Range, SourceMgr); } void RenamerClangTidyCheck::expandMacro(const Token &MacroNameTok, - const MacroInfo *MI) { + const MacroInfo *MI, + const SourceManager &SourceMgr) { StringRef Name = MacroNameTok.getIdentifierInfo()->getName(); NamingCheckId ID(MI->getDefinitionLoc(), Name); @@ -528,7 +534,7 @@ void RenamerClangTidyCheck::expandMacro(const Token &MacroNameTok, return; SourceRange Range(MacroNameTok.getLocation(), MacroNameTok.getEndLoc()); - addUsage(ID, Range); + addUsage(ID, Range, SourceMgr); } static std::string diff --git a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.h b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.h index 38228fb59bf62..be5b6f0c7f767 100644 --- a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.h +++ b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.h @@ -108,18 +108,19 @@ class RenamerClangTidyCheck : public ClangTidyCheck { llvm::DenseMap; /// Check Macros for style violations. - void checkMacro(const SourceManager &SourceMgr, const Token &MacroNameTok, - const MacroInfo *MI); + void checkMacro(const Token &MacroNameTok, const MacroInfo *MI, + const SourceManager &SourceMgr); /// Add a usage of a macro if it already has a violation. - void expandMacro(const Token &MacroNameTok, const MacroInfo *MI); + void expandMacro(const Token &MacroNameTok, const MacroInfo *MI, + const SourceManager &SourceMgr); void addUsage(const RenamerClangTidyCheck::NamingCheckId &Decl, - SourceRange Range, const SourceManager *SourceMgr = nullptr); + SourceRange Range, const SourceManager &SourceMgr); /// Convenience method when the usage to be added is a NamedDecl. void addUsage(const NamedDecl *Decl, SourceRange Range, - const SourceManager *SourceMgr = nullptr); + const SourceManager &SourceMgr); void checkNamedDecl(const NamedDecl *Decl, const SourceManager &SourceMgr); diff --git a/clang-tools-extra/clangd/CodeComplete.cpp b/clang-tools-extra/clangd/CodeComplete.cpp index 9e321dce4c504..89eee392837af 100644 --- a/clang-tools-extra/clangd/CodeComplete.cpp +++ b/clang-tools-extra/clangd/CodeComplete.cpp @@ -89,7 +89,11 @@ const CodeCompleteOptions::CodeCompletionRankingModel namespace { -CompletionItemKind toCompletionItemKind(index::SymbolKind Kind) { +// Note: changes to this function should also be reflected in the +// CodeCompletionResult overload where appropriate. 
+CompletionItemKind +toCompletionItemKind(index::SymbolKind Kind, + const llvm::StringRef *Signature = nullptr) { using SK = index::SymbolKind; switch (Kind) { case SK::Unknown: @@ -99,7 +103,10 @@ CompletionItemKind toCompletionItemKind(index::SymbolKind Kind) { case SK::NamespaceAlias: return CompletionItemKind::Module; case SK::Macro: - return CompletionItemKind::Text; + // Use macro signature (if provided) to tell apart function-like and + // object-like macros. + return Signature && Signature->contains('(') ? CompletionItemKind::Function + : CompletionItemKind::Constant; case SK::Enum: return CompletionItemKind::Enum; case SK::Struct: @@ -150,6 +157,8 @@ CompletionItemKind toCompletionItemKind(index::SymbolKind Kind) { llvm_unreachable("Unhandled clang::index::SymbolKind."); } +// Note: changes to this function should also be reflected in the +// index::SymbolKind overload where appropriate. CompletionItemKind toCompletionItemKind(const CodeCompletionResult &Res, CodeCompletionContext::Kind CtxKind) { if (Res.Declaration) @@ -379,7 +388,8 @@ struct CodeCompletionBuilder { if (Completion.Scope.empty()) Completion.Scope = std::string(C.IndexResult->Scope); if (Completion.Kind == CompletionItemKind::Missing) - Completion.Kind = toCompletionItemKind(C.IndexResult->SymInfo.Kind); + Completion.Kind = toCompletionItemKind(C.IndexResult->SymInfo.Kind, + &C.IndexResult->Signature); if (Completion.Name.empty()) Completion.Name = std::string(C.IndexResult->Name); if (Completion.FilterText.empty()) diff --git a/clang-tools-extra/clangd/index/SymbolCollector.cpp b/clang-tools-extra/clangd/index/SymbolCollector.cpp index 85b8fc549b016..5c4e2150cf312 100644 --- a/clang-tools-extra/clangd/index/SymbolCollector.cpp +++ b/clang-tools-extra/clangd/index/SymbolCollector.cpp @@ -409,7 +409,7 @@ class SymbolCollector::HeaderFileURICache { // Framework headers are spelled as , not // "path/FrameworkName.framework/Headers/Foo.h". 
auto &HS = PP->getHeaderSearchInfo(); - if (const auto *HFI = HS.getExistingFileInfo(*FE, /*WantExternal*/ false)) + if (const auto *HFI = HS.getExistingFileInfo(*FE)) if (!HFI->Framework.empty()) if (auto Spelling = getFrameworkHeaderIncludeSpelling(*FE, HFI->Framework, HS)) diff --git a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp index 49337bddf98d5..8fbac73cb653b 100644 --- a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp +++ b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp @@ -671,7 +671,8 @@ TEST(CompletionTest, Kinds) { #define MACRO 10 int X = ^ )cpp", - {func("indexFunction"), var("indexVariable"), cls("indexClass")}); + {func("indexFunction"), var("indexVariable"), cls("indexClass"), + macro("indexObjMacro"), macro("indexFuncMacro", "(x, y)")}); EXPECT_THAT(Results.Completions, AllOf(has("function", CompletionItemKind::Function), has("variable", CompletionItemKind::Variable), @@ -680,7 +681,9 @@ TEST(CompletionTest, Kinds) { has("MACRO", CompletionItemKind::Constant), has("indexFunction", CompletionItemKind::Function), has("indexVariable", CompletionItemKind::Variable), - has("indexClass", CompletionItemKind::Class))); + has("indexClass", CompletionItemKind::Class), + has("indexObjMacro", CompletionItemKind::Constant), + has("indexFuncMacro", CompletionItemKind::Function))); Results = completions("nam^"); EXPECT_THAT(Results.Completions, diff --git a/clang-tools-extra/clangd/unittests/HoverTests.cpp b/clang-tools-extra/clangd/unittests/HoverTests.cpp index 35db757b9c15b..5ead74748f550 100644 --- a/clang-tools-extra/clangd/unittests/HoverTests.cpp +++ b/clang-tools-extra/clangd/unittests/HoverTests.cpp @@ -1983,10 +1983,14 @@ TEST(Hover, All) { HI.Kind = index::SymbolKind::Macro; HI.Definition = R"cpp(#define MACRO \ - { return 0; } + { \ + return 0; \ + } // Expands to -{ return 0; })cpp"; +{ + return 0; +})cpp"; }}, { R"cpp(// Forward class declaration diff --git a/clang-tools-extra/clangd/unittests/TestIndex.cpp b/clang-tools-extra/clangd/unittests/TestIndex.cpp index 278336bdde2ee..b13a5d32d1752 100644 --- a/clang-tools-extra/clangd/unittests/TestIndex.cpp +++ b/clang-tools-extra/clangd/unittests/TestIndex.cpp @@ -38,7 +38,7 @@ static std::string replace(llvm::StringRef Haystack, llvm::StringRef Needle, // Helpers to produce fake index symbols for memIndex() or completions(). // USRFormat is a regex replacement string for the unqualified part of the USR. Symbol sym(llvm::StringRef QName, index::SymbolKind Kind, - llvm::StringRef USRFormat) { + llvm::StringRef USRFormat, llvm::StringRef Signature) { Symbol Sym; std::string USR = "c:"; // We synthesize a few simple cases of USRs by hand! 
size_t Pos = QName.rfind("::"); @@ -55,6 +55,7 @@ Symbol sym(llvm::StringRef QName, index::SymbolKind Kind, Sym.SymInfo.Kind = Kind; Sym.Flags |= Symbol::IndexedForCodeCompletion; Sym.Origin = SymbolOrigin::Static; + Sym.Signature = Signature; return Sym; } @@ -86,6 +87,10 @@ Symbol conceptSym(llvm::StringRef Name) { return sym(Name, index::SymbolKind::Concept, "@CT@\\0"); } +Symbol macro(llvm::StringRef Name, llvm::StringRef ArgList) { + return sym(Name, index::SymbolKind::Macro, "@macro@\\0", ArgList); +} + Symbol objcSym(llvm::StringRef Name, index::SymbolKind Kind, llvm::StringRef USRPrefix) { Symbol Sym; diff --git a/clang-tools-extra/clangd/unittests/TestIndex.h b/clang-tools-extra/clangd/unittests/TestIndex.h index 9280b0b12a67f..0699b29392d72 100644 --- a/clang-tools-extra/clangd/unittests/TestIndex.h +++ b/clang-tools-extra/clangd/unittests/TestIndex.h @@ -20,7 +20,7 @@ Symbol symbol(llvm::StringRef QName); // Helpers to produce fake index symbols with proper SymbolID. // USRFormat is a regex replacement string for the unqualified part of the USR. Symbol sym(llvm::StringRef QName, index::SymbolKind Kind, - llvm::StringRef USRFormat); + llvm::StringRef USRFormat, llvm::StringRef Signature = {}); // Creats a function symbol assuming no function arg. Symbol func(llvm::StringRef Name); // Creates a class symbol. @@ -35,6 +35,8 @@ Symbol var(llvm::StringRef Name); Symbol ns(llvm::StringRef Name); // Create a C++20 concept symbol. Symbol conceptSym(llvm::StringRef Name); +// Create a macro symbol. +Symbol macro(llvm::StringRef Name, llvm::StringRef ArgList = {}); // Create an Objective-C symbol. Symbol objcSym(llvm::StringRef Name, index::SymbolKind Kind, diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index a7193e90c38da..a457e6fcae946 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -100,6 +100,8 @@ Improvements to clang-tidy - Improved :program:`run-clang-tidy.py` script. Added argument `-source-filter` to filter source files from the compilation database, via a RegEx. In a similar fashion to what `-header-filter` does for header files. +- Improved :program:`check_clang_tidy.py` script. Added argument `-export-fixes` + to aid in clang-tidy and test development. New checks ^^^^^^^^^^ @@ -145,6 +147,10 @@ Changes in existing checks ` check by detecting side effect from calling a method with non-const reference parameters. +- Improved :doc:`bugprone-forwarding-reference-overload + ` + check to ignore deleted constructors which won't hide other overloads. + - Improved :doc:`bugprone-inc-dec-in-conditions ` check to ignore code within unevaluated contexts, such as ``decltype``. @@ -219,6 +225,10 @@ Changes in existing checks ` check by replacing the local option `HeaderFileExtensions` by the global option of the same name. +- Improved :doc:`misc-const-correctness + ` check by avoiding infinite recursion + for recursive forwarding reference. + - Improved :doc:`misc-definitions-in-headers ` check by replacing the local option `HeaderFileExtensions` by the global option of the same name. @@ -268,7 +278,7 @@ Changes in existing checks ` check in `GetConfigPerFile` mode by resolving symbolic links to header files. Fixed handling of Hungarian Prefix when configured to `LowerCase`. Added support for renaming designated - initializers. + initializers. Added support for renaming macro arguments. 
- Improved :doc:`readability-implicit-bool-conversion ` check to provide @@ -297,6 +307,10 @@ Miscellaneous ``--format`` option is specified. Now :program:`clang-apply-replacements` applies formatting only with the option. +- Fixed the :doc:`linuxkernel-must-check-errs + ` documentation to consistently + use the check's proper name. + Improvements to include-fixer ----------------------------- diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/sizeof-expression.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/sizeof-expression.rst index a3e88b837d375..c37df1706eb4e 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/bugprone/sizeof-expression.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/sizeof-expression.rst @@ -190,6 +190,6 @@ Options .. option:: WarnOnSizeOfPointerToAggregate - When `true, the check will warn on an expression like + When `true`, the check will warn on an expression like ``sizeof(expr)`` where the expression is a pointer to aggregate. Default is `true`. diff --git a/clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-use-errs.rst b/clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-check-errs.rst similarity index 88% rename from clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-use-errs.rst rename to clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-check-errs.rst index 8a85426880987..cef5a70db309e 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-use-errs.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/linuxkernel/must-check-errs.rst @@ -1,7 +1,7 @@ -.. title:: clang-tidy - linuxkernel-must-use-errs +.. title:: clang-tidy - linuxkernel-must-check-errs -linuxkernel-must-use-errs -========================= +linuxkernel-must-check-errs +=========================== Checks Linux kernel code to see if it uses the results from the functions in ``linux/err.h``. Also checks to see if code uses the results from functions that diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst index 188a42bfddd38..8bc46acad56c8 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/list.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst @@ -233,7 +233,7 @@ Clang-Tidy Checks :doc:`hicpp-multiway-paths-covered `, :doc:`hicpp-no-assembler `, :doc:`hicpp-signed-bitwise `, - :doc:`linuxkernel-must-use-errs `, + :doc:`linuxkernel-must-check-errs `, :doc:`llvm-header-guard `, :doc:`llvm-include-order `, "Yes" :doc:`llvm-namespace-comment `, diff --git a/clang-tools-extra/include-cleaner/lib/FindHeaders.cpp b/clang-tools-extra/include-cleaner/lib/FindHeaders.cpp index fd2de6a17ad4a..7b28d1c252d71 100644 --- a/clang-tools-extra/include-cleaner/lib/FindHeaders.cpp +++ b/clang-tools-extra/include-cleaner/lib/FindHeaders.cpp @@ -275,6 +275,12 @@ llvm::SmallVector
headersForSymbol(const Symbol &S, // are already ranked in the stdlib mapping. if (H.kind() == Header::Standard) continue; + // Don't apply name match hints to exporting headers. As they usually have + // names similar to the original header, e.g. foo_wrapper/foo.h vs + // foo/foo.h, but shouldn't be preferred (unless marked as the public + // interface). + if ((H.Hint & Hints::OriginHeader) == Hints::None) + continue; if (nameMatch(SymbolName, H)) H.Hint |= Hints::PreferredHeader; } diff --git a/clang-tools-extra/include-cleaner/unittests/FindHeadersTest.cpp b/clang-tools-extra/include-cleaner/unittests/FindHeadersTest.cpp index 5a2a41b2d99bd..07302142a13e3 100644 --- a/clang-tools-extra/include-cleaner/unittests/FindHeadersTest.cpp +++ b/clang-tools-extra/include-cleaner/unittests/FindHeadersTest.cpp @@ -628,5 +628,24 @@ TEST_F(HeadersForSymbolTest, StandardHeaders) { tooling::stdlib::Header::named(""))); } +TEST_F(HeadersForSymbolTest, ExporterNoNameMatch) { + Inputs.Code = R"cpp( + #include "exporter/foo.h" + #include "foo_public.h" + )cpp"; + Inputs.ExtraArgs.emplace_back("-I."); + // Deliberately named as foo_public to make sure it doesn't get name-match + // boost and also gets lexicographically bigger order than "exporter/foo.h". + Inputs.ExtraFiles["foo_public.h"] = guard(R"cpp( + struct foo {}; + )cpp"); + Inputs.ExtraFiles["exporter/foo.h"] = guard(R"cpp( + #include "foo_public.h" // IWYU pragma: export + )cpp"); + buildAST(); + EXPECT_THAT(headersForFoo(), ElementsAre(physicalHeader("foo_public.h"), + physicalHeader("exporter/foo.h"))); +} + } // namespace } // namespace clang::include_cleaner diff --git a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py index 53ffca0bad8d0..6d4b466afa691 100755 --- a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py +++ b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py @@ -8,25 +8,35 @@ # # ===------------------------------------------------------------------------===# -r""" +""" ClangTidy Test Helper ===================== -This script runs clang-tidy in fix mode and verify fixes, messages or both. +This script is used to simplify writing, running, and debugging tests compatible +with llvm-lit. By default it runs clang-tidy in fix mode and uses FileCheck to +verify messages and/or fixes. + +For debugging, with --export-fixes, the tool simply exports fixes to a provided +file and does not run FileCheck. -Usage: - check_clang_tidy.py [-resource-dir=] \ - [-assume-filename=] \ - [-check-suffix=] \ - [-check-suffixes=] \ - [-std=c++(98|11|14|17|20)[-or-later]] \ - \ - -- [optional clang-tidy arguments] +Extra arguments, those after the first -- if any, are passed to either +clang-tidy or clang: +* Arguments between the first -- and second -- are clang-tidy arguments. + * May be only whitespace if there are no clang-tidy arguments. + * clang-tidy's --config would go here. +* Arguments after the second -- are clang arguments + +Examples +-------- -Example: // RUN: %check_clang_tidy %s llvm-include-order %t -- -- -isystem %S/Inputs -Notes: +or + + // RUN: %check_clang_tidy %s llvm-include-order --export-fixes=fixes.yaml %t -std=c++20 + +Notes +----- -std=c++(98|11|14|17|20)-or-later: This flag will cause multiple runs within the same check_clang_tidy execution. Make sure you don't have shared state across these runs. 
@@ -34,6 +44,7 @@ import argparse import os +import pathlib import re import subprocess import sys @@ -88,6 +99,7 @@ def __init__(self, args, extra_args): self.has_check_fixes = False self.has_check_messages = False self.has_check_notes = False + self.export_fixes = args.export_fixes self.fixes = MessagePrefix("CHECK-FIXES") self.messages = MessagePrefix("CHECK-MESSAGES") self.notes = MessagePrefix("CHECK-NOTES") @@ -181,7 +193,13 @@ def run_clang_tidy(self): [ "clang-tidy", self.temp_file_name, - "-fix", + ] + + [ + "-fix" + if self.export_fixes is None + else "--export-fixes=" + self.export_fixes + ] + + [ "--checks=-*," + self.check_name, ] + self.clang_tidy_extra_args @@ -255,12 +273,14 @@ def check_notes(self, clang_tidy_output): def run(self): self.read_input() - self.get_prefixes() + if self.export_fixes is None: + self.get_prefixes() self.prepare_test_inputs() clang_tidy_output = self.run_clang_tidy() - self.check_fixes() - self.check_messages(clang_tidy_output) - self.check_notes(clang_tidy_output) + if self.export_fixes is None: + self.check_fixes() + self.check_messages(clang_tidy_output) + self.check_notes(clang_tidy_output) def expand_std(std): @@ -284,7 +304,11 @@ def csv(string): def parse_arguments(): - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser( + prog=pathlib.Path(__file__).stem, + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) parser.add_argument("-expect-clang-tidy-error", action="store_true") parser.add_argument("-resource-dir") parser.add_argument("-assume-filename") @@ -298,7 +322,19 @@ def parse_arguments(): type=csv, help="comma-separated list of FileCheck suffixes", ) - parser.add_argument("-std", type=csv, default=["c++11-or-later"]) + parser.add_argument( + "-export-fixes", + default=None, + type=str, + metavar="file", + help="A file to export fixes into instead of fixing.", + ) + parser.add_argument( + "-std", + type=csv, + default=["c++11-or-later"], + help="Passed to clang. Special -or-later values are expanded.", + ) return parser.parse_known_args() diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/forwarding-reference-overload.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/forwarding-reference-overload.cpp index 38b0691bc9f1e..92dfb718bb51b 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/forwarding-reference-overload.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/forwarding-reference-overload.cpp @@ -251,3 +251,13 @@ class Test10 { Test10(T &&Item, E e) : e(e){} }; + +// A deleted ctor cannot hide anything +class Test11 { +public: + template + Test11(T&&) = delete; + + Test11(const Test11 &) = default; + Test11(Test11 &&) = default; +}; diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/const-correctness-templates.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/const-correctness-templates.cpp index 9da468128743e..248374a71dd40 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/misc/const-correctness-templates.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/misc/const-correctness-templates.cpp @@ -58,3 +58,18 @@ void concatenate3(Args... 
args) (..., (stream << args)); } } // namespace gh70323 + +namespace gh60895 { + +template void f1(T &&a); +template void f2(T &&a); +template void f1(T &&a) { f2(a); } +template void f2(T &&a) { f1(a); } +void f() { + int x = 0; + // CHECK-MESSAGES:[[@LINE-1]]:3: warning: variable 'x' of type 'int' can be declared 'const' + // CHECK-FIXES: int const x = 0; + f1(x); +} + +} // namespace gh60895 diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp index 57ef4aae5ddb7..99149fe86acee 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming.cpp @@ -108,10 +108,12 @@ USER_NS::object g_s2; // NO warnings or fixes expected as USER_NS and object are declared in a header file SYSTEM_MACRO(var1); -// NO warnings or fixes expected as var1 is from macro expansion +// CHECK-MESSAGES: :[[@LINE-1]]:14: warning: invalid case style for global variable 'var1' [readability-identifier-naming] +// CHECK-FIXES: {{^}}SYSTEM_MACRO(g_var1); USER_MACRO(var2); -// NO warnings or fixes expected as var2 is declared in a macro expansion +// CHECK-MESSAGES: :[[@LINE-1]]:12: warning: invalid case style for global variable 'var2' [readability-identifier-naming] +// CHECK-FIXES: {{^}}USER_MACRO(g_var2); #define BLA int FOO_bar BLA; @@ -602,9 +604,20 @@ static void static_Function() { // CHECK-FIXES: {{^}}#define MY_TEST_MACRO(X) X() void MY_TEST_Macro(function) {} -// CHECK-FIXES: {{^}}void MY_TEST_MACRO(function) {} -} -} +// CHECK-MESSAGES: :[[@LINE-1]]:20: warning: invalid case style for global function 'function' [readability-identifier-naming] +// CHECK-FIXES: {{^}}void MY_TEST_MACRO(Function) {} + +#define MY_CAT_IMPL(l, r) l ## r +#define MY_CAT(l, r) MY_CAT_IMPL(l, r) +#define MY_MACRO2(foo) int MY_CAT(awesome_, MY_CAT(foo, __COUNTER__)) = 0 +#define MY_MACRO3(foo) int MY_CAT(awesome_, foo) = 0 +MY_MACRO2(myglob); +MY_MACRO3(myglob); +// No suggestions should occur even though the resulting decl of awesome_myglob# +// or awesome_myglob are not entirely within a macro argument. + +} // namespace InlineNamespace +} // namespace FOO_NS template struct a { // CHECK-MESSAGES: :[[@LINE-1]]:32: warning: invalid case style for struct 'a' diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index ca0501653d614..9fbf896c0e873 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -165,6 +165,13 @@ if(CLANG_ENABLE_LIBXML2) endif() endif() +if(CLANG_ENABLE_CIR) + if (NOT "${LLVM_ENABLE_PROJECTS}" MATCHES "MLIR|mlir") + message(FATAL_ERROR + "Cannot build ClangIR without MLIR in LLVM_ENABLE_PROJECTS") + endif() +endif() + include(CheckIncludeFile) check_include_file(sys/resource.h CLANG_HAVE_RLIMITS) diff --git a/clang/cmake/caches/Release.cmake b/clang/cmake/caches/Release.cmake index 1ca9138b98073..bd1f688d61a7e 100644 --- a/clang/cmake/caches/Release.cmake +++ b/clang/cmake/caches/Release.cmake @@ -4,7 +4,7 @@ # General Options set(LLVM_RELEASE_ENABLE_LTO THIN CACHE STRING "") -set(LLVM_RELEASE_ENABLE_PGO ON CACHE BOOL "") +set(LLVM_RELEASE_ENABLE_PGO OFF CACHE BOOL "") set(CMAKE_BUILD_TYPE RELEASE CACHE STRING "") diff --git a/clang/docs/ClangFormat.rst b/clang/docs/ClangFormat.rst index 80dc38a075c8f..dbd9c91ae508e 100644 --- a/clang/docs/ClangFormat.rst +++ b/clang/docs/ClangFormat.rst @@ -54,7 +54,7 @@ to format C/C++/Java/JavaScript/JSON/Objective-C/Protobuf/C# code. 
Objective-C: .m .mm Proto: .proto .protodevel TableGen: .td - TextProto: .textpb .pb.txt .textproto .asciipb + TextProto: .txtpb .textpb .pb.txt .textproto .asciipb Verilog: .sv .svh .v .vh --cursor= - The position of the cursor when invoking clang-format from an editor integration diff --git a/clang/docs/HIPSupport.rst b/clang/docs/HIPSupport.rst index 543c82cf90244..5ba84c2f67055 100644 --- a/clang/docs/HIPSupport.rst +++ b/clang/docs/HIPSupport.rst @@ -208,6 +208,20 @@ Host Code Compilation - These relocatable objects are then linked together. - Host code within a TU can call host functions and launch kernels from another TU. +Syntax Difference with CUDA +=========================== + +Clang's front end, used for both CUDA and HIP programming models, shares the same parsing and semantic analysis mechanisms. This includes the resolution of overloads concerning device and host functions. While there exists comprehensive documentation on the syntax differences between Clang and NVCC for CUDA at `Dialect Differences Between Clang and NVCC `_, these differences also apply to HIP code compilation. + +Predefined Macros for Differentiation +------------------------------------- + +To facilitate differentiation between HIP and CUDA code, as well as between device and host compilations within HIP, Clang defines specific macros: + +- ``__HIP__`` : This macro is defined only when compiling HIP code. It can be used to conditionally compile code specific to HIP, enabling developers to write portable code that can be compiled for both CUDA and HIP. + +- ``__HIP_DEVICE_COMPILE__`` : Defined exclusively during HIP device compilation, this macro allows for conditional compilation of device-specific code. It provides a mechanism to segregate device and host code, ensuring that each can be optimized for their respective execution environments. + Function Pointers Support ========================= diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst index 40b3a76fb8162..7bbc1dd63f947 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -1493,6 +1493,7 @@ Conditional ``explicit`` __cpp_conditional_explicit C+ ``if consteval`` __cpp_if_consteval C++23 C++20 ``static operator()`` __cpp_static_call_operator C++23 C++03 Attributes on Lambda-Expressions C++23 C++11 +``= delete ("should have a reason");`` __cpp_deleted_function C++26 C++03 -------------------------------------------- -------------------------------- ------------- ------------- Designated initializers (N494) C99 C89 Array & element qualification (N2607) C23 C89 @@ -1610,6 +1611,7 @@ The following type trait primitives are supported by Clang. Those traits marked * ``__is_pod`` (C++, GNU, Microsoft, Embarcadero): Note, the corresponding standard trait was deprecated in C++20. * ``__is_pointer`` (C++, Embarcadero) +* ``__is_pointer_interconvertible_base_of`` (C++, GNU, Microsoft) * ``__is_polymorphic`` (C++, GNU, Microsoft, Embarcadero) * ``__is_reference`` (C++, Embarcadero) * ``__is_referenceable`` (C++, GNU, Microsoft, Embarcadero): @@ -3464,6 +3466,54 @@ Query for this feature with ``__has_builtin(__builtin_trap)``. ``__builtin_arm_trap`` is lowered to the ``llvm.aarch64.break`` builtin, and then to ``brk #payload``. +``__builtin_allow_runtime_check`` +--------------------------------- + +``__builtin_allow_runtime_check`` returns true if the check at the current +program location should be executed.
It is expected to be used to implement +``assert``-like checks which can be safely removed by the optimizer. + +**Syntax**: + +.. code-block:: c++ + + bool __builtin_allow_runtime_check(const char* kind) + +**Example of use**: + +.. code-block:: c++ + + if (__builtin_allow_runtime_check("mycheck") && !ExpensiveCheck()) { + abort(); + } + +**Description** + +``__builtin_allow_runtime_check`` is lowered to the ` ``llvm.allow.runtime.check`` +`_ +builtin. + +``__builtin_allow_runtime_check()`` is expected to be used in control-flow +conditions, such as in ``if``, to guard expensive runtime checks. The +specific rules for selecting permitted checks can differ and are controlled by +the compiler options. + +Flags to control checks: +* ``-mllvm -lower-allow-check-percentile-cutoff-hot=N`` where N is a PGO hotness +cutoff in the range ``[0, 999999]`` used to disallow checks in hot code. +* ``-mllvm -lower-allow-check-random-rate=P`` where P is a number in the range +``[0.0, 1.0]`` representing the probability of keeping a check. +* If both flags are specified, ``-lower-allow-check-random-rate`` takes +precedence. +* If neither is specified, ``__builtin_allow_runtime_check`` is lowered as +``true``, allowing all checks. + +Parameter ``kind`` is a string literal representing a user-selected kind for +the guarded check. It is currently unused, but will enable kind-specific lowering +in the future, e.g. a higher hotness cutoff for more expensive kinds of checks. + +Query for this feature with ``__has_builtin(__builtin_allow_runtime_check)``. + ``__builtin_nondeterministic_value`` ------------------------------------ diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index f96cebbde3d82..3fe15934323c5 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -68,7 +68,7 @@ AST Dumping Potentially Breaking Changes Clang Frontend Potentially Breaking Changes ------------------------------------------- -- Removed support for constructing on-stack ``TemplateArgumentList``s; interfaces should instead +- Removed support for constructing on-stack ``TemplateArgumentList``\ s; interfaces should instead use ``ArrayRef`` to pass template arguments. Transitioning internal uses to ``ArrayRef`` reduces AST memory usage by 0.4% when compiling clang, and is expected to show similar improvements on other workloads. @@ -98,13 +98,13 @@ C++20 Feature Support behavior can use the flag '-Xclang -fno-skip-odr-check-in-gmf'. (#GH79240). -- Implemented the `__is_layout_compatible` intrinsic to support +- Implemented the `__is_layout_compatible` and `__is_pointer_interconvertible_base_of` + intrinsics to support `P0466R5: Layout-compatibility and Pointer-interconvertibility Traits `_. - Clang now implements [module.import]p7 fully. Clang now will import module units transitively for the module units coming from the same module of the - current module units. - Fixes `#84002 `_. + current module units. Fixes #GH84002 - Initial support for class template argument deduction (CTAD) for type alias templates (`P1814R0 `_). @@ -128,12 +128,13 @@ C++2c Feature Support - Implemented `P2662R3 Pack Indexing `_. +- Implemented `P2573R2: = delete("should have a reason"); `_ + Resolutions to C++ Defect Reports ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Substitute template parameter pack, when it is not explicitly specified - in the template parameters, but is deduced from a previous argument. - (`#78449: `_). + in the template parameters, but is deduced from a previous argument.
(#GH78449) - Type qualifications are now ignored when evaluating layout compatibility of two types. @@ -147,6 +148,9 @@ Resolutions to C++ Defect Reports compatibility of two types. (`CWG2759: [[no_unique_address] and common initial sequence `_). +- Clang now diagnoses declarative nested-name-specifiers with pack-index-specifiers. + (`CWG2858: Declarative nested-name-specifiers and pack-index-specifiers `_). + C Language Changes ------------------ @@ -170,8 +174,7 @@ C23 Feature Support - Clang now generates predefined macros of the form ``__TYPE_FMTB__`` and ``__TYPE_FMTb__`` (e.g., ``__UINT_FAST64_FMTB__``) in C23 mode for use with - macros typically exposed from ````, such as ``PRIb8``. - (`#81896: `_). + macros typically exposed from ````, such as ``PRIb8``. (#GH81896) - Clang now supports `N3018 The constexpr specifier for object definitions` `_. @@ -209,7 +212,10 @@ New Compiler Flags - ``-Wmissing-designated-field-initializers``, grouped under ``-Wmissing-field-initializers``. This diagnostic can be disabled to make ``-Wmissing-field-initializers`` behave - like it did before Clang 18.x. Fixes (`#56628 `_) + like it did before Clang 18.x. Fixes #GH56628 + +- ``-fexperimental-modules-reduced-bmi`` enables the Reduced BMI for C++20 named modules. + See the document of standard C++ modules for details. Deprecated Compiler Flags ------------------------- @@ -248,8 +254,7 @@ Removed Compiler Flags - The ``-freroll-loops`` flag has been removed. It had no effect since Clang 13. - ``-m[no-]unaligned-access`` is removed for RISC-V and LoongArch. - ``-m[no-]strict-align``, also supported by GCC, should be used instead. - (`#85350 `_.) + ``-m[no-]strict-align``, also supported by GCC, should be used instead. (#GH85350) Attribute Changes in Clang -------------------------- @@ -282,6 +287,9 @@ Attribute Changes in Clang This allows the ``_Nullable`` and ``_Nonnull`` family of type attributes to apply to this class. +- Clang now warns that the ``exclude_from_explicit_instantiation`` attribute + is ignored when applied to a local class or a member thereof. + Improvements to Clang's diagnostics ----------------------------------- - Clang now applies syntax highlighting to the code snippets it @@ -319,8 +327,7 @@ Improvements to Clang's diagnostics Fixes #GH82512. - Clang now provides improved warnings for the ``cleanup`` attribute to detect misuse scenarios, - such as attempting to call ``free`` on an unallocated object. Fixes - `#79443 `_. + such as attempting to call ``free`` on an unallocated object. Fixes #GH79443. - Clang no longer warns when the ``bitand`` operator is used with boolean operands, distinguishing it from potential typographical errors or unintended @@ -357,17 +364,21 @@ Improvements to Clang's diagnostics Added the ``-Wtentative-definition-array`` warning group to cover this. Fixes #GH87766 +- Clang now uses the correct type-parameter-key (``class`` or ``typename``) when printing + template template parameter declarations. + +- Clang now diagnoses requires expressions with explicit object parameters. + Improvements to Clang's time-trace ---------------------------------- Bug Fixes in This Version ------------------------- - Clang's ``-Wundefined-func-template`` no longer warns on pure virtual - functions. - (`#74016 `_) + functions. (#GH74016) - Fixed missing warnings when comparing mismatched enumeration constants - in C (`#29217 `). 
+ in C (#GH29217) - Clang now accepts elaborated-type-specifiers that explicitly specialize a member class template for an implicit instantiation of a class template. @@ -406,7 +417,7 @@ Bug Fixes in This Version type only rather than to the complex type (e.g. ``_Complex float / int`` is now evaluated as ``_Complex float / float`` rather than ``_Complex float / _Complex float``), as mandated by the C standard. This significantly improves codegen of `*` and `/` especially. - Fixes (`#31205 `_). + Fixes #GH31205. - Fixes an assertion failure on invalid code when trying to define member functions in lambdas. @@ -414,6 +425,8 @@ Bug Fixes in This Version - Fixed a regression in CTAD that a friend declaration that befriends itself may cause incorrect constraint substitution. (#GH86769). +- Fixed an assertion failure on invalid InitListExpr in C89 mode (#GH88008). + Bug Fixes to Compiler Builtins ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -455,8 +468,7 @@ Bug Fixes to C++ Support - Fix a crash when trying to call a varargs function that also has an explicit object parameter. (#GH80971) - Fixed a bug where abbreviated function templates would append their invented template parameters to an empty template parameter lists. -- Fix parsing of abominable function types inside type traits. - Fixes (`#77585 `_) +- Fix parsing of abominable function types inside type traits. Fixes #GH77585 - Clang now classifies aggregate initialization in C++17 and newer as constant or non-constant more accurately. Previously, only a subset of the initializer elements were considered, misclassifying some initializers as constant. Partially fixes @@ -497,9 +509,7 @@ Bug Fixes to C++ Support - Fix a bug where overload resolution falsely reported an ambiguity when it was comparing a member-function against a non member function or a member-function with an explicit object parameter against a member function with no explicit object parameter - when one of the function had more specialized templates. - Fixes (`#82509 `_) - and (`#74494 `_) + when one of the function had more specialized templates. Fixes #GH82509 and #GH74494 - Clang now supports direct lambda calls inside of a type alias template declarations. This addresses (#GH70601), (#GH76674), (#GH79555), (#GH81145) and (#GH82104). - Allow access to a public template alias declaration that refers to friend's @@ -520,19 +530,22 @@ Bug Fixes to C++ Support - Fix an issue caused by not handling invalid cases when substituting into the parameter mapping of a constraint. Fixes (#GH86757). - Fixed a bug that prevented member function templates of class templates declared with a deduced return type from being explicitly specialized for a given implicit instantiation of the class template. -- Fixed a crash when ``this`` is used in a dependent class scope function template specialization - that instantiates to a static member function. -- Fix crash when inheriting from a cv-qualified type. Fixes: - (`#35603 `_) +- Fix crash when inheriting from a cv-qualified type. Fixes #GH35603 - Fix a crash when the using enum declaration uses an anonymous enumeration. Fixes (#GH86790). +- Handled an edge case in ``getFullyPackExpandedSize`` so that we now avoid a false-positive diagnostic. (#GH84220) - Clang now correctly tracks type dependence of by-value captures in lambdas with an explicit object parameter. Fixes (#GH70604), (#GH79754), (#GH84163), (#GH84425), (#GH86054), (#GH86398), and (#GH86399). +- Fix a crash when deducing ``auto`` from an invalid dereference (#GH88329). 
+- Fix a crash in a requires expression with a templated base class member function. Fixes (#GH84020). +- Fix a crash caused by a struct defined in a type alias template when the structure + has fields with a dependent type. Fixes (#GH75221). Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ - Clang now properly preserves ``FoundDecls`` within a ``ConceptReference``. (#GH82628) +- The presence of the ``typename`` keyword is now stored in ``TemplateTemplateParmDecl``. Miscellaneous Bug Fixes ^^^^^^^^^^^^^^^^^^^^^^^ @@ -541,8 +554,7 @@ Miscellaneous Clang Crashes Fixed ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Do not attempt to dump the layout of dependent types or invalid declarations - when ``-fdump-record-layouts-complete`` is passed. - Fixes (`#83684 `_). + when ``-fdump-record-layouts-complete`` is passed. Fixes #GH83684. OpenACC Specific Changes ------------------------ @@ -592,8 +604,7 @@ Windows Support would only be included if AVX was enabled at compile time. This was done to work around include times from MSVC STL including ``intrin.h`` under clang-cl. Clang-cl now provides ``intrin0.h`` for MSVC STL and therefore all intrinsic - features without requiring enablement at compile time. - Fixes: (`#53520 `_) + features without requiring enablement at compile time. Fixes #GH53520 - Improved compile times with MSVC STL. MSVC provides ``intrin0.h`` which is a header that only includes intrinsics that are used by MSVC STL to avoid the @@ -647,6 +658,9 @@ Fixed Point Support in Clang AST Matchers ------------ +- Fixes a long-standing performance issue in parent map generation for + ancestry-based matchers such as ``hasParent`` and ``hasAncestor``, making + them significantly faster. - ``isInStdNamespace`` now supports Decl declared with ``extern "C++"``. - Add ``isExplicitObjectMemberFunction``. - Fixed ``forEachArgumentWithParam`` and ``forEachArgumentWithParamType`` to @@ -671,6 +685,8 @@ Static Analyzer but not under any case blocks if ``unroll-loops=true`` analyzer config is set. (#GH68819) - Support C++23 static operator calls. (#GH84972) +- Fixed a crash in the ``security.cert.env.InvalidPtr`` checker when it accidentally + matched user-defined ``strerror`` and similar library functions. (#GH88181) New features ^^^^^^^^^^^^ diff --git a/clang/docs/StandardCPlusPlusModules.rst b/clang/docs/StandardCPlusPlusModules.rst index c5478bba45f38..ee57fb5da6485 100644 --- a/clang/docs/StandardCPlusPlusModules.rst +++ b/clang/docs/StandardCPlusPlusModules.rst @@ -483,6 +483,13 @@ violations with the flag enabled. ABI Impacts ----------- +This section describes the new ABI changes brought by modules. + +Only Itanium C++ ABI related changes are mentioned. + +Mangling Names +~~~~~~~~~~~~~~ + The declarations in a module unit which are not in the global module fragment have new linkage names. For example, @@ -520,6 +527,129 @@ is attached to the global module fragments. For example: Now the linkage name of ``NS::foo()`` will be ``_ZN2NS3fooEv``. +Module Initializers +~~~~~~~~~~~~~~~~~~~ + +All importable module units are required to emit an initializer function. +The initializer function should first call the initializers of the imported modules and +then run all the dynamic initializers in the current module unit. + +Translation units explicitly or implicitly importing named modules must call +the initializer functions of the imported named modules within the sequence of +the dynamic-initializers in the TU. Initializations of entities at namespace +scope are appearance-ordered.
This (recursively) extends into imported modules +at the point of appearance of the import declaration. + +It is allowed to omit calls to imported modules whose initializers are known to be empty. + +It is allowed to omit calls to imported modules whose initializers are already known to have been called. + +Reduced BMI +----------- + +To support the two-phase compilation model, Clang chose to put everything needed to +produce an object into the BMI. But consumers of the BMI other than the module unit itself don't +need all of that information. This makes the BMI larger and may introduce unnecessary +dependencies into the BMI. To mitigate the problem, we decided to reduce the information +contained in the BMI. + +To be clear, we call the default BMI the Full BMI and the newly introduced BMI the Reduced +BMI. + +Users can pass the ``-fexperimental-modules-reduced-bmi`` flag to enable the Reduced BMI. + +For the one-phase compilation model (the model CMake implements), with +``-fexperimental-modules-reduced-bmi`` the generated BMI will automatically be a Reduced BMI. +(The output path of the BMI is specified by ``-fmodule-output=``, as usual for the one-phase +compilation model.) + +It is still possible to use the Reduced BMI with the two-phase compilation model. With +``-fexperimental-modules-reduced-bmi``, ``--precompile`` and ``-fmodule-output=`` specified, +the BMI specified by ``-o`` will be a Full BMI and the BMI specified by +``-fmodule-output=`` will be a Reduced BMI. The dependency graph may look like: + +.. code-block:: none + + module-unit.cppm --> module-unit.full.pcm -> module-unit.o + | + -> module-unit.reduced.pcm -> consumer1.cpp + -> consumer2.cpp + -> ... + -> consumer_n.cpp + +We don't emit diagnostics if ``-fexperimental-modules-reduced-bmi`` is used with a non-module +unit. This design lets end users of the one-phase compilation model perform experiments +early without asking for help from build systems. Users of build systems that support the +two-phase compilation model still need support from those build systems. + +Within a Reduced BMI, we don't write unreachable entities from the GMF, nor the definitions of non-inline +functions and non-inline variables. This may not be a transparent change. +`[module.global.frag]ex2 `_ may be a good +example: + +..
code-block:: c++ + + // foo.h + namespace N { + struct X {}; + int d(); + int e(); + inline int f(X, int = d()) { return e(); } + int g(X); + int h(X); + } + + // M.cppm + module; + #include "foo.h" + export module M; + template int use_f() { + N::X x; // N::X, N, and :: are decl-reachable from use_f + return f(x, 123); // N::f is decl-reachable from use_f, + // N::e is indirectly decl-reachable from use_f + // because it is decl-reachable from N::f, and + // N::d is decl-reachable from use_f + // because it is decl-reachable from N::f + // even though it is not used in this call + } + template int use_g() { + N::X x; // N::X, N, and :: are decl-reachable from use_g + return g((T(), x)); // N::g is not decl-reachable from use_g + } + template int use_h() { + N::X x; // N::X, N, and :: are decl-reachable from use_h + return h((T(), x)); // N::h is not decl-reachable from use_h, but + // N::h is decl-reachable from use_h + } + int k = use_h(); + // use_h is decl-reachable from k, so + // N::h is decl-reachable from k + + // M-impl.cpp + module M; + int a = use_f(); // OK + int b = use_g(); // error: no viable function for call to g; + // g is not decl-reachable from purview of + // module M's interface, so is discarded + int c = use_h(); // OK + +In the above example, the declaration of ``N::g`` is elided from the Reduced +BMI of ``M.cppm``. The use of ``use_g`` in ``M-impl.cpp`` then fails +to instantiate. For such issues, users can add references to ``N::g`` in the module purview +of ``M.cppm`` to make sure it is reachable, e.g., ``using N::g;`` (an illustrative sketch follows +at the end of this section). + +We think the Reduced BMI is the correct direction, but given that it is a drastic change, +we'd like to make it experimental first to avoid breaking existing users. The roadmap +for the Reduced BMI may be: + +1. ``-fexperimental-modules-reduced-bmi`` is opt-in for 1~2 releases. The period depends +on testing feedback. +2. We then announce that the Reduced BMI is no longer experimental, introduce ``-fmodules-reduced-bmi``, +and suggest users enable this mode. This may take 1~2 releases too. +3. Finally we enable this by default. When that time comes, the term BMI will refer to +what is the Reduced BMI today, and the Full BMI will only be meaningful to build systems that +choose to support two-phase compilation.
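The following is an illustrative sketch of the ``using N::g;`` workaround mentioned above; it is an editorial example based on the ``foo.h``/``M.cppm`` listing in this section, not part of the patch itself:

.. code-block:: c++

   // M.cppm (sketch): naming N::g in the module purview keeps it reachable,
   // so consumers of the Reduced BMI can still instantiate use_g.
   module;
   #include "foo.h"
   export module M;
   using N::g; // reference N::g from the purview so its declaration is not discarded
   template <class T> int use_g() {
     N::X x;
     return g((T(), x)); // OK for importers such as M-impl.cpp as well
   }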
+ Performance Tips ---------------- diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt index 70687c23b15e6..2252d0ccde96d 100644 --- a/clang/docs/tools/clang-formatted-files.txt +++ b/clang/docs/tools/clang-formatted-files.txt @@ -123,7 +123,7 @@ clang/include/clang/Analysis/Analyses/CalledOnceCheck.h clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h clang/include/clang/Analysis/FlowSensitive/AdornedCFG.h -clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h +clang/include/clang/Analysis/FlowSensitive/ASTOps.h clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h @@ -308,6 +308,7 @@ clang/lib/Analysis/CalledOnceCheck.cpp clang/lib/Analysis/CloneDetection.cpp clang/lib/Analysis/CodeInjector.cpp clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp +clang/lib/Analysis/FlowSensitive/ASTOps.cpp clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp clang/lib/Analysis/FlowSensitive/DebugSupport.cpp @@ -2147,8 +2148,10 @@ flang/include/flang/Parser/message.h flang/include/flang/Parser/parse-state.h flang/include/flang/Parser/parse-tree-visitor.h flang/include/flang/Parser/parsing.h +flang/include/flang/Parser/preprocessor.h flang/include/flang/Parser/provenance.h flang/include/flang/Parser/source.h +flang/include/flang/Parser/token-sequence.h flang/include/flang/Parser/tools.h flang/include/flang/Parser/unparse.h flang/include/flang/Parser/user-state.h @@ -2319,7 +2322,6 @@ flang/lib/Parser/openmp-parsers.cpp flang/lib/Parser/parse-tree.cpp flang/lib/Parser/parsing.cpp flang/lib/Parser/preprocessor.cpp -flang/lib/Parser/preprocessor.h flang/lib/Parser/prescan.cpp flang/lib/Parser/prescan.h flang/lib/Parser/program-parsers.cpp @@ -2328,7 +2330,6 @@ flang/lib/Parser/source.cpp flang/lib/Parser/stmt-parser.h flang/lib/Parser/token-parsers.h flang/lib/Parser/token-sequence.cpp -flang/lib/Parser/token-sequence.h flang/lib/Parser/tools.cpp flang/lib/Parser/type-parser-implementation.h flang/lib/Parser/type-parsers.h diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h index fb52fcacf6168..36d499c931313 100644 --- a/clang/include/clang/AST/ASTContext.h +++ b/clang/include/clang/AST/ASTContext.h @@ -3416,13 +3416,13 @@ const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB, /// Utility function for constructing a nullary selector. inline Selector GetNullarySelector(StringRef name, ASTContext &Ctx) { - IdentifierInfo* II = &Ctx.Idents.get(name); + const IdentifierInfo *II = &Ctx.Idents.get(name); return Ctx.Selectors.getSelector(0, &II); } /// Utility function for constructing an unary selector. 
inline Selector GetUnarySelector(StringRef name, ASTContext &Ctx) { - IdentifierInfo* II = &Ctx.Idents.get(name); + const IdentifierInfo *II = &Ctx.Idents.get(name); return Ctx.Selectors.getSelector(1, &II); } diff --git a/clang/include/clang/AST/ASTMutationListener.h b/clang/include/clang/AST/ASTMutationListener.h index 8879f9f3229ff..2c4ec2ce67f36 100644 --- a/clang/include/clang/AST/ASTMutationListener.h +++ b/clang/include/clang/AST/ASTMutationListener.h @@ -27,6 +27,7 @@ namespace clang { class FunctionTemplateDecl; class Module; class NamedDecl; + class NamespaceDecl; class ObjCCategoryDecl; class ObjCContainerDecl; class ObjCInterfaceDecl; @@ -35,6 +36,7 @@ namespace clang { class QualType; class RecordDecl; class TagDecl; + class TranslationUnitDecl; class ValueDecl; class VarDecl; class VarTemplateDecl; @@ -147,6 +149,31 @@ class ASTMutationListener { virtual void AddedAttributeToRecord(const Attr *Attr, const RecordDecl *Record) {} + /// The parser found the named module declaration. + virtual void EnteringModulePurview() {} + + /// A mangling number was added to a Decl + /// + /// \param D The decl that got a mangling number + /// + /// \param Number The mangling number that was added to the Decl + virtual void AddedManglingNumber(const Decl *D, unsigned Number) {} + + /// A static local number was added to a Decl + /// + /// \param D The decl that got a static local number + /// + /// \param Number The static local number that was added to the Decl + virtual void AddedStaticLocalNumbers(const Decl *D, unsigned Number) {} + + /// An anonymous namespace was added to the translation unit decl + /// + /// \param TU The translation unit decl that got a new anonymous namespace + /// + /// \param AnonNamespace The anonymous namespace that was added + virtual void AddedAnonymousNamespace(const TranslationUnitDecl *TU, + NamespaceDecl *AnonNamespace) {} + + // NOTE: If new methods are added they should also be added to // MultiplexASTMutationListener. }; diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h index 94e7dd817809d..f5c47d8a7c211 100644 --- a/clang/include/clang/AST/ASTNodeTraverser.h +++ b/clang/include/clang/AST/ASTNodeTraverser.h @@ -243,7 +243,8 @@ class ASTNodeTraverser void Visit(const OpenACCClause *C) { getNodeDelegate().AddChild([=] { getNodeDelegate().Visit(C); - // TODO OpenACC: Switch on clauses that have children, and add them. + for (const auto *S : C->children()) + Visit(S); }); } @@ -932,6 +933,14 @@ class ASTNodeTraverser Visit(TArg); } + void VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) { + Visit(Node->getExpr()); + } + + void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) { + Visit(Node->getExpr()); + } + // Implements Visit methods for Attrs.
#include "clang/AST/AttrNodeTraverse.inc" }; diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h index 31bacacd879ba..18eeecad11329 100644 --- a/clang/include/clang/AST/Decl.h +++ b/clang/include/clang/AST/Decl.h @@ -120,7 +120,7 @@ class TranslationUnitDecl : public Decl, ASTContext &getASTContext() const { return Ctx; } NamespaceDecl *getAnonymousNamespace() const { return AnonymousNamespace; } - void setAnonymousNamespace(NamespaceDecl *D) { AnonymousNamespace = D; } + void setAnonymousNamespace(NamespaceDecl *D); static TranslationUnitDecl *Create(ASTContext &C); @@ -1732,7 +1732,7 @@ class ImplicitParamDecl : public VarDecl { static ImplicitParamDecl *CreateDeserialized(ASTContext &C, unsigned ID); ImplicitParamDecl(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, - IdentifierInfo *Id, QualType Type, + const IdentifierInfo *Id, QualType Type, ImplicitParamKind ParamKind) : VarDecl(ImplicitParam, C, DC, IdLoc, IdLoc, Id, Type, /*TInfo=*/nullptr, SC_None) { @@ -1766,7 +1766,7 @@ class ParmVarDecl : public VarDecl { protected: ParmVarDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, QualType T, + SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S, Expr *DefArg) : VarDecl(DK, C, DC, StartLoc, IdLoc, Id, T, TInfo, S) { assert(ParmVarDeclBits.HasInheritedDefaultArg == false); @@ -1778,10 +1778,10 @@ class ParmVarDecl : public VarDecl { public: static ParmVarDecl *Create(ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, - StorageClass S, Expr *DefArg); + SourceLocation StartLoc, SourceLocation IdLoc, + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, StorageClass S, + Expr *DefArg); static ParmVarDecl *CreateDeserialized(ASTContext &C, unsigned ID); @@ -1994,21 +1994,35 @@ class FunctionDecl : public DeclaratorDecl, }; - /// Stashed information about a defaulted function definition whose body has - /// not yet been lazily generated. - class DefaultedFunctionInfo final - : llvm::TrailingObjects { + /// Stashed information about a defaulted/deleted function body. + class DefaultedOrDeletedFunctionInfo final + : llvm::TrailingObjects { friend TrailingObjects; unsigned NumLookups; + bool HasDeletedMessage; + + size_t numTrailingObjects(OverloadToken) const { + return NumLookups; + } public: - static DefaultedFunctionInfo *Create(ASTContext &Context, - ArrayRef Lookups); + static DefaultedOrDeletedFunctionInfo * + Create(ASTContext &Context, ArrayRef Lookups, + StringLiteral *DeletedMessage = nullptr); + /// Get the unqualified lookup results that should be used in this /// defaulted function definition. ArrayRef getUnqualifiedLookups() const { return {getTrailingObjects(), NumLookups}; } + + StringLiteral *getDeletedMessage() const { + return HasDeletedMessage ? *getTrailingObjects() + : nullptr; + } + + void setDeletedMessage(StringLiteral *Message); }; private: @@ -2018,12 +2032,12 @@ class FunctionDecl : public DeclaratorDecl, ParmVarDecl **ParamInfo = nullptr; /// The active member of this union is determined by - /// FunctionDeclBits.HasDefaultedFunctionInfo. + /// FunctionDeclBits.HasDefaultedOrDeletedInfo. union { /// The body of the function. LazyDeclStmtPtr Body; /// Information about a future defaulted function definition. 
- DefaultedFunctionInfo *DefaultedInfo; + DefaultedOrDeletedFunctionInfo *DefaultedOrDeletedInfo; }; unsigned ODRHash; @@ -2281,18 +2295,18 @@ class FunctionDecl : public DeclaratorDecl, /// Returns whether this specific declaration of the function has a body. bool doesThisDeclarationHaveABody() const { - return (!FunctionDeclBits.HasDefaultedFunctionInfo && Body) || + return (!FunctionDeclBits.HasDefaultedOrDeletedInfo && Body) || isLateTemplateParsed(); } void setBody(Stmt *B); void setLazyBody(uint64_t Offset) { - FunctionDeclBits.HasDefaultedFunctionInfo = false; + FunctionDeclBits.HasDefaultedOrDeletedInfo = false; Body = LazyDeclStmtPtr(Offset); } - void setDefaultedFunctionInfo(DefaultedFunctionInfo *Info); - DefaultedFunctionInfo *getDefaultedFunctionInfo() const; + void setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo *Info); + DefaultedOrDeletedFunctionInfo *getDefalutedOrDeletedInfo() const; /// Whether this function is variadic. bool isVariadic() const; @@ -2495,7 +2509,7 @@ class FunctionDecl : public DeclaratorDecl, return FunctionDeclBits.IsDeleted && !isDefaulted(); } - void setDeletedAsWritten(bool D = true) { FunctionDeclBits.IsDeleted = D; } + void setDeletedAsWritten(bool D = true, StringLiteral *Message = nullptr); /// Determines whether this function is "main", which is the /// entry point into an executable program. @@ -2651,6 +2665,13 @@ class FunctionDecl : public DeclaratorDecl, AC.push_back(TRC); } + /// Get the message that indicates why this function was deleted. + StringLiteral *getDeletedMessage() const { + return FunctionDeclBits.HasDefaultedOrDeletedInfo + ? DefaultedOrDeletedInfo->getDeletedMessage() + : nullptr; + } + void setPreviousDeclaration(FunctionDecl * PrevDecl); FunctionDecl *getCanonicalDecl() override; @@ -3096,7 +3117,7 @@ class FieldDecl : public DeclaratorDecl, public Mergeable { protected: FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, QualType T, + SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle) : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), BitField(false), @@ -3112,7 +3133,7 @@ class FieldDecl : public DeclaratorDecl, public Mergeable { static FieldDecl *Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle); @@ -3129,7 +3150,7 @@ class FieldDecl : public DeclaratorDecl, public Mergeable { bool isBitField() const { return BitField; } /// Determines whether this is an unnamed bitfield. - bool isUnnamedBitfield() const { return isBitField() && !getDeclName(); } + bool isUnnamedBitField() const { return isBitField() && !getDeclName(); } /// Determines whether this field is a /// representative for an anonymous struct or union. 
Such fields are @@ -3333,8 +3354,9 @@ class IndirectFieldDecl : public ValueDecl, friend class ASTDeclReader; static IndirectFieldDecl *Create(ASTContext &C, DeclContext *DC, - SourceLocation L, IdentifierInfo *Id, - QualType T, llvm::MutableArrayRef CH); + SourceLocation L, const IdentifierInfo *Id, + QualType T, + llvm::MutableArrayRef CH); static IndirectFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID); @@ -3382,9 +3404,9 @@ class TypeDecl : public NamedDecl { void anchor() override; protected: - TypeDecl(Kind DK, DeclContext *DC, SourceLocation L, IdentifierInfo *Id, + TypeDecl(Kind DK, DeclContext *DC, SourceLocation L, const IdentifierInfo *Id, SourceLocation StartL = SourceLocation()) - : NamedDecl(DK, DC, L, Id), LocStart(StartL) {} + : NamedDecl(DK, DC, L, Id), LocStart(StartL) {} public: // Low-level accessor. If you just want the type defined by this node, @@ -3426,7 +3448,7 @@ class TypedefNameDecl : public TypeDecl, public Redeclarable { protected: TypedefNameDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, TypeSourceInfo *TInfo) + const IdentifierInfo *Id, TypeSourceInfo *TInfo) : TypeDecl(DK, DC, IdLoc, Id, StartLoc), redeclarable_base(C), MaybeModedTInfo(TInfo, 0) {} @@ -3513,13 +3535,14 @@ class TypedefNameDecl : public TypeDecl, public Redeclarable { /// type specifier. class TypedefDecl : public TypedefNameDecl { TypedefDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, TypeSourceInfo *TInfo) + SourceLocation IdLoc, const IdentifierInfo *Id, + TypeSourceInfo *TInfo) : TypedefNameDecl(Typedef, C, DC, StartLoc, IdLoc, Id, TInfo) {} public: static TypedefDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, TypeSourceInfo *TInfo); + const IdentifierInfo *Id, TypeSourceInfo *TInfo); static TypedefDecl *CreateDeserialized(ASTContext &C, unsigned ID); SourceRange getSourceRange() const override LLVM_READONLY; @@ -3536,14 +3559,15 @@ class TypeAliasDecl : public TypedefNameDecl { TypeAliasTemplateDecl *Template; TypeAliasDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, TypeSourceInfo *TInfo) + SourceLocation IdLoc, const IdentifierInfo *Id, + TypeSourceInfo *TInfo) : TypedefNameDecl(TypeAlias, C, DC, StartLoc, IdLoc, Id, TInfo), Template(nullptr) {} public: static TypeAliasDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, TypeSourceInfo *TInfo); + const IdentifierInfo *Id, TypeSourceInfo *TInfo); static TypeAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID); SourceRange getSourceRange() const override LLVM_READONLY; diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h index 858450926455c..1079993f49694 100644 --- a/clang/include/clang/AST/DeclBase.h +++ b/clang/include/clang/AST/DeclBase.h @@ -672,16 +672,6 @@ class alignas(8) Decl { /// Whether this declaration comes from explicit global module. bool isFromExplicitGlobalModule() const; - /// Check if we should skip checking ODRHash for declaration \param D. - /// - /// The existing ODRHash mechanism seems to be not stable enough and - /// the false positive ODR violation reports are annoying and we rarely see - /// true ODR violation reports. Also we learned that MSVC disabled ODR checks - /// for declarations in GMF. 
So we try to disable ODR checks in the GMF to - /// get better user experiences before we make the ODR violation checks stable - /// enough. - bool shouldSkipCheckingODR() const; - /// Return true if this declaration has an attribute which acts as /// definition of the entity, such as 'alias' or 'ifunc'. bool hasDefiningAttr() const; @@ -1739,7 +1729,7 @@ class DeclContext { LLVM_PREFERRED_TYPE(bool) uint64_t IsExplicitlyDefaulted : 1; LLVM_PREFERRED_TYPE(bool) - uint64_t HasDefaultedFunctionInfo : 1; + uint64_t HasDefaultedOrDeletedInfo : 1; /// For member functions of complete types, whether this is an ineligible /// special member function or an unselected destructor. See diff --git a/clang/include/clang/AST/DeclObjC.h b/clang/include/clang/AST/DeclObjC.h index f8f894b4b10d1..b8d17dd06d155 100644 --- a/clang/include/clang/AST/DeclObjC.h +++ b/clang/include/clang/AST/DeclObjC.h @@ -772,7 +772,7 @@ class ObjCPropertyDecl : public NamedDecl { // Synthesize ivar for this property ObjCIvarDecl *PropertyIvarDecl = nullptr; - ObjCPropertyDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id, + ObjCPropertyDecl(DeclContext *DC, SourceLocation L, const IdentifierInfo *Id, SourceLocation AtLocation, SourceLocation LParenLocation, QualType T, TypeSourceInfo *TSI, PropertyControl propControl) : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation), @@ -782,10 +782,12 @@ class ObjCPropertyDecl : public NamedDecl { PropertyImplementation(propControl) {} public: - static ObjCPropertyDecl * - Create(ASTContext &C, DeclContext *DC, SourceLocation L, IdentifierInfo *Id, - SourceLocation AtLocation, SourceLocation LParenLocation, QualType T, - TypeSourceInfo *TSI, PropertyControl propControl = None); + static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC, + SourceLocation L, const IdentifierInfo *Id, + SourceLocation AtLocation, + SourceLocation LParenLocation, QualType T, + TypeSourceInfo *TSI, + PropertyControl propControl = None); static ObjCPropertyDecl *CreateDeserialized(ASTContext &C, unsigned ID); @@ -952,7 +954,7 @@ class ObjCContainerDecl : public NamedDecl, public DeclContext { void anchor() override; public: - ObjCContainerDecl(Kind DK, DeclContext *DC, IdentifierInfo *Id, + ObjCContainerDecl(Kind DK, DeclContext *DC, const IdentifierInfo *Id, SourceLocation nameLoc, SourceLocation atStartLoc); // Iterator access to instance/class properties. 
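Aside on the recurring `IdentifierInfo *` to `const IdentifierInfo *` change in the Decl.h and DeclObjC.h hunks above: the sketch below is a hypothetical helper (makeNamedParam is not part of the patch) showing why typical call sites keep compiling after the const-qualification, since IdentifierTable::get() returns a non-const IdentifierInfo& that still binds to the new const-qualified parameters.

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "llvm/ADT/StringRef.h"

using namespace clang;

// Hypothetical helper, not in the patch: creates a named parameter declaration.
static ParmVarDecl *makeNamedParam(ASTContext &Ctx, DeclContext *DC,
                                   llvm::StringRef Name, QualType T) {
  // IdentifierTable::get() hands back IdentifierInfo&, which converts to the
  // const pointer the updated Create() now takes; callers that merely name
  // declarations need no changes, only code mutating the IdentifierInfo does.
  const IdentifierInfo *II = &Ctx.Idents.get(Name);
  return ParmVarDecl::Create(Ctx, DC, SourceLocation(), SourceLocation(), II, T,
                             /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}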
@@ -1240,7 +1242,7 @@ class ObjCInterfaceDecl : public ObjCContainerDecl llvm::PointerIntPair Data; ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC, SourceLocation AtLoc, - IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl, bool IsInternal); @@ -1271,13 +1273,11 @@ class ObjCInterfaceDecl : public ObjCContainerDecl } public: - static ObjCInterfaceDecl *Create(const ASTContext &C, DeclContext *DC, - SourceLocation atLoc, - IdentifierInfo *Id, - ObjCTypeParamList *typeParamList, - ObjCInterfaceDecl *PrevDecl, - SourceLocation ClassLoc = SourceLocation(), - bool isInternal = false); + static ObjCInterfaceDecl * + Create(const ASTContext &C, DeclContext *DC, SourceLocation atLoc, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + ObjCInterfaceDecl *PrevDecl, + SourceLocation ClassLoc = SourceLocation(), bool isInternal = false); static ObjCInterfaceDecl *CreateDeserialized(const ASTContext &C, unsigned ID); @@ -1338,7 +1338,8 @@ class ObjCInterfaceDecl : public ObjCContainerDecl ObjCImplementationDecl *getImplementation() const; void setImplementation(ObjCImplementationDecl *ImplD); - ObjCCategoryDecl *FindCategoryDeclaration(IdentifierInfo *CategoryId) const; + ObjCCategoryDecl * + FindCategoryDeclaration(const IdentifierInfo *CategoryId) const; // Get the local instance/class method declared in a category. ObjCMethodDecl *getCategoryInstanceMethod(Selector Sel) const; @@ -1794,9 +1795,9 @@ class ObjCInterfaceDecl : public ObjCContainerDecl data().CategoryList = category; } - ObjCPropertyDecl - *FindPropertyVisibleInPrimaryClass(IdentifierInfo *PropertyId, - ObjCPropertyQueryKind QueryKind) const; + ObjCPropertyDecl * + FindPropertyVisibleInPrimaryClass(const IdentifierInfo *PropertyId, + ObjCPropertyQueryKind QueryKind) const; void collectPropertiesToImplement(PropertyMap &PM) const override; @@ -1954,8 +1955,8 @@ class ObjCIvarDecl : public FieldDecl { private: ObjCIvarDecl(ObjCContainerDecl *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, AccessControl ac, Expr *BW, + SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, AccessControl ac, Expr *BW, bool synthesized) : FieldDecl(ObjCIvar, DC, StartLoc, IdLoc, Id, T, TInfo, BW, /*Mutable=*/false, /*HasInit=*/ICIS_NoInit), @@ -1964,10 +1965,9 @@ class ObjCIvarDecl : public FieldDecl { public: static ObjCIvarDecl *Create(ASTContext &C, ObjCContainerDecl *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, QualType T, - TypeSourceInfo *TInfo, - AccessControl ac, Expr *BW = nullptr, - bool synthesized=false); + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, AccessControl ac, + Expr *BW = nullptr, bool synthesized = false); static ObjCIvarDecl *CreateDeserialized(ASTContext &C, unsigned ID); @@ -2343,7 +2343,7 @@ class ObjCCategoryDecl : public ObjCContainerDecl { ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc, SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc, - IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, + const IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, ObjCTypeParamList *typeParamList, SourceLocation IvarLBraceLoc = SourceLocation(), SourceLocation IvarRBraceLoc = SourceLocation()); @@ -2354,15 +2354,13 @@ class ObjCCategoryDecl : public ObjCContainerDecl { friend class ASTDeclReader; friend class ASTDeclWriter; - static ObjCCategoryDecl *Create(ASTContext &C, 
DeclContext *DC, - SourceLocation AtLoc, - SourceLocation ClassNameLoc, - SourceLocation CategoryNameLoc, - IdentifierInfo *Id, - ObjCInterfaceDecl *IDecl, - ObjCTypeParamList *typeParamList, - SourceLocation IvarLBraceLoc=SourceLocation(), - SourceLocation IvarRBraceLoc=SourceLocation()); + static ObjCCategoryDecl * + Create(ASTContext &C, DeclContext *DC, SourceLocation AtLoc, + SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc, + const IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, + ObjCTypeParamList *typeParamList, + SourceLocation IvarLBraceLoc = SourceLocation(), + SourceLocation IvarRBraceLoc = SourceLocation()); static ObjCCategoryDecl *CreateDeserialized(ASTContext &C, unsigned ID); ObjCInterfaceDecl *getClassInterface() { return ClassInterface; } @@ -2472,10 +2470,9 @@ class ObjCImplDecl : public ObjCContainerDecl { void anchor() override; protected: - ObjCImplDecl(Kind DK, DeclContext *DC, - ObjCInterfaceDecl *classInterface, - IdentifierInfo *Id, - SourceLocation nameLoc, SourceLocation atStartLoc) + ObjCImplDecl(Kind DK, DeclContext *DC, ObjCInterfaceDecl *classInterface, + const IdentifierInfo *Id, SourceLocation nameLoc, + SourceLocation atStartLoc) : ObjCContainerDecl(DK, DC, Id, nameLoc, atStartLoc), ClassInterface(classInterface) {} @@ -2543,12 +2540,12 @@ class ObjCCategoryImplDecl : public ObjCImplDecl { // Category name location SourceLocation CategoryNameLoc; - ObjCCategoryImplDecl(DeclContext *DC, IdentifierInfo *Id, + ObjCCategoryImplDecl(DeclContext *DC, const IdentifierInfo *Id, ObjCInterfaceDecl *classInterface, SourceLocation nameLoc, SourceLocation atStartLoc, SourceLocation CategoryNameLoc) - : ObjCImplDecl(ObjCCategoryImpl, DC, classInterface, Id, - nameLoc, atStartLoc), + : ObjCImplDecl(ObjCCategoryImpl, DC, classInterface, Id, nameLoc, + atStartLoc), CategoryNameLoc(CategoryNameLoc) {} void anchor() override; @@ -2557,12 +2554,10 @@ class ObjCCategoryImplDecl : public ObjCImplDecl { friend class ASTDeclReader; friend class ASTDeclWriter; - static ObjCCategoryImplDecl *Create(ASTContext &C, DeclContext *DC, - IdentifierInfo *Id, - ObjCInterfaceDecl *classInterface, - SourceLocation nameLoc, - SourceLocation atStartLoc, - SourceLocation CategoryNameLoc); + static ObjCCategoryImplDecl * + Create(ASTContext &C, DeclContext *DC, const IdentifierInfo *Id, + ObjCInterfaceDecl *classInterface, SourceLocation nameLoc, + SourceLocation atStartLoc, SourceLocation CategoryNameLoc); static ObjCCategoryImplDecl *CreateDeserialized(ASTContext &C, unsigned ID); ObjCCategoryDecl *getCategoryDecl() const; diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h index e3b6a7efb1127..f24e71ff22964 100644 --- a/clang/include/clang/AST/DeclTemplate.h +++ b/clang/include/clang/AST/DeclTemplate.h @@ -1389,14 +1389,14 @@ class NonTypeTemplateParmDecl final NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, unsigned D, unsigned P, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, bool ParameterPack, TypeSourceInfo *TInfo) : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc), TemplateParmPosition(D, P), ParameterPack(ParameterPack) {} NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, unsigned D, unsigned P, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef ExpandedTypes, ArrayRef ExpandedTInfos); @@ -1404,12 +1404,12 @@ class NonTypeTemplateParmDecl final public: 
static NonTypeTemplateParmDecl * Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, QualType T, bool ParameterPack, TypeSourceInfo *TInfo); static NonTypeTemplateParmDecl * Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef ExpandedTypes, ArrayRef ExpandedTInfos); @@ -1581,26 +1581,36 @@ class TemplateTemplateParmDecl final DefaultArgStorage; DefArgStorage DefaultArgument; + /// Whether this template template parameter was declaration with + /// the 'typename' keyword. + /// + /// If false, it was declared with the 'class' keyword. + LLVM_PREFERRED_TYPE(bool) + unsigned Typename : 1; + /// Whether this parameter is a parameter pack. - bool ParameterPack; + LLVM_PREFERRED_TYPE(bool) + unsigned ParameterPack : 1; /// Whether this template template parameter is an "expanded" /// parameter pack, meaning that it is a pack expansion and we /// already know the set of template parameters that expansion expands to. - bool ExpandedParameterPack = false; + LLVM_PREFERRED_TYPE(bool) + unsigned ExpandedParameterPack : 1; /// The number of parameters in an expanded parameter pack. unsigned NumExpandedParams = 0; - TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L, - unsigned D, unsigned P, bool ParameterPack, - IdentifierInfo *Id, TemplateParameterList *Params) + TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L, unsigned D, + unsigned P, bool ParameterPack, IdentifierInfo *Id, + bool Typename, TemplateParameterList *Params) : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params), - TemplateParmPosition(D, P), ParameterPack(ParameterPack) {} + TemplateParmPosition(D, P), Typename(Typename), + ParameterPack(ParameterPack), ExpandedParameterPack(false) {} - TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L, - unsigned D, unsigned P, - IdentifierInfo *Id, TemplateParameterList *Params, + TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L, unsigned D, + unsigned P, IdentifierInfo *Id, bool Typename, + TemplateParameterList *Params, ArrayRef Expansions); void anchor() override; @@ -1613,14 +1623,13 @@ class TemplateTemplateParmDecl final static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, unsigned P, bool ParameterPack, - IdentifierInfo *Id, + IdentifierInfo *Id, bool Typename, TemplateParameterList *Params); - static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC, - SourceLocation L, unsigned D, - unsigned P, - IdentifierInfo *Id, - TemplateParameterList *Params, - ArrayRef Expansions); + static TemplateTemplateParmDecl * + Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, + unsigned P, IdentifierInfo *Id, bool Typename, + TemplateParameterList *Params, + ArrayRef Expansions); static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C, unsigned ID); @@ -1634,6 +1643,14 @@ class TemplateTemplateParmDecl final using TemplateParmPosition::setPosition; using TemplateParmPosition::getIndex; + /// Whether this template template parameter was declared with + /// the 'typename' keyword. 
+ bool wasDeclaredWithTypename() const { return Typename; } + + /// Set whether this template template parameter was declared with + /// the 'typename' or 'class' keyword. + void setDeclaredWithTypename(bool withTypename) { Typename = withTypename; } + /// Whether this template template parameter is a template /// parameter pack. /// diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h index 3e3edc30702dd..5eac448561426 100644 --- a/clang/include/clang/AST/ExprCXX.h +++ b/clang/include/clang/AST/ExprCXX.h @@ -2559,7 +2559,7 @@ class CXXDeleteExpr : public Expr { class PseudoDestructorTypeStorage { /// Either the type source information or the name of the type, if /// it couldn't be resolved due to type-dependence. - llvm::PointerUnion Type; + llvm::PointerUnion Type; /// The starting source location of the pseudo-destructor type. SourceLocation Location; @@ -2567,7 +2567,7 @@ class PseudoDestructorTypeStorage { public: PseudoDestructorTypeStorage() = default; - PseudoDestructorTypeStorage(IdentifierInfo *II, SourceLocation Loc) + PseudoDestructorTypeStorage(const IdentifierInfo *II, SourceLocation Loc) : Type(II), Location(Loc) {} PseudoDestructorTypeStorage(TypeSourceInfo *Info); @@ -2576,8 +2576,8 @@ class PseudoDestructorTypeStorage { return Type.dyn_cast(); } - IdentifierInfo *getIdentifier() const { - return Type.dyn_cast(); + const IdentifierInfo *getIdentifier() const { + return Type.dyn_cast(); } SourceLocation getLocation() const { return Location; } @@ -2708,7 +2708,7 @@ class CXXPseudoDestructorExpr : public Expr { /// In a dependent pseudo-destructor expression for which we do not /// have full type information on the destroyed type, provides the name /// of the destroyed type. - IdentifierInfo *getDestroyedTypeIdentifier() const { + const IdentifierInfo *getDestroyedTypeIdentifier() const { return DestroyedType.getIdentifier(); } diff --git a/clang/include/clang/AST/ExternalASTSource.h b/clang/include/clang/AST/ExternalASTSource.h index 8e573965b0a33..230c83943c222 100644 --- a/clang/include/clang/AST/ExternalASTSource.h +++ b/clang/include/clang/AST/ExternalASTSource.h @@ -138,7 +138,7 @@ class ExternalASTSource : public RefCountedBase { virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset); /// Update an out-of-date identifier. 
- virtual void updateOutOfDateIdentifier(IdentifierInfo &II) {} + virtual void updateOutOfDateIdentifier(const IdentifierInfo &II) {} /// Find all declarations with the given name in the given context, /// and add them to the context by calling SetExternalVisibleDeclsForName diff --git a/clang/include/clang/AST/JSONNodeDumper.h b/clang/include/clang/AST/JSONNodeDumper.h index 3cfb7ff09125e..1404984139734 100644 --- a/clang/include/clang/AST/JSONNodeDumper.h +++ b/clang/include/clang/AST/JSONNodeDumper.h @@ -311,6 +311,8 @@ class JSONNodeDumper void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *MTE); void VisitCXXDependentScopeMemberExpr(const CXXDependentScopeMemberExpr *ME); void VisitRequiresExpr(const RequiresExpr *RE); + void VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node); + void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node); void VisitObjCEncodeExpr(const ObjCEncodeExpr *OEE); void VisitObjCMessageExpr(const ObjCMessageExpr *OME); diff --git a/clang/include/clang/AST/NestedNameSpecifier.h b/clang/include/clang/AST/NestedNameSpecifier.h index 3b6cf97211850..7b0c21b9e7cfb 100644 --- a/clang/include/clang/AST/NestedNameSpecifier.h +++ b/clang/include/clang/AST/NestedNameSpecifier.h @@ -124,7 +124,7 @@ class NestedNameSpecifier : public llvm::FoldingSetNode { /// cannot be resolved. static NestedNameSpecifier *Create(const ASTContext &Context, NestedNameSpecifier *Prefix, - IdentifierInfo *II); + const IdentifierInfo *II); /// Builds a nested name specifier that names a namespace. static NestedNameSpecifier *Create(const ASTContext &Context, @@ -134,7 +134,7 @@ class NestedNameSpecifier : public llvm::FoldingSetNode { /// Builds a nested name specifier that names a namespace alias. static NestedNameSpecifier *Create(const ASTContext &Context, NestedNameSpecifier *Prefix, - NamespaceAliasDecl *Alias); + const NamespaceAliasDecl *Alias); /// Builds a nested name specifier that names a type. static NestedNameSpecifier *Create(const ASTContext &Context, @@ -148,7 +148,7 @@ class NestedNameSpecifier : public llvm::FoldingSetNode { /// nested name specifier, e.g., in "x->Base::f", the "x" has a dependent /// type. static NestedNameSpecifier *Create(const ASTContext &Context, - IdentifierInfo *II); + const IdentifierInfo *II); /// Returns the nested name specifier representing the global /// scope. 
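Aside on the ExternalASTSource change just above: updateOutOfDateIdentifier now takes a const reference, so out-of-tree sources that override it need to update their signatures. The class below is a hypothetical subclass (MyExternalSource is not from the patch), sketched only to show that marking the overrider `override` turns a stale non-const signature into a compile error rather than a silently unused virtual.

#include "clang/AST/ExternalASTSource.h"

using namespace clang;

class MyExternalSource : public ExternalASTSource {
public:
  // Must now match the const-qualified base signature; with `override`, an
  // out-of-date `IdentifierInfo &` parameter fails to compile instead of
  // quietly declaring a new, never-called virtual.
  void updateOutOfDateIdentifier(const IdentifierInfo &II) override {
    (void)II; // a real source would refresh the identifier's decls here
  }
};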
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h index 06a0098bbda4c..07587849eb121 100644 --- a/clang/include/clang/AST/OpenACCClause.h +++ b/clang/include/clang/AST/OpenACCClause.h @@ -14,6 +14,7 @@ #ifndef LLVM_CLANG_AST_OPENACCCLAUSE_H #define LLVM_CLANG_AST_OPENACCCLAUSE_H #include "clang/AST/ASTContext.h" +#include "clang/AST/StmtIterator.h" #include "clang/Basic/OpenACCKinds.h" namespace clang { @@ -34,6 +35,17 @@ class OpenACCClause { static bool classof(const OpenACCClause *) { return true; } + using child_iterator = StmtIterator; + using const_child_iterator = ConstStmtIterator; + using child_range = llvm::iterator_range; + using const_child_range = llvm::iterator_range; + + child_range children(); + const_child_range children() const { + auto Children = const_cast(this)->children(); + return const_child_range(Children.begin(), Children.end()); + } + virtual ~OpenACCClause() = default; }; @@ -49,6 +61,99 @@ class OpenACCClauseWithParams : public OpenACCClause { public: SourceLocation getLParenLoc() const { return LParenLoc; } + + child_range children() { + return child_range(child_iterator(), child_iterator()); + } + const_child_range children() const { + return const_child_range(const_child_iterator(), const_child_iterator()); + } +}; + +/// A 'default' clause, has the optional 'none' or 'present' argument. +class OpenACCDefaultClause : public OpenACCClauseWithParams { + friend class ASTReaderStmt; + friend class ASTWriterStmt; + + OpenACCDefaultClauseKind DefaultClauseKind; + +protected: + OpenACCDefaultClause(OpenACCDefaultClauseKind K, SourceLocation BeginLoc, + SourceLocation LParenLoc, SourceLocation EndLoc) + : OpenACCClauseWithParams(OpenACCClauseKind::Default, BeginLoc, LParenLoc, + EndLoc), + DefaultClauseKind(K) { + assert((DefaultClauseKind == OpenACCDefaultClauseKind::None || + DefaultClauseKind == OpenACCDefaultClauseKind::Present) && + "Invalid Clause Kind"); + } + +public: + OpenACCDefaultClauseKind getDefaultClauseKind() const { + return DefaultClauseKind; + } + + static OpenACCDefaultClause *Create(const ASTContext &C, + OpenACCDefaultClauseKind K, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); +}; + +/// Represents one of the handful of classes that has an optional/required +/// 'condition' expression as an argument. +class OpenACCClauseWithCondition : public OpenACCClauseWithParams { + Expr *ConditionExpr = nullptr; + +protected: + OpenACCClauseWithCondition(OpenACCClauseKind K, SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *ConditionExpr, + SourceLocation EndLoc) + : OpenACCClauseWithParams(K, BeginLoc, LParenLoc, EndLoc), + ConditionExpr(ConditionExpr) {} + +public: + bool hasConditionExpr() const { return ConditionExpr; } + const Expr *getConditionExpr() const { return ConditionExpr; } + Expr *getConditionExpr() { return ConditionExpr; } + + child_range children() { + if (ConditionExpr) + return child_range(reinterpret_cast(&ConditionExpr), + reinterpret_cast(&ConditionExpr + 1)); + return child_range(child_iterator(), child_iterator()); + } + + const_child_range children() const { + if (ConditionExpr) + return const_child_range( + reinterpret_cast(&ConditionExpr), + reinterpret_cast(&ConditionExpr + 1)); + return const_child_range(const_child_iterator(), const_child_iterator()); + } +}; + +/// An 'if' clause, which has a required condition expression. 
+class OpenACCIfClause : public OpenACCClauseWithCondition { +protected: + OpenACCIfClause(SourceLocation BeginLoc, SourceLocation LParenLoc, + Expr *ConditionExpr, SourceLocation EndLoc); + +public: + static OpenACCIfClause *Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *ConditionExpr, + SourceLocation EndLoc); +}; + +/// A 'self' clause, which has an optional condition expression. +class OpenACCSelfClause : public OpenACCClauseWithCondition { + OpenACCSelfClause(SourceLocation BeginLoc, SourceLocation LParenLoc, + Expr *ConditionExpr, SourceLocation EndLoc); + +public: + static OpenACCSelfClause *Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, SourceLocation EndLoc); }; template class OpenACCClauseVisitor { @@ -65,53 +170,25 @@ template class OpenACCClauseVisitor { return; switch (C->getClauseKind()) { - case OpenACCClauseKind::Default: - case OpenACCClauseKind::Finalize: - case OpenACCClauseKind::IfPresent: - case OpenACCClauseKind::Seq: - case OpenACCClauseKind::Independent: - case OpenACCClauseKind::Auto: - case OpenACCClauseKind::Worker: - case OpenACCClauseKind::Vector: - case OpenACCClauseKind::NoHost: - case OpenACCClauseKind::If: - case OpenACCClauseKind::Self: - case OpenACCClauseKind::Copy: - case OpenACCClauseKind::UseDevice: - case OpenACCClauseKind::Attach: - case OpenACCClauseKind::Delete: - case OpenACCClauseKind::Detach: - case OpenACCClauseKind::Device: - case OpenACCClauseKind::DevicePtr: - case OpenACCClauseKind::DeviceResident: - case OpenACCClauseKind::FirstPrivate: - case OpenACCClauseKind::Host: - case OpenACCClauseKind::Link: - case OpenACCClauseKind::NoCreate: - case OpenACCClauseKind::Present: - case OpenACCClauseKind::Private: - case OpenACCClauseKind::CopyOut: - case OpenACCClauseKind::CopyIn: - case OpenACCClauseKind::Create: - case OpenACCClauseKind::Reduction: - case OpenACCClauseKind::Collapse: - case OpenACCClauseKind::Bind: - case OpenACCClauseKind::VectorLength: - case OpenACCClauseKind::NumGangs: - case OpenACCClauseKind::NumWorkers: - case OpenACCClauseKind::DeviceNum: - case OpenACCClauseKind::DefaultAsync: - case OpenACCClauseKind::DeviceType: - case OpenACCClauseKind::DType: - case OpenACCClauseKind::Async: - case OpenACCClauseKind::Tile: - case OpenACCClauseKind::Gang: - case OpenACCClauseKind::Wait: - case OpenACCClauseKind::Invalid: +#define VISIT_CLAUSE(CLAUSE_NAME) \ + case OpenACCClauseKind::CLAUSE_NAME: \ + Visit##CLAUSE_NAME##Clause(*cast(C)); \ + return; +#include "clang/Basic/OpenACCClauses.def" + + default: llvm_unreachable("Clause visitor not yet implemented"); } llvm_unreachable("Invalid Clause kind"); } + +#define VISIT_CLAUSE(CLAUSE_NAME) \ + void Visit##CLAUSE_NAME##Clause( \ + const OpenACC##CLAUSE_NAME##Clause &Clause) { \ + return getDerived().Visit##CLAUSE_NAME##Clause(Clause); \ + } + +#include "clang/Basic/OpenACCClauses.def" }; class OpenACCClausePrinter final @@ -128,6 +205,10 @@ class OpenACCClausePrinter final } } OpenACCClausePrinter(raw_ostream &OS) : OS(OS) {} + +#define VISIT_CLAUSE(CLAUSE_NAME) \ + void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause); +#include "clang/Basic/OpenACCClauses.def" }; } // namespace clang diff --git a/clang/include/clang/AST/StmtOpenACC.h b/clang/include/clang/AST/StmtOpenACC.h index 419cb6cada0bc..66f8f844e0b29 100644 --- a/clang/include/clang/AST/StmtOpenACC.h +++ b/clang/include/clang/AST/StmtOpenACC.h @@ -142,9 +142,7 @@ class OpenACCComputeConstruct final Stmt 
*StructuredBlock) : OpenACCAssociatedStmtConstruct(OpenACCComputeConstructClass, K, Start, End, StructuredBlock) { - assert((K == OpenACCDirectiveKind::Parallel || - K == OpenACCDirectiveKind::Serial || - K == OpenACCDirectiveKind::Kernels) && + assert(isOpenACCComputeDirectiveKind(K) && "Only parallel, serial, and kernels constructs should be " "represented by this type"); diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h index 3cb3c1014d73b..f735fa5643aec 100644 --- a/clang/include/clang/AST/StmtOpenMP.h +++ b/clang/include/clang/AST/StmtOpenMP.h @@ -6109,6 +6109,8 @@ class OMPTeamsGenericLoopDirective final : public OMPLoopDirective { class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; + /// true if loop directive's associated loop can be a parallel for. + bool CanBeParallelFor = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. @@ -6131,6 +6133,9 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective { llvm::omp::OMPD_target_teams_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} + /// Set whether associated loop can be a parallel for. + void setCanBeParallelFor(bool ParFor) { CanBeParallelFor = ParFor; } + public: /// Creates directive with a list of \p Clauses. /// @@ -6145,7 +6150,7 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective { static OMPTargetTeamsGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef Clauses, - Stmt *AssociatedStmt, const HelperExprs &Exprs); + Stmt *AssociatedStmt, const HelperExprs &Exprs, bool CanBeParallelFor); /// Creates an empty directive with the place /// for \a NumClauses clauses. @@ -6159,6 +6164,10 @@ class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective { unsigned CollapsedNum, EmptyShell); + /// Return true if current loop directive's associated loop can be a + /// parallel for. + bool canBeParallelFor() const { return CanBeParallelFor; } + static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass; } diff --git a/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h b/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h index 1ceef944fbc34..117173ba9a095 100644 --- a/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h +++ b/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h @@ -8,11 +8,9 @@ #ifndef LLVM_CLANG_ANALYSIS_ANALYSES_EXPRMUTATIONANALYZER_H #define LLVM_CLANG_ANALYSIS_ANALYSES_EXPRMUTATIONANALYZER_H -#include - -#include "clang/AST/AST.h" #include "clang/ASTMatchers/ASTMatchers.h" #include "llvm/ADT/DenseMap.h" +#include namespace clang { @@ -21,14 +19,74 @@ class FunctionParmMutationAnalyzer; /// Analyzes whether any mutative operations are applied to an expression within /// a given statement. 
class ExprMutationAnalyzer { + friend class FunctionParmMutationAnalyzer; + public: + struct Memoized { + using ResultMap = llvm::DenseMap; + using FunctionParaAnalyzerMap = + llvm::SmallDenseMap>; + + ResultMap Results; + ResultMap PointeeResults; + FunctionParaAnalyzerMap FuncParmAnalyzer; + + void clear() { + Results.clear(); + PointeeResults.clear(); + FuncParmAnalyzer.clear(); + } + }; + struct Analyzer { + Analyzer(const Stmt &Stm, ASTContext &Context, Memoized &Memorized) + : Stm(Stm), Context(Context), Memorized(Memorized) {} + + const Stmt *findMutation(const Expr *Exp); + const Stmt *findMutation(const Decl *Dec); + + const Stmt *findPointeeMutation(const Expr *Exp); + const Stmt *findPointeeMutation(const Decl *Dec); + static bool isUnevaluated(const Stmt *Smt, const Stmt &Stm, + ASTContext &Context); + + private: + using MutationFinder = const Stmt *(Analyzer::*)(const Expr *); + + const Stmt *findMutationMemoized(const Expr *Exp, + llvm::ArrayRef Finders, + Memoized::ResultMap &MemoizedResults); + const Stmt *tryEachDeclRef(const Decl *Dec, MutationFinder Finder); + + bool isUnevaluated(const Expr *Exp); + + const Stmt *findExprMutation(ArrayRef Matches); + const Stmt *findDeclMutation(ArrayRef Matches); + const Stmt * + findExprPointeeMutation(ArrayRef Matches); + const Stmt * + findDeclPointeeMutation(ArrayRef Matches); + + const Stmt *findDirectMutation(const Expr *Exp); + const Stmt *findMemberMutation(const Expr *Exp); + const Stmt *findArrayElementMutation(const Expr *Exp); + const Stmt *findCastMutation(const Expr *Exp); + const Stmt *findRangeLoopMutation(const Expr *Exp); + const Stmt *findReferenceMutation(const Expr *Exp); + const Stmt *findFunctionArgMutation(const Expr *Exp); + + const Stmt &Stm; + ASTContext &Context; + Memoized &Memorized; + }; + ExprMutationAnalyzer(const Stmt &Stm, ASTContext &Context) - : Stm(Stm), Context(Context) {} + : Memorized(), A(Stm, Context, Memorized) {} bool isMutated(const Expr *Exp) { return findMutation(Exp) != nullptr; } bool isMutated(const Decl *Dec) { return findMutation(Dec) != nullptr; } - const Stmt *findMutation(const Expr *Exp); - const Stmt *findMutation(const Decl *Dec); + const Stmt *findMutation(const Expr *Exp) { return A.findMutation(Exp); } + const Stmt *findMutation(const Decl *Dec) { return A.findMutation(Dec); } bool isPointeeMutated(const Expr *Exp) { return findPointeeMutation(Exp) != nullptr; @@ -36,51 +94,40 @@ class ExprMutationAnalyzer { bool isPointeeMutated(const Decl *Dec) { return findPointeeMutation(Dec) != nullptr; } - const Stmt *findPointeeMutation(const Expr *Exp); - const Stmt *findPointeeMutation(const Decl *Dec); + const Stmt *findPointeeMutation(const Expr *Exp) { + return A.findPointeeMutation(Exp); + } + const Stmt *findPointeeMutation(const Decl *Dec) { + return A.findPointeeMutation(Dec); + } + static bool isUnevaluated(const Stmt *Smt, const Stmt &Stm, - ASTContext &Context); + ASTContext &Context) { + return Analyzer::isUnevaluated(Smt, Stm, Context); + } private: - using MutationFinder = const Stmt *(ExprMutationAnalyzer::*)(const Expr *); - using ResultMap = llvm::DenseMap; - - const Stmt *findMutationMemoized(const Expr *Exp, - llvm::ArrayRef Finders, - ResultMap &MemoizedResults); - const Stmt *tryEachDeclRef(const Decl *Dec, MutationFinder Finder); - - bool isUnevaluated(const Expr *Exp); - - const Stmt *findExprMutation(ArrayRef Matches); - const Stmt *findDeclMutation(ArrayRef Matches); - const Stmt * - findExprPointeeMutation(ArrayRef Matches); - const Stmt * - 
findDeclPointeeMutation(ArrayRef Matches); - - const Stmt *findDirectMutation(const Expr *Exp); - const Stmt *findMemberMutation(const Expr *Exp); - const Stmt *findArrayElementMutation(const Expr *Exp); - const Stmt *findCastMutation(const Expr *Exp); - const Stmt *findRangeLoopMutation(const Expr *Exp); - const Stmt *findReferenceMutation(const Expr *Exp); - const Stmt *findFunctionArgMutation(const Expr *Exp); - - const Stmt &Stm; - ASTContext &Context; - llvm::DenseMap> - FuncParmAnalyzer; - ResultMap Results; - ResultMap PointeeResults; + Memoized Memorized; + Analyzer A; }; // A convenient wrapper around ExprMutationAnalyzer for analyzing function // params. class FunctionParmMutationAnalyzer { public: - FunctionParmMutationAnalyzer(const FunctionDecl &Func, ASTContext &Context); + static FunctionParmMutationAnalyzer * + getFunctionParmMutationAnalyzer(const FunctionDecl &Func, ASTContext &Context, + ExprMutationAnalyzer::Memoized &Memorized) { + auto it = Memorized.FuncParmAnalyzer.find(&Func); + if (it == Memorized.FuncParmAnalyzer.end()) + it = + Memorized.FuncParmAnalyzer + .try_emplace(&Func, std::unique_ptr( + new FunctionParmMutationAnalyzer( + Func, Context, Memorized))) + .first; + return it->getSecond().get(); + } bool isMutated(const ParmVarDecl *Parm) { return findMutation(Parm) != nullptr; @@ -88,8 +135,11 @@ class FunctionParmMutationAnalyzer { const Stmt *findMutation(const ParmVarDecl *Parm); private: - ExprMutationAnalyzer BodyAnalyzer; + ExprMutationAnalyzer::Analyzer BodyAnalyzer; llvm::DenseMap Results; + + FunctionParmMutationAnalyzer(const FunctionDecl &Func, ASTContext &Context, + ExprMutationAnalyzer::Memoized &Memorized); }; } // namespace clang diff --git a/clang/include/clang/Analysis/FlowSensitive/ASTOps.h b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h new file mode 100644 index 0000000000000..27ad32c1694f7 --- /dev/null +++ b/clang/include/clang/Analysis/FlowSensitive/ASTOps.h @@ -0,0 +1,98 @@ +//===-- ASTOps.h -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Operations on AST nodes that are used in flow-sensitive analysis. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_ASTOPS_H +#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_ASTOPS_H + +#include "clang/AST/Decl.h" +#include "clang/AST/Expr.h" +#include "clang/AST/Type.h" +#include "clang/Analysis/FlowSensitive/StorageLocation.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/SetVector.h" + +namespace clang { +namespace dataflow { + +/// Skip past nodes that the CFG does not emit. These nodes are invisible to +/// flow-sensitive analysis, and should be ignored as they will effectively not +/// exist. +/// +/// * `ParenExpr` - The CFG takes the operator precedence into account, but +/// otherwise omits the node afterwards. +/// +/// * `ExprWithCleanups` - The CFG will generate the appropriate calls to +/// destructors and then omit the node. +/// +const Expr &ignoreCFGOmittedNodes(const Expr &E); +const Stmt &ignoreCFGOmittedNodes(const Stmt &S); + +/// A set of `FieldDecl *`. Use `SmallSetVector` to guarantee deterministic +/// iteration order. 
+using FieldSet = llvm::SmallSetVector; + +/// Returns the set of all fields in the type. +FieldSet getObjectFields(QualType Type); + +/// Returns whether `Fields` and `FieldLocs` contain the same fields. +bool containsSameFields(const FieldSet &Fields, + const RecordStorageLocation::FieldToLoc &FieldLocs); + +/// Helper class for initialization of a record with an `InitListExpr`. +/// `InitListExpr::inits()` contains the initializers for both the base classes +/// and the fields of the record; this helper class separates these out into two +/// different lists. In addition, it deals with special cases associated with +/// unions. +class RecordInitListHelper { +public: + // `InitList` must have record type. + RecordInitListHelper(const InitListExpr *InitList); + + // Base classes with their associated initializer expressions. + ArrayRef> base_inits() const { + return BaseInits; + } + + // Fields with their associated initializer expressions. + ArrayRef> field_inits() const { + return FieldInits; + } + +private: + SmallVector> BaseInits; + SmallVector> FieldInits; + + // We potentially synthesize an `ImplicitValueInitExpr` for unions. It's a + // member variable because we store a pointer to it in `FieldInits`. + std::optional ImplicitValueInitForUnion; +}; + +/// A collection of several types of declarations, all referenced from the same +/// function. +struct ReferencedDecls { + /// Non-static member variables. + FieldSet Fields; + /// All variables with static storage duration, notably including static + /// member variables and static variables declared within a function. + llvm::DenseSet Globals; + /// Free functions and member functions which are referenced (but not + /// necessarily called). + llvm::DenseSet Functions; +}; + +/// Returns declarations that are declared in or referenced from `FD`. +ReferencedDecls getReferencedDecls(const FunctionDecl &FD); + +} // namespace dataflow +} // namespace clang + +#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_ASTOPS_H diff --git a/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h b/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h deleted file mode 100644 index 3972962d0b2da..0000000000000 --- a/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h +++ /dev/null @@ -1,27 +0,0 @@ -//===-- ControlFlowContext.h ------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines a deprecated alias for AdornedCFG. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H -#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H - -#include "clang/Analysis/FlowSensitive/AdornedCFG.h" - -namespace clang { -namespace dataflow { - -// This is a deprecated alias. Use `AdornedCFG` instead. 
-using ControlFlowContext = AdornedCFG; - -} // namespace dataflow -} // namespace clang - -#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h index 909a91059438c..aa2c366cb164a 100644 --- a/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h +++ b/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h @@ -18,6 +18,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" #include "clang/AST/TypeOrdering.h" +#include "clang/Analysis/FlowSensitive/ASTOps.h" #include "clang/Analysis/FlowSensitive/AdornedCFG.h" #include "clang/Analysis/FlowSensitive/Arena.h" #include "clang/Analysis/FlowSensitive/Solver.h" @@ -30,38 +31,11 @@ #include #include #include -#include -#include -#include namespace clang { namespace dataflow { class Logger; -/// Skip past nodes that the CFG does not emit. These nodes are invisible to -/// flow-sensitive analysis, and should be ignored as they will effectively not -/// exist. -/// -/// * `ParenExpr` - The CFG takes the operator precedence into account, but -/// otherwise omits the node afterwards. -/// -/// * `ExprWithCleanups` - The CFG will generate the appropriate calls to -/// destructors and then omit the node. -/// -const Expr &ignoreCFGOmittedNodes(const Expr &E); -const Stmt &ignoreCFGOmittedNodes(const Stmt &S); - -/// A set of `FieldDecl *`. Use `SmallSetVector` to guarantee deterministic -/// iteration order. -using FieldSet = llvm::SmallSetVector; - -/// Returns the set of all fields in the type. -FieldSet getObjectFields(QualType Type); - -/// Returns whether `Fields` and `FieldLocs` contain the same fields. -bool containsSameFields(const FieldSet &Fields, - const RecordStorageLocation::FieldToLoc &FieldLocs); - struct ContextSensitiveOptions { /// The maximum depth to analyze. A value of zero is equivalent to disabling /// context-sensitive analysis entirely. diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h index 9a65f76cdf56b..4277792219c0a 100644 --- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h +++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h @@ -30,6 +30,7 @@ #include "llvm/ADT/MapVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" +#include #include #include @@ -344,17 +345,6 @@ class Environment { /// location of the result object to pass in `this`, even though prvalues are /// otherwise not associated with storage locations. /// - /// FIXME: Currently, this simply returns a stable storage location for `E`, - /// but this doesn't do the right thing in scenarios like the following: - /// ``` - /// MyClass c = some_condition()? MyClass(foo) : MyClass(bar); - /// ``` - /// Here, `MyClass(foo)` and `MyClass(bar)` will have two different storage - /// locations, when in fact their storage locations should be the same. - /// Eventually, we want to propagate storage locations from result objects - /// down to the prvalues that initialize them, similar to the way that this is - /// done in Clang's CodeGen. - /// /// Requirements: /// `E` must be a prvalue of record type. 
RecordStorageLocation & @@ -462,7 +452,13 @@ class Environment { /// Initializes the fields (including synthetic fields) of `Loc` with values, /// unless values of the field type are not supported or we hit one of the /// limits at which we stop producing values. - void initializeFieldsWithValues(RecordStorageLocation &Loc); + /// If `Type` is provided, initializes only those fields that are modeled for + /// `Type`; this is intended for use in cases where `Loc` is a derived type + /// and we only want to initialize the fields of a base type. + void initializeFieldsWithValues(RecordStorageLocation &Loc, QualType Type); + void initializeFieldsWithValues(RecordStorageLocation &Loc) { + initializeFieldsWithValues(Loc, Loc.getType()); + } /// Assigns `Val` as the value of `Loc` in the environment. void setValue(const StorageLocation &Loc, Value &Val); @@ -653,6 +649,9 @@ class Environment { LLVM_DUMP_METHOD void dump(raw_ostream &OS) const; private: + using PrValueToResultObject = + llvm::DenseMap; + // The copy-constructor is for use in fork() only. Environment(const Environment &) = default; @@ -682,8 +681,10 @@ class Environment { /// Initializes the fields (including synthetic fields) of `Loc` with values, /// unless values of the field type are not supported or we hit one of the /// limits at which we stop producing values (controlled by `Visited`, - /// `Depth`, and `CreatedValuesCount`). - void initializeFieldsWithValues(RecordStorageLocation &Loc, + /// `Depth`, and `CreatedValuesCount`). If `Type` is different from + /// `Loc.getType()`, initializes only those fields that are modeled for + /// `Type`. + void initializeFieldsWithValues(RecordStorageLocation &Loc, QualType Type, llvm::DenseSet &Visited, int Depth, int &CreatedValuesCount); @@ -702,22 +703,45 @@ class Environment { /// and functions referenced in `FuncDecl`. `FuncDecl` must have a body. void initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl); + static PrValueToResultObject + buildResultObjectMap(DataflowAnalysisContext *DACtx, + const FunctionDecl *FuncDecl, + RecordStorageLocation *ThisPointeeLoc, + RecordStorageLocation *LocForRecordReturnVal); + // `DACtx` is not null and not owned by this object. DataflowAnalysisContext *DACtx; - // FIXME: move the fields `CallStack`, `ReturnVal`, `ReturnLoc` and - // `ThisPointeeLoc` into a separate call-context object, shared between - // environments in the same call. + // FIXME: move the fields `CallStack`, `ResultObjectMap`, `ReturnVal`, + // `ReturnLoc` and `ThisPointeeLoc` into a separate call-context object, + // shared between environments in the same call. // https://github.com/llvm/llvm-project/issues/59005 // `DeclContext` of the block being analysed if provided. std::vector CallStack; - // Value returned by the function (if it has non-reference return type). + // Maps from prvalues of record type to their result objects. Shared between + // all environments for the same function. + // FIXME: It's somewhat unsatisfactory that we have to use a `shared_ptr` + // here, though the cost is acceptable: The overhead of a `shared_ptr` is + // incurred when it is copied, and this happens only relatively rarely (when + // we fork the environment). The need for a `shared_ptr` will go away once we + // introduce a shared call-context object (see above). + std::shared_ptr ResultObjectMap; + + // The following three member variables handle various different types of + // return values. 
+ // - If the return type is not a reference and not a record: Value returned + // by the function. Value *ReturnVal = nullptr; - // Storage location of the reference returned by the function (if it has - // reference return type). + // - If the return type is a reference: Storage location of the reference + // returned by the function. StorageLocation *ReturnLoc = nullptr; + // - If the return type is a record or the function being analyzed is a + // constructor: Storage location into which the return value should be + // constructed. + RecordStorageLocation *LocForRecordReturnVal = nullptr; + // The storage location of the `this` pointee. Should only be null if the // function being analyzed is only a function and not a method. RecordStorageLocation *ThisPointeeLoc = nullptr; @@ -751,42 +775,6 @@ RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE, RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME, const Environment &Env); -/// Returns the fields of a `RecordDecl` that are initialized by an -/// `InitListExpr`, in the order in which they appear in -/// `InitListExpr::inits()`. -/// `Init->getType()` must be a record type. -std::vector -getFieldsForInitListExpr(const InitListExpr *InitList); - -/// Helper class for initialization of a record with an `InitListExpr`. -/// `InitListExpr::inits()` contains the initializers for both the base classes -/// and the fields of the record; this helper class separates these out into two -/// different lists. In addition, it deals with special cases associated with -/// unions. -class RecordInitListHelper { -public: - // `InitList` must have record type. - RecordInitListHelper(const InitListExpr *InitList); - - // Base classes with their associated initializer expressions. - ArrayRef> base_inits() const { - return BaseInits; - } - - // Fields with their associated initializer expressions. - ArrayRef> field_inits() const { - return FieldInits; - } - -private: - SmallVector> BaseInits; - SmallVector> FieldInits; - - // We potentially synthesize an `ImplicitValueInitExpr` for unions. It's a - // member variable because we store a pointer to it in `FieldInits`. - std::optional ImplicitValueInitForUnion; -}; - /// Associates a new `RecordValue` with `Loc` and returns the new value. RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env); diff --git a/clang/include/clang/Analysis/SelectorExtras.h b/clang/include/clang/Analysis/SelectorExtras.h index 1e1daf5706bbf..ac2c2519beae3 100644 --- a/clang/include/clang/Analysis/SelectorExtras.h +++ b/clang/include/clang/Analysis/SelectorExtras.h @@ -15,10 +15,10 @@ namespace clang { template static inline Selector getKeywordSelector(ASTContext &Ctx, - IdentifierInfos *... IIs) { + const IdentifierInfos *...IIs) { static_assert(sizeof...(IdentifierInfos) > 0, "keyword selectors must have at least one argument"); - SmallVector II({&Ctx.Idents.get(IIs)...}); + SmallVector II({&Ctx.Idents.get(IIs)...}); return Ctx.Selectors.getSelector(II.size(), &II[0]); } diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td index 56cea5d115d12..1888daa0f3fb2 100644 --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -1657,27 +1657,40 @@ specifies availability for the current target platform, the availability attributes are ignored. Supported platforms are: ``ios`` - Apple's iOS operating system. 
The minimum deployment target is specified by - the ``-mios-version-min=*version*`` or ``-miphoneos-version-min=*version*`` - command-line arguments. + Apple's iOS operating system. The minimum deployment target is specified + as part of the ``-target *arch*-apple-ios*version*`` command line argument. + Alternatively, it can be specified by the ``-mtargetos=ios*version*`` + command-line argument. ``macos`` - Apple's macOS operating system. The minimum deployment target is - specified by the ``-mmacosx-version-min=*version*`` command-line argument. - ``macosx`` is supported for backward-compatibility reasons, but it is - deprecated. + Apple's macOS operating system. The minimum deployment target is specified + as part of the ``-target *arch*-apple-macos*version*`` command line argument. + Alternatively, it can be specified by the ``-mtargetos=macos*version*`` + command-line argument. ``macosx`` is supported for + backward-compatibility reasons, but it is deprecated. ``tvos`` - Apple's tvOS operating system. The minimum deployment target is specified by - the ``-mtvos-version-min=*version*`` command-line argument. + Apple's tvOS operating system. The minimum deployment target is specified + as part of the ``-target *arch*-apple-tvos*version*`` command line argument. + Alternatively, it can be specified by the ``-mtargetos=tvos*version*`` + command-line argument. ``watchos`` - Apple's watchOS operating system. The minimum deployment target is specified by - the ``-mwatchos-version-min=*version*`` command-line argument. + Apple's watchOS operating system. The minimum deployment target is specified + as part of the ``-target *arch*-apple-watchos*version*`` command line argument. + Alternatively, it can be specified by the ``-mtargetos=watchos*version*`` + command-line argument. + +``visionos`` + Apple's visionOS operating system. The minimum deployment target is specified + as part of the ``-target *arch*-apple-visionos*version*`` command line argument. + Alternatively, it can be specified by the ``-mtargetos=visionos*version*`` + command-line argument. ``driverkit`` Apple's DriverKit userspace kernel extensions. The minimum deployment target - is specified as part of the triple. + is specified as part of the ``-target *arch*-apple-driverkit*version*`` + command line argument. A declaration can typically be used even when deploying back to a platform version prior to when the declaration was introduced. When this happens, the @@ -9213,7 +9226,7 @@ means that it can e.g no longer be part of an initializer expression. 
/* This may print something else than "6 * 7 = 42", if there is a non-weak definition of "ANSWER" in - an object linked in */ + an object linked in */ printf("6 * 7 = %d\n", ANSWER); return 0; diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index 7a9a6c6186af2..408c9c0654e5d 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -1164,6 +1164,12 @@ def Unreachable : Builtin { let Prototype = "void()"; } +def AllowRuntimeCheck : Builtin { + let Spellings = ["__builtin_allow_runtime_check"]; + let Attributes = [NoThrow, Pure, Const]; + let Prototype = "bool(char const*)"; +} + def ShuffleVector : Builtin { let Spellings = ["__builtin_shufflevector"]; let Attributes = [NoThrow, Const, CustomTypeChecking]; diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def index 6c5691390137b..cb633774578c9 100644 --- a/clang/include/clang/Basic/BuiltinsAMDGPU.def +++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -61,6 +61,7 @@ BUILTIN(__builtin_amdgcn_s_waitcnt, "vIi", "n") BUILTIN(__builtin_amdgcn_s_sendmsg, "vIiUi", "n") BUILTIN(__builtin_amdgcn_s_sendmsghalt, "vIiUi", "n") BUILTIN(__builtin_amdgcn_s_barrier, "v", "n") +BUILTIN(__builtin_amdgcn_s_ttracedata, "vi", "n") BUILTIN(__builtin_amdgcn_wave_barrier, "v", "n") BUILTIN(__builtin_amdgcn_sched_barrier, "vIi", "n") BUILTIN(__builtin_amdgcn_sched_group_barrier, "vIiIiIi", "n") @@ -267,6 +268,7 @@ TARGET_BUILTIN(__builtin_amdgcn_dot4_f32_bf8_bf8, "fUiUif", "nc", "dot11-insts") TARGET_BUILTIN(__builtin_amdgcn_permlane16, "UiUiUiUiUiIbIb", "nc", "gfx10-insts") TARGET_BUILTIN(__builtin_amdgcn_permlanex16, "UiUiUiUiUiIbIb", "nc", "gfx10-insts") TARGET_BUILTIN(__builtin_amdgcn_mov_dpp8, "UiUiIUi", "nc", "gfx10-insts") +TARGET_BUILTIN(__builtin_amdgcn_s_ttracedata_imm, "vIs", "n", "gfx10-insts") //===----------------------------------------------------------------------===// // Raytracing builtins. diff --git a/clang/include/clang/Basic/Cuda.h b/clang/include/clang/Basic/Cuda.h index 3e77a74c7c009..ba0e4465a0f5a 100644 --- a/clang/include/clang/Basic/Cuda.h +++ b/clang/include/clang/Basic/Cuda.h @@ -53,10 +53,12 @@ CudaVersion CudaStringToVersion(const llvm::Twine &S); enum class CudaArch { UNUSED, UNKNOWN, + // TODO: Deprecate and remove GPU architectures older than sm_52. SM_20, SM_21, SM_30, - SM_32, + // This has a name conflict with sys/mac.h on AIX, rename it as a workaround. + SM_32_, SM_35, SM_37, SM_50, @@ -126,6 +128,14 @@ enum class CudaArch { HIPDefault = CudaArch::GFX906, }; +enum class CUDAFunctionTarget { + Device, + Global, + Host, + HostDevice, + InvalidTarget +}; + static inline bool IsNVIDIAGpuArch(CudaArch A) { return A >= CudaArch::SM_20 && A < CudaArch::GFX600; } diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td index 0172154200e6c..8dbdea0762da4 100644 --- a/clang/include/clang/Basic/DiagnosticGroups.td +++ b/clang/include/clang/Basic/DiagnosticGroups.td @@ -1436,9 +1436,6 @@ def MultiGPU: DiagGroup<"multi-gpu">; // libc and the CRT to be skipped. def AVRRtlibLinkingQuirks : DiagGroup<"avr-rtlib-linking-quirks">; -// A warning group related to AArch64 SME function attribues. -def AArch64SMEAttributes : DiagGroup<"aarch64-sme-attributes">; - // A warning group for things that will change semantics in the future. 
def FutureCompat : DiagGroup<"future-compat">; diff --git a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td index 0a477da7186b0..396bff0146a37 100644 --- a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td +++ b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td @@ -20,6 +20,10 @@ def warn_no_such_excluded_header_file : Warning<"no such excluded %select{public def warn_glob_did_not_match: Warning<"glob '%0' did not match any header file">, InGroup; def err_no_such_umbrella_header_file : Error<"%select{public|private|project}1 umbrella header file not found in input: '%0'">; def err_cannot_find_reexport : Error<"cannot find re-exported %select{framework|library}0: '%1'">; +def err_no_matching_target : Error<"no matching target found for target variant '%0'">; +def err_unsupported_vendor : Error<"vendor '%0' is not supported: '%1'">; +def err_unsupported_environment : Error<"environment '%0' is not supported: '%1'">; +def err_unsupported_os : Error<"os '%0' is not supported: '%1'">; } // end of command line category. let CategoryName = "Verification" in { diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index 46a44418a3153..66405095d51de 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -863,6 +863,8 @@ def err_empty_requires_expr : Error< "a requires expression must contain at least one requirement">; def err_requires_expr_parameter_list_ellipsis : Error< "varargs not allowed in requires expression">; +def err_requires_expr_explicit_object_parameter: Error< + "a requires expression cannot have an explicit object parameter">; def err_expected_semi_requirement : Error< "expected ';' at end of requirement">; def err_requires_expr_missing_arrow : Error< @@ -941,6 +943,12 @@ def warn_cxx98_compat_defaulted_deleted_function : Warning< "%select{defaulted|deleted}0 function definitions are incompatible with C++98">, InGroup, DefaultIgnore; +def ext_delete_with_message : ExtWarn< + "'= delete' with a message is a C++2c extension">, InGroup; +def warn_cxx23_delete_with_message : Warning< + "'= delete' with a message is incompatible with C++ standards before C++2c">, + DefaultIgnore, InGroup; + // C++11 default member initialization def ext_nonstatic_member_init : ExtWarn< "default member initializer for non-static data member is a C++11 " diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index 9c47ac6664531..04c6796eb6e7d 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -2467,10 +2467,6 @@ def err_selected_explicit_constructor : Error< def note_explicit_ctor_deduction_guide_here : Note< "explicit %select{constructor|deduction guide}0 declared here">; -// C++11 decltype -def err_decltype_in_declarator : Error< - "'decltype' cannot be used to name a declaration">; - // C++11 auto def warn_cxx98_compat_auto_type_specifier : Warning< "'auto' type specifier is incompatible with C++98">, @@ -3751,6 +3747,9 @@ def warn_attribute_dllexport_explicit_instantiation_decl : Warning< def warn_attribute_dllexport_explicit_instantiation_def : Warning< "'dllexport' attribute ignored on explicit instantiation definition">, InGroup; +def warn_attribute_exclude_from_explicit_instantiation_local_class : Warning< + "%0 attribute ignored on local class%select{| member}1">, + InGroup; 
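Two of the new parser diagnostics above are easiest to read next to the code they fire on. A hedged sketch follows; the class, member, and concept names are invented for illustration.

    struct Widget {
      // C++2c: a deleted definition may carry a message
      // (ext_delete_with_message / warn_cxx23_delete_with_message);
      // the message is surfaced when the deleted function is used.
      void release() = delete("call Widget::reset() instead");
    };

    // The following would be rejected with the new
    // err_requires_expr_explicit_object_parameter diagnostic, because a
    // requires-expression's parameter list cannot declare an explicit
    // object parameter:
    //   template <typename T>
    //   concept HasF = requires(this T self) { self.f(); };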
def warn_invalid_initializer_from_system_header : Warning< "invalid constructor from class in system header, should not be explicit">, InGroup>; @@ -3835,16 +3834,6 @@ def err_sme_definition_using_za_in_non_sme_target : Error< "function using ZA state requires 'sme'">; def err_sme_definition_using_zt0_in_non_sme2_target : Error< "function using ZT0 state requires 'sme2'">; -def warn_sme_streaming_pass_return_vl_to_non_streaming : Warning< - "passing a VL-dependent argument to/from a function that has a different" - " streaming-mode. The streaming and non-streaming vector lengths may be" - " different">, - InGroup, DefaultIgnore; -def warn_sme_locally_streaming_has_vl_args_returns : Warning< - "passing/returning a VL-dependent argument to/from a __arm_locally_streaming" - " function. The streaming and non-streaming vector" - " lengths may be different">, - InGroup, DefaultIgnore; def err_conflicting_attributes_arm_state : Error< "conflicting attributes for state '%0'">; def err_sme_streaming_cannot_be_multiversioned : Error< @@ -4769,11 +4758,10 @@ def err_ovl_no_viable_member_function_in_call : Error< "no matching member function for call to %0">; def err_ovl_ambiguous_call : Error< "call to %0 is ambiguous">; -def err_ovl_deleted_call : Error<"call to deleted function %0">; +def err_ovl_deleted_call : Error<"call to deleted" + "%select{| member}0 function %1%select{|: %3}2">; def err_ovl_ambiguous_member_call : Error< "call to member function %0 is ambiguous">; -def err_ovl_deleted_member_call : Error< - "call to deleted member function %0">; def note_ovl_too_many_candidates : Note< "remaining %0 candidate%s0 omitted; " "pass -fshow-overloads=all to show them">; @@ -5001,12 +4989,12 @@ def err_ovl_ambiguous_conversion_in_cast : Error< "dynamic_cast|C-style cast|functional-style cast|}0 from %1 to %2">; def err_ovl_deleted_conversion_in_cast : Error< "%select{|static_cast|reinterpret_cast|dynamic_cast|C-style cast|" - "functional-style cast|}0 from %1 to %2 uses deleted function">; + "functional-style cast|}0 from %1 to %2 uses deleted function%select{|: %4}3">; def err_ovl_ambiguous_init : Error<"call to constructor of %0 is ambiguous">; def err_ref_init_ambiguous : Error< "reference initialization of type %0 with initializer of type %1 is ambiguous">; def err_ovl_deleted_init : Error< - "call to deleted constructor of %0">; + "call to deleted constructor of %0%select{|: %2}1">; def err_ovl_deleted_special_init : Error< "call to implicitly-deleted %select{default constructor|copy constructor|" "move constructor|copy assignment operator|move assignment operator|" @@ -5032,7 +5020,7 @@ def note_ovl_ambiguous_oper_binary_reversed_candidate : Note< def err_ovl_no_viable_oper : Error<"no viable overloaded '%0'">; def note_assign_lhs_incomplete : Note<"type %0 is incomplete">; def err_ovl_deleted_oper : Error< - "overload resolution selected deleted operator '%0'">; + "overload resolution selected deleted operator '%0'%select{|: %2}1">; def err_ovl_deleted_special_oper : Error< "object of type %0 cannot be %select{constructed|copied|moved|assigned|" "assigned|destroyed}1 because its %sub{select_special_member_kind}1 is " @@ -5069,7 +5057,7 @@ def err_ovl_ambiguous_object_call : Error< def err_ovl_ambiguous_subscript_call : Error< "call to subscript operator of type %0 is ambiguous">; def err_ovl_deleted_object_call : Error< - "call to deleted function call operator in type %0">; + "call to deleted function call operator in type %0%select{|: %2}1">; def note_ovl_surrogate_cand : Note<"conversion 
candidate of type %0">; def err_member_call_without_object : Error< "call to %select{non-static|explicit}0 member function without an object argument">; @@ -7685,8 +7673,8 @@ def ext_gnu_ptr_func_arith : Extension< InGroup; def err_readonly_message_assignment : Error< "assigning to 'readonly' return result of an Objective-C message not allowed">; -def ext_integer_increment_complex : Extension< - "ISO C does not support '++'/'--' on complex integer type %0">; +def ext_increment_complex : Extension< + "'%select{--|++}0' on an object of complex type is a Clang extension">; def ext_integer_complement_complex : Extension< "ISO C does not support '~' for complex conjugation of %0">; def err_nosetter_property_assignment : Error< @@ -8385,7 +8373,7 @@ def err_typecheck_nonviable_condition_incomplete : Error< "no viable conversion%diff{ from $ to incomplete type $|}0,1">; def err_typecheck_deleted_function : Error< "conversion function %diff{from $ to $|between types}0,1 " - "invokes a deleted function">; + "invokes a deleted function%select{|: %3}2">; def err_expected_class_or_namespace : Error<"%0 is not a class" "%select{ or namespace|, namespace, or enumeration}1">; @@ -8410,6 +8398,9 @@ def ext_template_after_declarative_nns : ExtWarn< def ext_alias_template_in_declarative_nns : ExtWarn< "a declarative nested name specifier cannot name an alias template">, InGroup>; +def err_computed_type_in_declarative_nns : Error< + "a %select{pack indexing|'decltype'}0 specifier cannot be used in " + "a declarative nested name specifier">; def err_no_typeid_with_fno_rtti : Error< "use of typeid requires -frtti">; @@ -8982,7 +8973,7 @@ def err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector : Error< "address argument to nontemporal builtin must be a pointer to integer, float, " "pointer, or a vector of such types (%0 invalid)">; -def err_deleted_function_use : Error<"attempt to use a deleted function">; +def err_deleted_function_use : Error<"attempt to use a deleted function%select{|: %1}0">; def err_deleted_inherited_ctor_use : Error< "constructor inherited by %0 from base class %1 is implicitly deleted">; @@ -12551,6 +12542,10 @@ def err_acc_construct_appertainment "be used in a statement context">; def err_acc_clause_appertainment : Error<"OpenACC '%1' clause is not valid on '%0' directive">; +def err_acc_duplicate_clause_disallowed + : Error<"OpenACC '%1' clause cannot appear more than once on a '%0' " + "directive">; +def note_acc_previous_clause_here : Note<"previous clause is here">; def err_acc_branch_in_out_compute_construct : Error<"invalid %select{branch|return|throw}0 %select{out of|into}1 " "OpenACC Compute Construct">; @@ -12558,4 +12553,8 @@ def note_acc_branch_into_compute_construct : Note<"invalid branch into OpenACC Compute Construct">; def note_acc_branch_out_of_compute_construct : Note<"invalid branch out of OpenACC Compute Construct">; +def warn_acc_if_self_conflict + : Warning<"OpenACC construct 'self' has no effect when an 'if' clause " + "evaluates to true">, + InGroup>; } // end of sema component. diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h index a091639bfa254..a893e6f4d3d39 100644 --- a/clang/include/clang/Basic/IdentifierTable.h +++ b/clang/include/clang/Basic/IdentifierTable.h @@ -913,12 +913,13 @@ class alignas(IdentifierInfoAlignment) MultiKeywordSelector public: // Constructor for keyword selectors. 
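The reworded ext_increment_complex extension above now covers any complex type, not just complex integers. A small example of the code it applies to (variable names are illustrative; the extension is reported under -pedantic):

    // Increment/decrement on a complex value adjusts the real component;
    // Clang accepts this as an extension and diagnoses it with the
    // reworded ext_increment_complex wording.
    void bump() {
      _Complex double z = 1.0;
      ++z;
      z--;
    }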
- MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV) + MultiKeywordSelector(unsigned nKeys, const IdentifierInfo **IIV) : DeclarationNameExtra(nKeys) { assert((nKeys > 1) && "not a multi-keyword selector"); // Fill in the trailing keyword array. - IdentifierInfo **KeyInfo = reinterpret_cast(this + 1); + const IdentifierInfo **KeyInfo = + reinterpret_cast(this + 1); for (unsigned i = 0; i != nKeys; ++i) KeyInfo[i] = IIV[i]; } @@ -928,7 +929,7 @@ class alignas(IdentifierInfoAlignment) MultiKeywordSelector using DeclarationNameExtra::getNumArgs; - using keyword_iterator = IdentifierInfo *const *; + using keyword_iterator = const IdentifierInfo *const *; keyword_iterator keyword_begin() const { return reinterpret_cast(this + 1); @@ -938,7 +939,7 @@ class alignas(IdentifierInfoAlignment) MultiKeywordSelector return keyword_begin() + getNumArgs(); } - IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const { + const IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const { assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index"); return keyword_begin()[i]; } @@ -991,10 +992,10 @@ class Selector { /// Do not reorder or add any arguments to this template /// without thoroughly understanding how tightly coupled these classes are. llvm::PointerIntPair< - llvm::PointerUnion, 2> + llvm::PointerUnion, 2> InfoPtr; - Selector(IdentifierInfo *II, unsigned nArgs) { + Selector(const IdentifierInfo *II, unsigned nArgs) { assert(nArgs < 2 && "nArgs not equal to 0/1"); InfoPtr.setPointerAndInt(II, nArgs + 1); } @@ -1006,8 +1007,8 @@ class Selector { InfoPtr.setPointerAndInt(SI, MultiArg & 0b11); } - IdentifierInfo *getAsIdentifierInfo() const { - return InfoPtr.getPointer().dyn_cast(); + const IdentifierInfo *getAsIdentifierInfo() const { + return InfoPtr.getPointer().dyn_cast(); } MultiKeywordSelector *getMultiKeywordSelector() const { @@ -1075,7 +1076,7 @@ class Selector { /// /// \returns the uniqued identifier for this slot, or NULL if this slot has /// no corresponding identifier. - IdentifierInfo *getIdentifierInfoForSlot(unsigned argIndex) const; + const IdentifierInfo *getIdentifierInfoForSlot(unsigned argIndex) const; /// Retrieve the name at a given position in the selector. /// @@ -1132,13 +1133,13 @@ class SelectorTable { /// /// \p NumArgs indicates whether this is a no argument selector "foo", a /// single argument selector "foo:" or multi-argument "foo:bar:". - Selector getSelector(unsigned NumArgs, IdentifierInfo **IIV); + Selector getSelector(unsigned NumArgs, const IdentifierInfo **IIV); - Selector getUnarySelector(IdentifierInfo *ID) { + Selector getUnarySelector(const IdentifierInfo *ID) { return Selector(ID, 1); } - Selector getNullarySelector(IdentifierInfo *ID) { + Selector getNullarySelector(const IdentifierInfo *ID) { return Selector(ID, 0); } diff --git a/clang/include/clang/Basic/OpenACCClauses.def b/clang/include/clang/Basic/OpenACCClauses.def new file mode 100644 index 0000000000000..378495d2c0909 --- /dev/null +++ b/clang/include/clang/Basic/OpenACCClauses.def @@ -0,0 +1,22 @@ +//===-- OpenACCClauses.def - List of implemented OpenACC Clauses -- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines a list of currently implemented OpenACC Clauses (and +// eventually, the entire list) in a way that makes generating 'visitor' and +// other lists easier. +// +// The primary macro is a single-argument version taking the name of the Clause +// as used in Clang source (so `Default` instead of `default`). +// +// VISIT_CLAUSE(CLAUSE_NAME) + +VISIT_CLAUSE(Default) +VISIT_CLAUSE(If) +VISIT_CLAUSE(Self) + +#undef VISIT_CLAUSE diff --git a/clang/include/clang/Basic/OpenACCKinds.h b/clang/include/clang/Basic/OpenACCKinds.h index 95fc35a5bedb7..e3f7417843328 100644 --- a/clang/include/clang/Basic/OpenACCKinds.h +++ b/clang/include/clang/Basic/OpenACCKinds.h @@ -146,6 +146,12 @@ inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out, return printOpenACCDirectiveKind(Out, K); } +inline bool isOpenACCComputeDirectiveKind(OpenACCDirectiveKind K) { + return K == OpenACCDirectiveKind::Parallel || + K == OpenACCDirectiveKind::Serial || + K == OpenACCDirectiveKind::Kernels; +} + enum class OpenACCAtomicKind { Read, Write, @@ -419,6 +425,30 @@ enum class OpenACCDefaultClauseKind { Invalid, }; +template +inline StreamTy &printOpenACCDefaultClauseKind(StreamTy &Out, + OpenACCDefaultClauseKind K) { + switch (K) { + case OpenACCDefaultClauseKind::None: + return Out << "none"; + case OpenACCDefaultClauseKind::Present: + return Out << "present"; + case OpenACCDefaultClauseKind::Invalid: + return Out << ""; + } + llvm_unreachable("Unknown OpenACCDefaultClauseKind enum"); +} + +inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out, + OpenACCDefaultClauseKind K) { + return printOpenACCDefaultClauseKind(Out, K); +} + +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out, + OpenACCDefaultClauseKind K) { + return printOpenACCDefaultClauseKind(Out, K); +} + enum class OpenACCReductionOperator { /// '+'. Addition, diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def index 81236f0185e85..d93c6d965b1f1 100644 --- a/clang/include/clang/Basic/TokenKinds.def +++ b/clang/include/clang/Basic/TokenKinds.def @@ -522,6 +522,7 @@ TYPE_TRAIT_1(__is_union, IsUnion, KEYCXX) TYPE_TRAIT_1(__has_unique_object_representations, HasUniqueObjectRepresentations, KEYCXX) TYPE_TRAIT_2(__is_layout_compatible, IsLayoutCompatible, KEYCXX) +TYPE_TRAIT_2(__is_pointer_interconvertible_base_of, IsPointerInterconvertibleBaseOf, KEYCXX) #define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) KEYWORD(__##Trait, KEYCXX) #include "clang/Basic/TransformTypeTraits.def" diff --git a/clang/include/clang/Basic/arm_fp16.td b/clang/include/clang/Basic/arm_fp16.td index cb2a09303e8e1..d36b4617bef5d 100644 --- a/clang/include/clang/Basic/arm_fp16.td +++ b/clang/include/clang/Basic/arm_fp16.td @@ -14,7 +14,7 @@ include "arm_neon_incl.td" // ARMv8.2-A FP16 intrinsics. 
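The new OpenACCClauses.def above follows the usual .def/X-macro pattern: a client defines VISIT_CLAUSE, includes the file, and gets one expansion per implemented clause (Default, If, Self); the file #undefs the macro itself. A minimal consumer sketch, with a made-up visitor class used purely for illustration:

    // Declares VisitDefaultClause, VisitIfClause, and VisitSelfClause.
    struct MyClauseVisitor {
    #define VISIT_CLAUSE(CLAUSE_NAME) \
      void Visit##CLAUSE_NAME##Clause();
    #include "clang/Basic/OpenACCClauses.def"
    };

The __is_pointer_interconvertible_base_of trait added to TokenKinds.def mirrors std::is_pointer_interconvertible_base_of; a plausible use, with illustrative types:

    struct Base {};
    struct Derived : Base { int x; };
    static_assert(__is_pointer_interconvertible_base_of(Base, Derived), "");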
-let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fullfp16" in { // Negate def VNEGSH : SInst<"vneg", "11", "Sh">; diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td index 7edac5afafaa9..6d655c39360d3 100644 --- a/clang/include/clang/Basic/arm_neon.td +++ b/clang/include/clang/Basic/arm_neon.td @@ -605,11 +605,11 @@ def VQDMULL_LANE : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>; def VQDMULH_N : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>; def VQRDMULH_N : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>; -let ArchGuard = "!defined(__aarch64__)" in { +let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)" in { def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>; def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>; } -let ArchGuard = "defined(__aarch64__)" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in { def A64_VQDMULH_LANE : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">; def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">; } @@ -686,7 +686,7 @@ multiclass REINTERPRET_CROSS_TYPES { // E.3.31 Vector reinterpret cast operations def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> { - let ArchGuard = "!defined(__aarch64__)"; + let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)"; let BigEndianSafe = 1; } @@ -714,7 +714,7 @@ def VADDP : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">; //////////////////////////////////////////////////////////////////////////////// // AArch64 Intrinsics -let ArchGuard = "defined(__aarch64__)" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in { //////////////////////////////////////////////////////////////////////////////// // Load/Store @@ -1091,14 +1091,14 @@ let isLaneQ = 1 in { def VQDMULH_LANEQ : SInst<"vqdmulh_laneq", "..QI", "siQsQi">; def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" in { def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> { let isLaneQ = 1; } def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> { let isLaneQ = 1; } -} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" +} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" // Note: d type implemented by SCALAR_VMULX_LANE def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>; @@ -1143,7 +1143,7 @@ def SHA256H2 : SInst<"vsha256h2", "....", "QUi">; def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3" in { def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">; def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">; def RAX1 : SInst<"vrax1", "...", "QUl">; @@ -1153,14 +1153,14 @@ def XAR : SInst<"vxar", "...I", "QUl">; } } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3" in { def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">; def SHA512su1 : SInst<"vsha512su1", "....", "QUl">; def SHA512H : SInst<"vsha512h", "....", "QUl">; def 
SHA512H2 : SInst<"vsha512h2", "....", "QUl">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4" in { def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">; def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">; def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">; @@ -1170,7 +1170,7 @@ def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">; def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4" in { def SM4E : SInst<"vsm4e", "...", "QUi">; def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">; } @@ -1193,7 +1193,7 @@ def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">; def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">; } -let ArchGuard = "defined(__aarch64__)" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in { def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">; def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">; def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">; @@ -1217,7 +1217,7 @@ def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">; def FRINTI_S32 : SInst<"vrndi", "..", "fQf">; } -let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in { +let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in { def FRINTN_S64 : SInst<"vrndn", "..", "dQd">; def FRINTA_S64 : SInst<"vrnda", "..", "dQd">; def FRINTP_S64 : SInst<"vrndp", "..", "dQd">; @@ -1227,7 +1227,7 @@ def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">; def FRINTI_S64 : SInst<"vrndi", "..", "dQd">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.5a" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.5a" in { def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">; def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">; def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">; @@ -1247,7 +1247,7 @@ def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">; def FMINNM_S32 : SInst<"vminnm", "...", "fQf">; } -let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in { +let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in { def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">; def FMINNM_S64 : SInst<"vminnm", "...", "dQd">; } @@ -1289,7 +1289,7 @@ def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">; // itself during generation so, unlike all other intrinsics, this one should // include *all* types, not just additional ones. 
def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> { - let ArchGuard = "defined(__aarch64__)"; + let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)"; let BigEndianSafe = 1; } @@ -1401,7 +1401,7 @@ def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">; // Scalar Integer Saturating Rounding Doubling Multiply Half High def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">; -let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" in { //////////////////////////////////////////////////////////////////////////////// // Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">; @@ -1409,7 +1409,7 @@ def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">; //////////////////////////////////////////////////////////////////////////////// // Signed Saturating Rounding Doubling Multiply Subtract Returning High Half def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">; -} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" +} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" //////////////////////////////////////////////////////////////////////////////// // Scalar Floating-point Multiply Extended @@ -1651,7 +1651,7 @@ def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcS let isLaneQ = 1; } -} // ArchGuard = "defined(__aarch64__)" +} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" // ARMv8.2-A FP16 vector intrinsics for A32/A64. let TargetGuard = "fullfp16" in { @@ -1775,7 +1775,7 @@ def VEXTH : WInst<"vext", "...I", "hQh">; def VREV64H : WOpInst<"vrev64", "..", "hQh", OP_REV64>; // ARMv8.2-A FP16 vector intrinsics for A64 only. -let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fullfp16" in { // Vector rounding def FRINTIH : SInst<"vrndi", "..", "hQh">; @@ -1856,7 +1856,7 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in { def FMINNMVH : SInst<"vminnmv", "1.", "hQh">; } -let ArchGuard = "defined(__aarch64__)" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in { // Permutation def VTRN1H : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>; def VZIP1H : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>; @@ -1876,7 +1876,7 @@ let TargetGuard = "dotprod" in { def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">; def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "dotprod" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "dotprod" in { // Variants indexing into a 128-bit vector are A64 only. def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(< { let isLaneQ = 1; @@ -1884,7 +1884,7 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "dotprod" in { } // v8.2-A FP16 fused multiply-add long instructions. 
-let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp16fml" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fp16fml" in { def VFMLAL_LOW : SInst<"vfmlal_low", ">>..", "hQh">; def VFMLSL_LOW : SInst<"vfmlsl_low", ">>..", "hQh">; def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">; @@ -1918,7 +1918,7 @@ let TargetGuard = "i8mm" in { def VUSDOT_LANE : SOpInst<"vusdot_lane", "..(<; def VSUDOT_LANE : SOpInst<"vsudot_lane", "..(<<)(<; - let ArchGuard = "defined(__aarch64__)" in { + let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in { let isLaneQ = 1 in { def VUSDOT_LANEQ : SOpInst<"vusdot_laneq", "..(<; def VSUDOT_LANEQ : SOpInst<"vsudot_laneq", "..(<<)(<; @@ -1986,7 +1986,7 @@ let TargetGuard = "v8.3a" in { defm VCMLA_F32 : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.3a" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.3a" in { def VCADDQ_ROT90_FP64 : SInst<"vcaddq_rot90", "QQQ", "d">; def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">; @@ -2058,14 +2058,14 @@ let TargetGuard = "bf16" in { def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>; } -let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in { +let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16" in { def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">; def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>; def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>; def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>; } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16" in { def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">; def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>; def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">; @@ -2077,14 +2077,14 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in { def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>; } -let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in { +let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16" in { let BigEndianSafe = 1 in { defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES< "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">; } } -let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16" in { let BigEndianSafe = 1 in { defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES< "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">; @@ -2092,7 +2092,7 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in { } // v8.9a/v9.4a LRCPC3 intrinsics -let ArchGuard = "defined(__aarch64__)", TargetGuard = "rcpc3" in { +let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "rcpc3" in { def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">; def VSTL1_LANE : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">; } diff --git a/clang/include/clang/CIR/CMakeLists.txt b/clang/include/clang/CIR/CMakeLists.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/include/clang/CMakeLists.txt 
b/clang/include/clang/CMakeLists.txt index 0dc9ea5ed8ac8..47ac70cd21690 100644 --- a/clang/include/clang/CMakeLists.txt +++ b/clang/include/clang/CMakeLists.txt @@ -1,5 +1,8 @@ add_subdirectory(AST) add_subdirectory(Basic) +if(CLANG_ENABLE_CIR) + add_subdirectory(CIR) +endif() add_subdirectory(Driver) add_subdirectory(Parse) add_subdirectory(Sema) diff --git a/clang/include/clang/CodeGen/CodeGenAction.h b/clang/include/clang/CodeGen/CodeGenAction.h index 7ad2988e589eb..186dbb43f01ef 100644 --- a/clang/include/clang/CodeGen/CodeGenAction.h +++ b/clang/include/clang/CodeGen/CodeGenAction.h @@ -57,6 +57,8 @@ class CodeGenAction : public ASTFrontendAction { bool loadLinkModules(CompilerInstance &CI); protected: + bool BeginSourceFileAction(CompilerInstance &CI) override; + /// Create a new code generation action. If the optional \p _VMContext /// parameter is supplied, the action uses it without taking ownership, /// otherwise it creates a fresh LLVM context and takes ownership. diff --git a/clang/include/clang/Config/config.h.cmake b/clang/include/clang/Config/config.h.cmake index 4015ac8040861..27ed69e21562b 100644 --- a/clang/include/clang/Config/config.h.cmake +++ b/clang/include/clang/Config/config.h.cmake @@ -83,4 +83,7 @@ /* Spawn a new process clang.exe for the CC1 tool invocation, when necessary */ #cmakedefine01 CLANG_SPAWN_CC1 +/* Whether CIR is built into Clang */ +#cmakedefine01 CLANG_ENABLE_CIR + #endif diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index b396f6cc88a51..637d4941f2e3a 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1412,7 +1412,8 @@ def hip_link : Flag<["--"], "hip-link">, Group, HelpText<"Link clang-offload-bundler bundles for HIP">; def no_hip_rt: Flag<["-"], "no-hip-rt">, Group, HelpText<"Do not link against HIP runtime libraries">; -def rocm_path_EQ : Joined<["--"], "rocm-path=">, Group, +def rocm_path_EQ : Joined<["--"], "rocm-path=">, + Visibility<[FlangOption]>, Group, HelpText<"ROCm installation path, used for finding and automatically linking required bitcode libraries.">; def hip_path_EQ : Joined<["--"], "hip-path=">, Group, HelpText<"HIP runtime installation path, used for finding HIP version and adding HIP include path.">; @@ -1520,7 +1521,7 @@ def dD : Flag<["-"], "dD">, Group, Visibility<[ClangOption, CC1Option]> def dI : Flag<["-"], "dI">, Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Print include directives in -E mode in addition to normal output">, MarshallingInfoFlag>; -def dM : Flag<["-"], "dM">, Group, Visibility<[ClangOption, CC1Option]>, +def dM : Flag<["-"], "dM">, Group, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>, HelpText<"Print macro definitions in -E mode instead of normal output">; def dead__strip : Flag<["-"], "dead_strip">; def dependency_file : Separate<["-"], "dependency-file">, @@ -3114,6 +3115,7 @@ defm prebuilt_implicit_modules : BoolFOption<"prebuilt-implicit-modules", def fmodule_output_EQ : Joined<["-"], "fmodule-output=">, Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option]>, + MarshallingInfoString>, HelpText<"Save intermediate module file results when compiling a standard C++ module unit.">; def fmodule_output : Flag<["-"], "fmodule-output">, Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option]>, @@ -3127,6 +3129,11 @@ defm skip_odr_check_in_gmf : BoolOption<"f", "skip-odr-check-in-gmf", "Perform ODR checks for decls in the global module fragment.">>, Group; +def modules_reduced_bmi : 
Flag<["-"], "fexperimental-modules-reduced-bmi">, + Group, Visibility<[ClangOption, CC1Option]>, + HelpText<"Generate the reduced BMI">, + MarshallingInfoFlag>; + def fmodules_prune_interval : Joined<["-"], "fmodules-prune-interval=">, Group, Visibility<[ClangOption, CC1Option]>, MetaVarName<"">, HelpText<"Specify the interval (in seconds) between attempts to prune the module cache">, @@ -5205,6 +5212,9 @@ defm tgsplit : SimpleMFlag<"tgsplit", "Enable", "Disable", defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64", "Specify wavefront size 64", "Specify wavefront size 32", " mode (AMDGPU only)">; +defm amdgpu_precise_memory_op + : SimpleMFlag<"amdgpu-precise-memory-op", "Enable", "Disable", + " precise memory mode (AMDGPU only)">; defm unsafe_fp_atomics : BoolMOption<"unsafe-fp-atomics", TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse, @@ -5756,21 +5766,23 @@ def rdynamic : Flag<["-"], "rdynamic">, Group, Visibility<[ClangOption, FlangOption]>; def resource_dir : Separate<["-"], "resource-dir">, Flags<[NoXarchOption, HelpHidden]>, - Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>, + Visibility<[ClangOption, CC1Option, CLOption, DXCOption, FlangOption, FC1Option]>, HelpText<"The directory which holds the compiler resource files">, MarshallingInfoString>; def resource_dir_EQ : Joined<["-"], "resource-dir=">, Flags<[NoXarchOption]>, - Visibility<[ClangOption, CLOption, DXCOption]>, + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, Alias; def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def rtlib_EQ : Joined<["-", "--"], "rtlib=">, Visibility<[ClangOption, CLOption]>, HelpText<"Compiler runtime library to use">; def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>, + Visibility<[ClangOption, FlangOption]>, HelpText<"Add -rpath with architecture-specific resource directory to the linker flags. " "When --hip-link is specified, also add -rpath with HIP runtime library directory to the linker flags">; def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>, + Visibility<[ClangOption, FlangOption]>, HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags. " "When --hip-link is specified, do not add -rpath with HIP runtime library directory to the linker flags">; def offload_add_rpath: Flag<["--"], "offload-add-rpath">, diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h index 5ee4d471670f4..a738c1f375768 100644 --- a/clang/include/clang/Frontend/FrontendOptions.h +++ b/clang/include/clang/Frontend/FrontendOptions.h @@ -404,6 +404,10 @@ class FrontendOptions { LLVM_PREFERRED_TYPE(bool) unsigned EmitPrettySymbolGraphs : 1; + /// Whether to generate reduced BMI for C++20 named modules. + LLVM_PREFERRED_TYPE(bool) + unsigned GenReducedBMI : 1; + CodeCompleteOptions CodeCompleteOpts; /// Specifies the output format of the AST. @@ -568,6 +572,9 @@ class FrontendOptions { /// Path which stores the output files for -ftime-trace std::string TimeTracePath; + /// Output Path for module output file. 
+ std::string ModuleOutputPath; + public: FrontendOptions() : DisableFree(false), RelocatablePCH(false), ShowHelp(false), @@ -582,7 +589,8 @@ class FrontendOptions { AllowPCMWithCompilerErrors(false), ModulesShareFileManager(true), EmitSymbolGraph(false), EmitExtensionSymbolGraphs(false), EmitSymbolGraphSymbolLabelsForTesting(false), - EmitPrettySymbolGraphs(false), TimeTraceGranularity(500) {} + EmitPrettySymbolGraphs(false), GenReducedBMI(false), + TimeTraceGranularity(500) {} /// getInputKindForExtension - Return the appropriate input kind for a file /// extension. For example, "c" would return Language::C. diff --git a/clang/include/clang/InstallAPI/DylibVerifier.h b/clang/include/clang/InstallAPI/DylibVerifier.h index a3df25f10de4b..31de212fc423a 100644 --- a/clang/include/clang/InstallAPI/DylibVerifier.h +++ b/clang/include/clang/InstallAPI/DylibVerifier.h @@ -28,6 +28,16 @@ enum class VerificationMode { using LibAttrs = llvm::StringMap; using ReexportedInterfaces = llvm::SmallVector; +// Pointers to information about a zippered declaration used for +// querying and reporting violations against different +// declarations that all map to the same symbol. +struct ZipperedDeclSource { + const FrontendAttrs *FA; + clang::SourceManager *SrcMgr; + Target T; +}; +using ZipperedDeclSources = std::vector; + /// Service responsible to tracking state of verification across the /// lifetime of InstallAPI. /// As declarations are collected during AST traversal, they are @@ -68,10 +78,10 @@ class DylibVerifier : llvm::MachO::RecordVisitor { DylibVerifier() = default; DylibVerifier(llvm::MachO::Records &&Dylib, ReexportedInterfaces &&Reexports, - DiagnosticsEngine *Diag, VerificationMode Mode, bool Demangle, - StringRef DSYMPath) + DiagnosticsEngine *Diag, VerificationMode Mode, bool Zippered, + bool Demangle, StringRef DSYMPath) : Dylib(std::move(Dylib)), Reexports(std::move(Reexports)), Mode(Mode), - Demangle(Demangle), DSYMPath(DSYMPath), + Zippered(Zippered), Demangle(Demangle), DSYMPath(DSYMPath), Exports(std::make_unique()), Ctx(VerifierContext{Diag}) {} Result verify(GlobalRecord *R, const FrontendAttrs *FA); @@ -118,6 +128,15 @@ class DylibVerifier : llvm::MachO::RecordVisitor { /// symbols should be omitted from the text-api file. bool shouldIgnoreReexport(const Record *R, SymbolContext &SymCtx) const; + // Ignore and omit unavailable symbols in zippered libraries. + bool shouldIgnoreZipperedAvailability(const Record *R, SymbolContext &SymCtx); + + // Check if an internal declaration in zippered library has an + // external declaration for a different platform. This results + // in the symbol being in a "seperate" platform slice. + bool shouldIgnoreInternalZipperedSymbol(const Record *R, + const SymbolContext &SymCtx) const; + /// Compare the visibility declarations to the linkage of symbol found in /// dylib. Result compareVisibility(const Record *R, SymbolContext &SymCtx, @@ -173,6 +192,9 @@ class DylibVerifier : llvm::MachO::RecordVisitor { // Controls what class of violations to report. VerificationMode Mode = VerificationMode::Invalid; + // Library is zippered. + bool Zippered = false; + // Attempt to demangle when reporting violations. bool Demangle = false; @@ -182,6 +204,10 @@ class DylibVerifier : llvm::MachO::RecordVisitor { // Valid symbols in final text file. std::unique_ptr Exports = std::make_unique(); + // Unavailable or obsoleted declarations for a zippered library. + // These are cross referenced against symbols in the dylib. 
+ llvm::StringMap DeferredZipperedSymbols; + // Track current state of verification while traversing AST. VerifierContext Ctx; diff --git a/clang/include/clang/Lex/ExternalPreprocessorSource.h b/clang/include/clang/Lex/ExternalPreprocessorSource.h index 685941b66bd8b..6775841860373 100644 --- a/clang/include/clang/Lex/ExternalPreprocessorSource.h +++ b/clang/include/clang/Lex/ExternalPreprocessorSource.h @@ -31,7 +31,7 @@ class ExternalPreprocessorSource { virtual void ReadDefinedMacros() = 0; /// Update an out-of-date identifier. - virtual void updateOutOfDateIdentifier(IdentifierInfo &II) = 0; + virtual void updateOutOfDateIdentifier(const IdentifierInfo &II) = 0; /// Return the identifier associated with the given ID number. /// diff --git a/clang/include/clang/Lex/HeaderSearch.h b/clang/include/clang/Lex/HeaderSearch.h index 855f81f775f8a..c5f90ef4cb368 100644 --- a/clang/include/clang/Lex/HeaderSearch.h +++ b/clang/include/clang/Lex/HeaderSearch.h @@ -547,14 +547,15 @@ class HeaderSearch { /// Return whether the specified file is a normal header, /// a system header, or a C++ friendly system header. SrcMgr::CharacteristicKind getFileDirFlavor(FileEntryRef File) { - return (SrcMgr::CharacteristicKind)getFileInfo(File).DirInfo; + if (const HeaderFileInfo *HFI = getExistingFileInfo(File)) + return (SrcMgr::CharacteristicKind)HFI->DirInfo; + return (SrcMgr::CharacteristicKind)HeaderFileInfo().DirInfo; } /// Mark the specified file as a "once only" file due to /// \#pragma once. void MarkFileIncludeOnce(FileEntryRef File) { - HeaderFileInfo &FI = getFileInfo(File); - FI.isPragmaOnce = true; + getFileInfo(File).isPragmaOnce = true; } /// Mark the specified file as a system header, e.g. due to @@ -834,16 +835,17 @@ class HeaderSearch { unsigned header_file_size() const { return FileInfo.size(); } - /// Return the HeaderFileInfo structure for the specified FileEntry, - /// in preparation for updating it in some way. + /// Return the HeaderFileInfo structure for the specified FileEntry, in + /// preparation for updating it in some way. HeaderFileInfo &getFileInfo(FileEntryRef FE); - /// Return the HeaderFileInfo structure for the specified FileEntry, - /// if it has ever been filled in. - /// \param WantExternal Whether the caller wants purely-external header file - /// info (where \p External is true). - const HeaderFileInfo *getExistingFileInfo(FileEntryRef FE, - bool WantExternal = true) const; + /// Return the HeaderFileInfo structure for the specified FileEntry, if it has + /// ever been filled in (either locally or externally). + const HeaderFileInfo *getExistingFileInfo(FileEntryRef FE) const; + + /// Return the headerFileInfo structure for the specified FileEntry, if it has + /// ever been filled in locally. + const HeaderFileInfo *getExistingLocalFileInfo(FileEntryRef FE) const; SearchDirIterator search_dir_begin() { return {*this, 0}; } SearchDirIterator search_dir_end() { return {*this, SearchDirs.size()}; } diff --git a/clang/include/clang/Lex/MacroInfo.h b/clang/include/clang/Lex/MacroInfo.h index 1237fc62eb6cf..19a706216d509 100644 --- a/clang/include/clang/Lex/MacroInfo.h +++ b/clang/include/clang/Lex/MacroInfo.h @@ -515,7 +515,7 @@ class ModuleMacro : public llvm::FoldingSetNode { friend class Preprocessor; /// The name defined by the macro. - IdentifierInfo *II; + const IdentifierInfo *II; /// The body of the #define, or nullptr if this is a #undef. 
MacroInfo *Macro; @@ -529,7 +529,7 @@ class ModuleMacro : public llvm::FoldingSetNode { /// The number of modules whose macros are directly overridden by this one. unsigned NumOverrides; - ModuleMacro(Module *OwningModule, IdentifierInfo *II, MacroInfo *Macro, + ModuleMacro(Module *OwningModule, const IdentifierInfo *II, MacroInfo *Macro, ArrayRef Overrides) : II(II), Macro(Macro), OwningModule(OwningModule), NumOverrides(Overrides.size()) { @@ -539,7 +539,7 @@ class ModuleMacro : public llvm::FoldingSetNode { public: static ModuleMacro *create(Preprocessor &PP, Module *OwningModule, - IdentifierInfo *II, MacroInfo *Macro, + const IdentifierInfo *II, MacroInfo *Macro, ArrayRef Overrides); void Profile(llvm::FoldingSetNodeID &ID) const { @@ -553,7 +553,7 @@ class ModuleMacro : public llvm::FoldingSetNode { } /// Get the name of the macro. - IdentifierInfo *getName() const { return II; } + const IdentifierInfo *getName() const { return II; } /// Get the ID of the module that exports this macro. Module *getOwningModule() const { return OwningModule; } diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h index 0836b7d439bb0..e89b4a2c5230e 100644 --- a/clang/include/clang/Lex/Preprocessor.h +++ b/clang/include/clang/Lex/Preprocessor.h @@ -836,7 +836,7 @@ class Preprocessor { ModuleMacroInfo *getModuleInfo(Preprocessor &PP, const IdentifierInfo *II) const { if (II->isOutOfDate()) - PP.updateOutOfDateIdentifier(const_cast(*II)); + PP.updateOutOfDateIdentifier(*II); // FIXME: Find a spare bit on IdentifierInfo and store a // HasModuleMacros flag. if (!II->hasMacroDefinition() || @@ -1162,7 +1162,7 @@ class Preprocessor { /// skipped. llvm::DenseMap RecordedSkippedRanges; - void updateOutOfDateIdentifier(IdentifierInfo &II) const; + void updateOutOfDateIdentifier(const IdentifierInfo &II) const; public: Preprocessor(std::shared_ptr PPOpts, @@ -1432,14 +1432,15 @@ class Preprocessor { MacroDirective *MD); /// Register an exported macro for a module and identifier. - ModuleMacro *addModuleMacro(Module *Mod, IdentifierInfo *II, MacroInfo *Macro, + ModuleMacro *addModuleMacro(Module *Mod, const IdentifierInfo *II, + MacroInfo *Macro, ArrayRef Overrides, bool &IsNew); ModuleMacro *getModuleMacro(Module *Mod, const IdentifierInfo *II); /// Get the list of leaf (non-overridden) module macros for a name. ArrayRef getLeafModuleMacros(const IdentifierInfo *II) const { if (II->isOutOfDate()) - updateOutOfDateIdentifier(const_cast(*II)); + updateOutOfDateIdentifier(*II); auto I = LeafModuleMacros.find(II); if (I != LeafModuleMacros.end()) return I->second; diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 6237e430502bf..38ff4357d9715 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -18,6 +18,7 @@ #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Sema.h" +#include "clang/Sema/SemaOpenMP.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/SaveAndRestore.h" @@ -329,7 +330,7 @@ class Parser : public CodeCompletionHandler { }; /// Identifiers which have been declared within a tentative parse. - SmallVector TentativelyDeclaredIdentifiers; + SmallVector TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. 
@@ -1601,6 +1602,8 @@ class Parser : public CodeCompletionHandler { const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); + StringLiteral *ParseCXXDeletedFunctionMessage(); + void SkipDeletedFunctionBody(); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, @@ -1928,15 +1931,11 @@ class Parser : public CodeCompletionHandler { bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); - bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, - ParsedType ObjectType, - bool ObjectHasErrors, - bool EnteringContext, - bool *MayBePseudoDestructor = nullptr, - bool IsTypename = false, - IdentifierInfo **LastII = nullptr, - bool OnlyNamespace = false, - bool InUsingDeclaration = false); + bool ParseOptionalCXXScopeSpecifier( + CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, + bool EnteringContext, bool *MayBePseudoDestructor = nullptr, + bool IsTypename = false, const IdentifierInfo **LastII = nullptr, + bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions @@ -2540,7 +2539,7 @@ class Parser : public CodeCompletionHandler { /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) - Actions.startOpenMPLoop(); + Actions.OpenMP().startOpenMPLoop(); if (getLangOpts().CPlusPlus) return Tok.is(tok::kw_using) || isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); @@ -3399,7 +3398,7 @@ class Parser : public CodeCompletionHandler { SourceLocation Loc); /// Parse clauses for '#pragma omp [begin] declare target'. - void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI); + void ParseOMPDeclareTargetClauses(SemaOpenMP::DeclareTargetContextInfo &DTCI); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind, @@ -3489,7 +3488,7 @@ class Parser : public CodeCompletionHandler { /// Parses indirect clause /// \param ParseOnly true to skip the clause's semantic actions and return // false; - bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI, + bool ParseOpenMPIndirectClause(SemaOpenMP::DeclareTargetContextInfo &DTCI, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. @@ -3559,12 +3558,12 @@ class Parser : public CodeCompletionHandler { /// Parses a reserved locator like 'omp_all_memory'. bool ParseOpenMPReservedLocator(OpenMPClauseKind Kind, - Sema::OpenMPVarListDataTy &Data, + SemaOpenMP::OpenMPVarListDataTy &Data, const LangOptions &LangOpts); /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl &Vars, - Sema::OpenMPVarListDataTy &Data); + SemaOpenMP::OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, @@ -3572,11 +3571,11 @@ class Parser : public CodeCompletionHandler { SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. - bool parseMapperModifier(Sema::OpenMPVarListDataTy &Data); + bool parseMapperModifier(SemaOpenMP::OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) - bool parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data); + bool parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data); //===--------------------------------------------------------------------===// // OpenACC Parsing. @@ -3662,6 +3661,8 @@ class Parser : public CodeCompletionHandler { bool ParseOpenACCGangArgList(); /// Parses a 'gang-arg', used for the 'gang' clause. bool ParseOpenACCGangArg(); + /// Parses a 'condition' expr, ensuring it results in a + ExprResult ParseOpenACCConditionExpr(); private: //===--------------------------------------------------------------------===// diff --git a/clang/include/clang/Sema/CodeCompleteConsumer.h b/clang/include/clang/Sema/CodeCompleteConsumer.h index a2028e40f83d5..0924dc27af82b 100644 --- a/clang/include/clang/Sema/CodeCompleteConsumer.h +++ b/clang/include/clang/Sema/CodeCompleteConsumer.h @@ -362,7 +362,7 @@ class CodeCompletionContext { QualType BaseType; /// The identifiers for Objective-C selector parts. - ArrayRef SelIdents; + ArrayRef SelIdents; /// The scope specifier that comes before the completion token e.g. /// "a::b::" @@ -378,8 +378,9 @@ class CodeCompletionContext { : CCKind(CCKind), IsUsingDeclaration(false), SelIdents(std::nullopt) {} /// Construct a new code-completion context of the given kind. - CodeCompletionContext(Kind CCKind, QualType T, - ArrayRef SelIdents = std::nullopt) + CodeCompletionContext( + Kind CCKind, QualType T, + ArrayRef SelIdents = std::nullopt) : CCKind(CCKind), IsUsingDeclaration(false), SelIdents(SelIdents) { if (CCKind == CCC_DotMemberAccess || CCKind == CCC_ArrowMemberAccess || CCKind == CCC_ObjCPropertyAccess || CCKind == CCC_ObjCClassMessage || @@ -406,7 +407,7 @@ class CodeCompletionContext { QualType getBaseType() const { return BaseType; } /// Retrieve the Objective-C selector identifiers. - ArrayRef getSelIdents() const { return SelIdents; } + ArrayRef getSelIdents() const { return SelIdents; } /// Determines whether we want C++ constructors as results within this /// context. diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h index a176159707486..c9eecdafe62c7 100644 --- a/clang/include/clang/Sema/DeclSpec.h +++ b/clang/include/clang/Sema/DeclSpec.h @@ -1049,7 +1049,7 @@ class UnqualifiedId { union { /// When Kind == IK_Identifier, the parsed identifier, or when /// Kind == IK_UserLiteralId, the identifier suffix. - IdentifierInfo *Identifier; + const IdentifierInfo *Identifier; /// When Kind == IK_OperatorFunctionId, the overloaded operator /// that we parsed. @@ -1111,7 +1111,7 @@ class UnqualifiedId { /// \param IdLoc the location of the parsed identifier. void setIdentifier(const IdentifierInfo *Id, SourceLocation IdLoc) { Kind = UnqualifiedIdKind::IK_Identifier; - Identifier = const_cast(Id); + Identifier = Id; StartLocation = EndLocation = IdLoc; } @@ -1154,9 +1154,9 @@ class UnqualifiedId { /// /// \param IdLoc the location of the identifier. void setLiteralOperatorId(const IdentifierInfo *Id, SourceLocation OpLoc, - SourceLocation IdLoc) { + SourceLocation IdLoc) { Kind = UnqualifiedIdKind::IK_LiteralOperatorId; - Identifier = const_cast(Id); + Identifier = Id; StartLocation = OpLoc; EndLocation = IdLoc; } @@ -1225,7 +1225,7 @@ class UnqualifiedId { /// \param Id the identifier. 
void setImplicitSelfParam(const IdentifierInfo *Id) { Kind = UnqualifiedIdKind::IK_ImplicitSelfParam; - Identifier = const_cast(Id); + Identifier = Id; StartLocation = EndLocation = SourceLocation(); } @@ -1327,7 +1327,7 @@ struct DeclaratorChunk { /// Parameter type lists will have type info (if the actions module provides /// it), but may have null identifier info: e.g. for 'void foo(int X, int)'. struct ParamInfo { - IdentifierInfo *Ident; + const IdentifierInfo *Ident; SourceLocation IdentLoc; Decl *Param; @@ -1339,11 +1339,10 @@ struct DeclaratorChunk { std::unique_ptr DefaultArgTokens; ParamInfo() = default; - ParamInfo(IdentifierInfo *ident, SourceLocation iloc, - Decl *param, + ParamInfo(const IdentifierInfo *ident, SourceLocation iloc, Decl *param, std::unique_ptr DefArgTokens = nullptr) - : Ident(ident), IdentLoc(iloc), Param(param), - DefaultArgTokens(std::move(DefArgTokens)) {} + : Ident(ident), IdentLoc(iloc), Param(param), + DefaultArgTokens(std::move(DefArgTokens)) {} }; struct TypeAndRange { @@ -2326,7 +2325,7 @@ class Declarator { return BindingGroup.isSet(); } - IdentifierInfo *getIdentifier() const { + const IdentifierInfo *getIdentifier() const { if (Name.getKind() == UnqualifiedIdKind::IK_Identifier) return Name.Identifier; @@ -2335,7 +2334,7 @@ class Declarator { SourceLocation getIdentifierLoc() const { return Name.StartLocation; } /// Set the name of this declarator to be the given identifier. - void SetIdentifier(IdentifierInfo *Id, SourceLocation IdLoc) { + void SetIdentifier(const IdentifierInfo *Id, SourceLocation IdLoc) { Name.setIdentifier(Id, IdLoc); } diff --git a/clang/include/clang/Sema/Lookup.h b/clang/include/clang/Sema/Lookup.h index 2f2f2607a937f..0db5b847038ff 100644 --- a/clang/include/clang/Sema/Lookup.h +++ b/clang/include/clang/Sema/Lookup.h @@ -153,28 +153,30 @@ class LookupResult { using iterator = UnresolvedSetImpl::iterator; - LookupResult(Sema &SemaRef, const DeclarationNameInfo &NameInfo, - Sema::LookupNameKind LookupKind, - Sema::RedeclarationKind Redecl = Sema::NotForRedeclaration) + LookupResult( + Sema &SemaRef, const DeclarationNameInfo &NameInfo, + Sema::LookupNameKind LookupKind, + RedeclarationKind Redecl = RedeclarationKind::NotForRedeclaration) : SemaPtr(&SemaRef), NameInfo(NameInfo), LookupKind(LookupKind), - Redecl(Redecl != Sema::NotForRedeclaration), - ExternalRedecl(Redecl == Sema::ForExternalRedeclaration), - DiagnoseAccess(Redecl == Sema::NotForRedeclaration), - DiagnoseAmbiguous(Redecl == Sema::NotForRedeclaration) { + Redecl(Redecl != RedeclarationKind::NotForRedeclaration), + ExternalRedecl(Redecl == RedeclarationKind::ForExternalRedeclaration), + DiagnoseAccess(Redecl == RedeclarationKind::NotForRedeclaration), + DiagnoseAmbiguous(Redecl == RedeclarationKind::NotForRedeclaration) { configure(); } // TODO: consider whether this constructor should be restricted to take // as input a const IdentifierInfo* (instead of Name), // forcing other cases towards the constructor taking a DNInfo. 
- LookupResult(Sema &SemaRef, DeclarationName Name, SourceLocation NameLoc, - Sema::LookupNameKind LookupKind, - Sema::RedeclarationKind Redecl = Sema::NotForRedeclaration) + LookupResult( + Sema &SemaRef, DeclarationName Name, SourceLocation NameLoc, + Sema::LookupNameKind LookupKind, + RedeclarationKind Redecl = RedeclarationKind::NotForRedeclaration) : SemaPtr(&SemaRef), NameInfo(Name, NameLoc), LookupKind(LookupKind), - Redecl(Redecl != Sema::NotForRedeclaration), - ExternalRedecl(Redecl == Sema::ForExternalRedeclaration), - DiagnoseAccess(Redecl == Sema::NotForRedeclaration), - DiagnoseAmbiguous(Redecl == Sema::NotForRedeclaration) { + Redecl(Redecl != RedeclarationKind::NotForRedeclaration), + ExternalRedecl(Redecl == RedeclarationKind::ForExternalRedeclaration), + DiagnoseAccess(Redecl == RedeclarationKind::NotForRedeclaration), + DiagnoseAmbiguous(Redecl == RedeclarationKind::NotForRedeclaration) { configure(); } @@ -285,9 +287,10 @@ class LookupResult { return ExternalRedecl; } - Sema::RedeclarationKind redeclarationKind() const { - return ExternalRedecl ? Sema::ForExternalRedeclaration : - Redecl ? Sema::ForVisibleRedeclaration : Sema::NotForRedeclaration; + RedeclarationKind redeclarationKind() const { + return ExternalRedecl ? RedeclarationKind::ForExternalRedeclaration + : Redecl ? RedeclarationKind::ForVisibleRedeclaration + : RedeclarationKind::NotForRedeclaration; } /// Specify whether hidden declarations are visible, e.g., @@ -615,9 +618,9 @@ class LookupResult { } /// Change this lookup's redeclaration kind. - void setRedeclarationKind(Sema::RedeclarationKind RK) { - Redecl = (RK != Sema::NotForRedeclaration); - ExternalRedecl = (RK == Sema::ForExternalRedeclaration); + void setRedeclarationKind(RedeclarationKind RK) { + Redecl = (RK != RedeclarationKind::NotForRedeclaration); + ExternalRedecl = (RK == RedeclarationKind::ForExternalRedeclaration); configure(); } diff --git a/clang/include/clang/Sema/ParsedAttr.h b/clang/include/clang/Sema/ParsedAttr.h index 2a7389f78a168..ca766c3de5eea 100644 --- a/clang/include/clang/Sema/ParsedAttr.h +++ b/clang/include/clang/Sema/ParsedAttr.h @@ -94,7 +94,7 @@ struct PropertyData { : GetterId(getterId), SetterId(setterId) {} }; -} // namespace +} // namespace detail /// Wraps an identifier and optional source location for the identifier. struct IdentifierLoc { @@ -748,11 +748,6 @@ class AttributePool { IdentifierInfo *scopeName, SourceLocation scopeLoc, ArgsUnion *args, unsigned numArgs, ParsedAttr::Form form, SourceLocation ellipsisLoc = SourceLocation()) { - size_t temp = - ParsedAttr::totalSizeToAlloc(numArgs, 0, 0, 0, 0); - (void)temp; void *memory = allocate( ParsedAttr::totalSizeToAlloc TemplateArgs, bool ArgsInvalid, @@ -236,7 +236,8 @@ namespace clang { TemplateIdAnnotation(const TemplateIdAnnotation &) = delete; TemplateIdAnnotation(SourceLocation TemplateKWLoc, - SourceLocation TemplateNameLoc, IdentifierInfo *Name, + SourceLocation TemplateNameLoc, + const IdentifierInfo *Name, OverloadedOperatorKind OperatorKind, ParsedTemplateTy OpaqueTemplateName, TemplateNameKind TemplateKind, diff --git a/clang/include/clang/Sema/Redeclaration.h b/clang/include/clang/Sema/Redeclaration.h new file mode 100644 index 0000000000000..ae18b922f5cd9 --- /dev/null +++ b/clang/include/clang/Sema/Redeclaration.h @@ -0,0 +1,31 @@ +//===- Redeclaration.h - Redeclarations--------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RedeclarationKind enum.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_REDECLARATION_H
+#define LLVM_CLANG_SEMA_REDECLARATION_H
+
+/// Specifies whether (or how) name lookup is being performed for a
+/// redeclaration (vs. a reference).
+enum class RedeclarationKind {
+  /// The lookup is a reference to this name that is not for the
+  /// purpose of redeclaring the name.
+  NotForRedeclaration = 0,
+  /// The lookup results will be used for redeclaration of a name,
+  /// if an entity by that name already exists and is visible.
+  ForVisibleRedeclaration,
+  /// The lookup results will be used for redeclaration of a name
+  /// with external linkage; non-visible lookup results with external linkage
+  /// may also be found.
+  ForExternalRedeclaration
+};
+
+#endif // LLVM_CLANG_SEMA_REDECLARATION_H
diff --git a/clang/include/clang/Sema/Scope.h b/clang/include/clang/Sema/Scope.h
index 099c2739e8603..1752a25111a77 100644
--- a/clang/include/clang/Sema/Scope.h
+++ b/clang/include/clang/Sema/Scope.h
@@ -156,6 +156,9 @@ class Scope {
     /// This is the scope of an OpenACC Compute Construct, which restricts
     /// jumping into/out of it.
     OpenACCComputeConstructScope = 0x10000000,
+
+    /// This is the scope of a type alias declaration.
+    TypeAliasScope = 0x20000000,
   };

 private:
@@ -580,6 +583,9 @@ class Scope {
   /// if/switch/while/for statement.
   bool isControlScope() const { return getFlags() & Scope::ControlScope; }

+  /// Determine whether this scope is a type alias scope.
+  bool isTypeAliasScope() const { return getFlags() & Scope::TypeAliasScope; }
+
   /// Returns if rhs has a higher scope depth than this.
/// /// The caller is responsible for calling this only if one of the two scopes diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 790f41627522d..756fd082da0b2 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -26,24 +26,22 @@ #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" -#include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" -#include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" +#include "clang/Basic/Cuda.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" -#include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" @@ -55,19 +53,21 @@ #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" +#include "clang/Sema/Redeclaration.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaBase.h" #include "clang/Sema/SemaConcept.h" +#include "clang/Sema/SemaDiagnostic.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" -#include "llvm/Frontend/OpenMP/OMPConstants.h" #include #include #include @@ -166,12 +166,6 @@ class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; -class OMPThreadPrivateDecl; -class OMPRequiresDecl; -class OMPDeclareReductionDecl; -class OMPDeclareSimdDecl; -class OMPClause; -struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; @@ -183,8 +177,11 @@ class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; +class SemaCUDA; class SemaHLSL; class SemaOpenACC; +class SemaOpenMP; +class SemaSYCL; class StandardConversionSequence; class Stmt; class StringLiteral; @@ -295,193 +292,6 @@ class FileNullabilityMap { } }; -// TODO SYCL Integration header approach relies on an assumption that kernel -// lambda objects created by the host compiler and any of the device compilers -// will be identical wrt to field types, order and offsets. Some verification -// mechanism should be developed to enforce that. - -// TODO FIXME SYCL Support for SYCL in FE should be refactored: -// - kernel identification and generation should be made a separate pass over -// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl + -// FunctionTemplateDecl::getSpecializations() mechanism could be used for that. 
-// - All SYCL stuff on Sema level should be encapsulated into a single Sema -// field -// - Move SYCL stuff into a separate header - -// Represents contents of a SYCL integration header file produced by a SYCL -// device compiler and used by SYCL host compiler (via forced inclusion into -// compiled SYCL source): -// - SYCL kernel names -// - SYCL kernel parameters and offsets of corresponding actual arguments -class SYCLIntegrationHeader { -public: - // Kind of kernel's parameters as captured by the compiler in the - // kernel lambda or function object - enum kernel_param_kind_t { - kind_first, - kind_accessor = kind_first, - kind_std_layout, - kind_sampler, - kind_pointer, - kind_specialization_constants_buffer, - kind_stream, - kind_last = kind_stream - }; - -public: - SYCLIntegrationHeader(Sema &S); - - /// Emits contents of the header into given stream. - void emit(raw_ostream &Out); - - /// Emits contents of the header into a file with given name. - /// Returns true/false on success/failure. - bool emit(StringRef MainSrc); - - /// Signals that subsequent parameter descriptor additions will go to - /// the kernel with given name. Starts new kernel invocation descriptor. - void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType, - SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel, - int64_t ObjSize); - - /// Adds a kernel parameter descriptor to current kernel invocation - /// descriptor. - void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset); - - /// Signals that addition of parameter descriptors to current kernel - /// invocation descriptor has finished. - void endKernel(); - - /// Registers a specialization constant to emit info for it into the header. - void addSpecConstant(StringRef IDName, QualType IDType); - - /// Update the names of a kernel description based on its SyclKernel. - void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name, - StringRef StableName) { - auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) { - return KD.SyclKernel == SyclKernel; - }); - - assert(Itr != KernelDescs.end() && "Unknown kernel description"); - Itr->updateKernelNames(Name, StableName); - } - - /// Signals that emission of __sycl_device_global_registration type and - /// declaration of variable __sycl_device_global_registrar of this type in - /// integration header is required. - void addDeviceGlobalRegistration() { - NeedToEmitDeviceGlobalRegistration = true; - } - - /// Signals that emission of __sycl_host_pipe_registration type and - /// declaration of variable __sycl_host_pipe_registrar of this type in - /// integration header is required. - void addHostPipeRegistration() { - NeedToEmitHostPipeRegistration = true; - } - -private: - // Kernel actual parameter descriptor. - struct KernelParamDesc { - // Represents a parameter kind. - kernel_param_kind_t Kind = kind_last; - // If Kind is kind_scalar or kind_struct, then - // denotes parameter size in bytes (includes padding for structs) - // If Kind is kind_accessor - // denotes access target; possible access targets are defined in - // access/access.hpp - int Info = 0; - // Offset of the captured parameter value in the lambda or function object. - unsigned Offset = 0; - - KernelParamDesc() = default; - }; - - // Kernel invocation descriptor - struct KernelDesc { - /// sycl_kernel function associated with this kernel. - const FunctionDecl *SyclKernel; - - /// Kernel name. - std::string Name; - - /// Kernel name type. 
- QualType NameType; - - /// Kernel name with stable lambda name mangling - std::string StableName; - - SourceLocation KernelLocation; - - /// Whether this kernel is an ESIMD one. - bool IsESIMDKernel; - - /// Descriptor of kernel actual parameters. - SmallVector Params; - - // If we are in unnamed kernel/lambda mode AND this is one that the user - // hasn't provided an explicit name for. - bool IsUnnamedKernel; - - /// Size of the kernel object. - int64_t ObjSize = 0; - - KernelDesc(const FunctionDecl *SyclKernel, QualType NameType, - SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel, - int64_t ObjSize) - : SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc), - IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel), - ObjSize(ObjSize) {} - - void updateKernelNames(StringRef Name, StringRef StableName) { - this->Name = Name.str(); - this->StableName = StableName.str(); - } - }; - - /// Returns the latest invocation descriptor started by - /// SYCLIntegrationHeader::startKernel - KernelDesc *getCurKernelDesc() { - return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1] - : nullptr; - } - -private: - /// Keeps invocation descriptors for each kernel invocation started by - /// SYCLIntegrationHeader::startKernel - SmallVector KernelDescs; - - using SpecConstID = std::pair; - - /// Keeps specialization constants met in the translation unit. Maps spec - /// constant's ID type to generated unique name. Duplicates are removed at - /// integration header emission time. - llvm::SmallVector SpecConsts; - - Sema &S; - - /// Keeps track of whether declaration of __sycl_device_global_registration - /// type and __sycl_device_global_registrar variable are required to emit. - bool NeedToEmitDeviceGlobalRegistration = false; - - /// Keeps track of whether declaration of __sycl_host_pipe_registration - /// type and __sycl_host_pipe_registrar variable are required to emit. - bool NeedToEmitHostPipeRegistration = false; -}; - -class SYCLIntegrationFooter { -public: - SYCLIntegrationFooter(Sema &S) : S(S) {} - bool emit(StringRef MainSrc); - void addVarDecl(const VarDecl *VD); - -private: - bool emit(raw_ostream &O); - Sema &S; - llvm::SmallVector GlobalVars; - void emitSpecIDName(raw_ostream &O, const VarDecl *VD); -}; - /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token, all functions that update or consume /// the type take a start location of the token they are looking at as a @@ -544,6 +354,14 @@ class PreferredTypeBuilder { llvm::function_ref ComputeType; }; +struct SkipBodyInfo { + SkipBodyInfo() = default; + bool ShouldSkip = false; + bool CheckSameAsPrevious = false; + NamedDecl *Previous = nullptr; + NamedDecl *New = nullptr; +}; + /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of @@ -610,6 +428,31 @@ enum class TemplateDeductionResult { AlreadyDiagnosed }; +/// Kinds of C++ special members. +enum class CXXSpecialMemberKind { + DefaultConstructor, + CopyConstructor, + MoveConstructor, + CopyAssignment, + MoveAssignment, + Destructor, + Invalid +}; + +/// The kind of conversion being performed. +enum class CheckedConversionKind { + /// An implicit conversion. + Implicit, + /// A C-style cast. + CStyleCast, + /// A functional-style cast. + FunctionalCast, + /// A cast other than a C-style cast. + OtherCast, + /// A conversion for an operand of a builtin overloaded operator. 
+  ForBuiltinOverloadedOp
+};
+
 /// Sema - This implements semantic analysis and AST building for C.
 /// \nosubgrouping
 class Sema final : public SemaBase {
@@ -653,9 +496,6 @@ class Sema final : public SemaBase {
   // 35. Code Completion (SemaCodeComplete.cpp)
   // 36. FixIt Helpers (SemaFixItUtils.cpp)
   // 37. Name Lookup for RISC-V Vector Intrinsic (SemaRISCVVectorLookup.cpp)
-  // 38. CUDA (SemaCUDA.cpp)
-  // 39. OpenMP Directives and Clauses (SemaOpenMP.cpp)
-  // 40. SYCL Constructs (SemaSYCL.cpp)

   /// \name Semantic Analysis
   /// Implementations are in Sema.cpp
@@ -756,7 +596,7 @@ class Sema final : public SemaBase {

   /// Invent a new identifier for parameters of abbreviated templates.
   IdentifierInfo *
-  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
+  InventAbbreviatedTemplateParameterTypeName(const IdentifierInfo *ParamName,
                                              unsigned Index);

   void emitAndClearUnusedLocalTypedefWarnings();
@@ -876,28 +716,14 @@ class Sema final : public SemaBase {
   void checkTypeSupport(QualType Ty, SourceLocation Loc,
                         ValueDecl *D = nullptr);

-  /// The kind of conversion being performed.
-  enum CheckedConversionKind {
-    /// An implicit conversion.
-    CCK_ImplicitConversion,
-    /// A C-style cast.
-    CCK_CStyleCast,
-    /// A functional-style cast.
-    CCK_FunctionalCast,
-    /// A cast other than a C-style cast.
-    CCK_OtherCast,
-    /// A conversion for an operand of a builtin overloaded operator.
-    CCK_ForBuiltinOverloadedOp
-  };
-
   /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
   /// cast. If there is already an implicit cast, merge into the existing one.
   /// If isLvalue, the result of the cast is an lvalue.
-  ExprResult
-  ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
-                    ExprValueKind VK = VK_PRValue,
-                    const CXXCastPath *BasePath = nullptr,
-                    CheckedConversionKind CCK = CCK_ImplicitConversion);
+  ExprResult ImpCastExprToType(
+      Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_PRValue,
+      const CXXCastPath *BasePath = nullptr,
+      CheckedConversionKind CCK = CheckedConversionKind::Implicit);

   /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
   /// to the conversion from scalar type ScalarTy to the Boolean type.
@@ -1149,9 +988,19 @@ class Sema final : public SemaBase {
     return DelayedDiagnostics.push(pool);
   }

+  /// Diagnostics that are emitted only if we discover that the given function
+  /// must be codegen'ed. Because handling these correctly adds overhead to
+  /// compilation, this is currently only enabled for CUDA compilations.
+  SemaDiagnosticBuilder::DeferredDiagnosticsType DeviceDeferredDiags;
+
   /// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext; + SemaCUDA &CUDA() { + assert(CUDAPtr); + return *CUDAPtr; + } + SemaHLSL &HLSL() { assert(HLSLPtr); return *HLSLPtr; @@ -1162,6 +1011,16 @@ class Sema final : public SemaBase { return *OpenACCPtr; } + SemaOpenMP &OpenMP() { + assert(OpenMPPtr && "SemaOpenMP is dead"); + return *OpenMPPtr; + } + + SemaSYCL &SYCL() { + assert(SYCLPtr); + return *SYCLPtr; + } + protected: friend class Parser; friend class InitializationSequence; @@ -1192,8 +1051,11 @@ class Sema final : public SemaBase { mutable IdentifierInfo *Ident_super; + std::unique_ptr CUDAPtr; std::unique_ptr HLSLPtr; std::unique_ptr OpenACCPtr; + std::unique_ptr OpenMPPtr; + std::unique_ptr SYCLPtr; ///@} @@ -1934,8 +1796,9 @@ class Sema final : public SemaBase { public: static bool isCast(CheckedConversionKind CCK) { - return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || - CCK == CCK_OtherCast; + return CCK == CheckedConversionKind::CStyleCast || + CCK == CheckedConversionKind::FunctionalCast || + CCK == CheckedConversionKind::OtherCast; } /// ActOnCXXNamedCast - Parse @@ -2157,6 +2020,8 @@ class Sema final : public SemaBase { }; bool IsLayoutCompatible(QualType T1, QualType T2) const; + bool IsPointerInterconvertibleBaseOf(const TypeSourceInfo *Base, + const TypeSourceInfo *Derived); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); @@ -2793,14 +2658,6 @@ class Sema final : public SemaBase { return Entity->getOwningModule(); } - struct SkipBodyInfo { - SkipBodyInfo() = default; - bool ShouldSkip = false; - bool CheckSameAsPrevious = false; - NamedDecl *Previous = nullptr; - NamedDecl *New = nullptr; - }; - DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, @@ -3129,13 +2986,6 @@ class Sema final : public SemaBase { QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); - void ActOnHLSLTopLevelFunction(FunctionDecl *FD); - void CheckHLSLEntryPoint(FunctionDecl *FD); - void CheckHLSLSemanticAnnotation(FunctionDecl *EntryPoint, const Decl *Param, - const HLSLAnnotationAttr *AnnotationAttr); - void DiagnoseHLSLAttrStageMismatch( - const Attr *A, HLSLShaderAttr::ShaderType Stage, - std::initializer_list AllowedStages); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); @@ -3147,9 +2997,9 @@ class Sema final : public SemaBase { SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, - SourceLocation NameLoc, IdentifierInfo *Name, - QualType T, TypeSourceInfo *TSInfo, - StorageClass SC); + SourceLocation NameLoc, + const IdentifierInfo *Name, QualType T, + TypeSourceInfo *TSInfo, StorageClass SC); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. 
@@ -3215,14 +3065,18 @@ class Sema final : public SemaBase { void ActOnDocumentableDecls(ArrayRef Group); enum class FnBodyKind { - /// C++ [dcl.fct.def.general]p1 + /// C++26 [dcl.fct.def.general]p1 /// function-body: /// ctor-initializer[opt] compound-statement /// function-try-block Other, /// = default ; Default, + /// deleted-function-body + /// + /// deleted-function-body: /// = delete ; + /// = delete ( unevaluated-string ) ; Delete }; @@ -3554,7 +3408,7 @@ class Sema final : public SemaBase { /// variable. void DiagnoseUnusedButSetDecl(const VarDecl *VD, DiagReceiverTy DiagReceiver); - ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, + ObjCInterfaceDecl *getObjCInterfaceDecl(const IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); @@ -3597,7 +3451,8 @@ class Sema final : public SemaBase { bool ConstexprSupported, bool CLinkageMayDiffer); /// type checking declaration initializers (C99 6.7.8) - bool CheckForConstantInitializer(Expr *e, QualType t); + bool CheckForConstantInitializer( + Expr *Init, unsigned DiagID = diag::err_init_element_not_constant); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, @@ -3609,14 +3464,6 @@ class Sema final : public SemaBase { sema::LambdaScopeInfo *RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator); - /// The declarator \p D defines a function in the scope \p S which is nested - /// in an `omp begin/end declare variant` scope. In this method we create a - /// declaration for \p D and rename \p D according to the OpenMP context - /// selector of the surrounding scope. Return all base functions in \p Bases. - void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( - Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, - SmallVectorImpl &Bases); - // Heuristically tells if the function is `get_return_object` member of a // coroutine promise_type by matching the function name. static bool CanBeGetReturnObject(const FunctionDecl *FD); @@ -3631,8 +3478,9 @@ class Sema final : public SemaBase { /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. - ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, - QualType FieldTy, bool IsMsStruct, Expr *BitWidth); + ExprResult VerifyBitField(SourceLocation FieldLoc, + const IdentifierInfo *FieldName, QualType FieldTy, + bool IsMsStruct, Expr *BitWidth); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid @@ -3848,20 +3696,12 @@ class Sema final : public SemaBase { InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); - enum CUDAFunctionTarget { - CFT_Device, - CFT_Global, - CFT_Host, - CFT_HostDevice, - CFT_InvalidTarget - }; - /// Check validaty of calling convention attribute \p attr. If \p FD /// is not null pointer, use \p FD to determine the CUDA/HIP host/device /// target. Otherwise, it is specified by \p CFT. 
- bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, - const FunctionDecl *FD = nullptr, - CUDAFunctionTarget CFT = CFT_InvalidTarget); + bool CheckCallingConvAttr( + const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr, + CUDAFunctionTarget CFT = CUDAFunctionTarget::InvalidTarget); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); @@ -3898,14 +3738,6 @@ class Sema final : public SemaBase { StringRef UuidAsWritten, MSGuidDecl *GuidDecl); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); - HLSLNumThreadsAttr *mergeHLSLNumThreadsAttr(Decl *D, - const AttributeCommonInfo &AL, - int X, int Y, int Z); - HLSLShaderAttr *mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL, - HLSLShaderAttr::ShaderType ShaderType); - HLSLParamModifierAttr * - mergeHLSLParamModifierAttr(Decl *D, const AttributeCommonInfo &AL, - HLSLParamModifierAttr::Spelling Spelling); WebAssemblyImportNameAttr * mergeImportNameAttr(Decl *D, const WebAssemblyImportNameAttr &AL); @@ -4441,22 +4273,11 @@ class Sema final : public SemaBase { SourceRange SpecificationRange, ArrayRef DynamicExceptions, ArrayRef DynamicExceptionRanges, Expr *NoexceptExpr); - /// Kinds of C++ special members. - enum CXXSpecialMember { - CXXDefaultConstructor, - CXXCopyConstructor, - CXXMoveConstructor, - CXXCopyAssignment, - CXXMoveAssignment, - CXXDestructor, - CXXInvalid - }; - class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. - bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, + bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMemberKind CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); @@ -4822,7 +4643,7 @@ class Sema final : public SemaBase { void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, - CXXSpecialMember CSM, + CXXSpecialMemberKind CSM, SourceLocation DefaultLoc); void CheckDelayedMemberExceptionSpecs(); @@ -4982,13 +4803,14 @@ class Sema final : public SemaBase { void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); - CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { + CXXSpecialMemberKind getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id); + SourceLocation IdLoc, + const IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); @@ -5008,7 +4830,8 @@ class Sema final : public SemaBase { AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); - void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); + void DiagnoseNontrivial(const CXXRecordDecl *Record, + CXXSpecialMemberKind CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". @@ -5018,26 +4841,31 @@ class Sema final : public SemaBase { TAH_ConsiderTrivialABI }; - bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, + bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
class DefaultedFunctionKind { + LLVM_PREFERRED_TYPE(CXXSpecialMemberKind) unsigned SpecialMember : 8; unsigned Comparison : 8; public: DefaultedFunctionKind() - : SpecialMember(CXXInvalid), + : SpecialMember(llvm::to_underlying(CXXSpecialMemberKind::Invalid)), Comparison(llvm::to_underlying(DefaultedComparisonKind::None)) {} - DefaultedFunctionKind(CXXSpecialMember CSM) - : SpecialMember(CSM), + DefaultedFunctionKind(CXXSpecialMemberKind CSM) + : SpecialMember(llvm::to_underlying(CSM)), Comparison(llvm::to_underlying(DefaultedComparisonKind::None)) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) - : SpecialMember(CXXInvalid), Comparison(llvm::to_underlying(Comp)) {} + : SpecialMember(llvm::to_underlying(CXXSpecialMemberKind::Invalid)), + Comparison(llvm::to_underlying(Comp)) {} - bool isSpecialMember() const { return SpecialMember != CXXInvalid; } + bool isSpecialMember() const { + return static_cast(SpecialMember) != + CXXSpecialMemberKind::Invalid; + } bool isComparison() const { return static_cast(Comparison) != DefaultedComparisonKind::None; @@ -5047,8 +4875,8 @@ class Sema final : public SemaBase { return isSpecialMember() || isComparison(); } - CXXSpecialMember asSpecialMember() const { - return static_cast(SpecialMember); + CXXSpecialMemberKind asSpecialMember() const { + return static_cast(SpecialMember); } DefaultedComparisonKind asComparison() const { return static_cast(Comparison); @@ -5056,7 +4884,8 @@ class Sema final : public SemaBase { /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { - static_assert(CXXInvalid > CXXDestructor, + static_assert(llvm::to_underlying(CXXSpecialMemberKind::Invalid) > + llvm::to_underlying(CXXSpecialMemberKind::Destructor), "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); @@ -5100,10 +4929,12 @@ class Sema final : public SemaBase { SourceLocation EqualLoc); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); - void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); + void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc, + StringLiteral *Message = nullptr); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); - void SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind); + void SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind, + StringLiteral *DeletedMessage = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); @@ -5162,7 +4993,7 @@ class Sema final : public SemaBase { /// definition in this translation unit. llvm::MapVector UndefinedButUsed; - typedef llvm::PointerIntPair + typedef llvm::PointerIntPair SpecialMemberDecl; /// The C++ special members which we are currently in the process of @@ -5442,34 +5273,6 @@ class Sema final : public SemaBase { /// example, in a for-range initializer). bool InLifetimeExtendingContext = false; - /// Whether we are currently in a context in which all temporaries must be - /// materialized. - /// - /// [class.temporary]/p2: - /// The materialization of a temporary object is generally delayed as long - /// as possible in order to avoid creating unnecessary temporary objects. 
- /// - /// Temporary objects are materialized: - /// (2.1) when binding a reference to a prvalue ([dcl.init.ref], - /// [expr.type.conv], [expr.dynamic.cast], [expr.static.cast], - /// [expr.const.cast], [expr.cast]), - /// - /// (2.2) when performing member access on a class prvalue ([expr.ref], - /// [expr.mptr.oper]), - /// - /// (2.3) when performing an array-to-pointer conversion or subscripting - /// on an array prvalue ([conv.array], [expr.sub]), - /// - /// (2.4) when initializing an object of type - /// std​::​initializer_list from a braced-init-list - /// ([dcl.init.list]), - /// - /// (2.5) for certain unevaluated operands ([expr.typeid], [expr.sizeof]) - /// - /// (2.6) when a prvalue that has type other than cv void appears as a - /// discarded-value expression ([expr.context]). - bool InMaterializeTemporaryObjectContext = false; - // When evaluating immediate functions in the initializer of a default // argument or default member initializer, this is the declaration whose // default initializer is being evaluated and the location of the call @@ -5795,8 +5598,7 @@ class Sema final : public SemaBase { ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, - bool AcceptInvalidDecl = false, - bool NeedUnresolved = false); + bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, @@ -5812,15 +5614,6 @@ class Sema final : public SemaBase { ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); - ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, - TypeSourceInfo *TSI); - ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, - ParsedType ParsedTy); - bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); @@ -5914,32 +5707,6 @@ class Sema final : public SemaBase { Expr *ColumnIdx, SourceLocation RBLoc); - ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, - Expr *LowerBound, - SourceLocation ColonLocFirst, - SourceLocation ColonLocSecond, - Expr *Length, Expr *Stride, - SourceLocation RBLoc); - ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, - SourceLocation RParenLoc, - ArrayRef Dims, - ArrayRef Brackets); - - /// Data structure for iterator expression. - struct OMPIteratorData { - IdentifierInfo *DeclIdent = nullptr; - SourceLocation DeclIdentLoc; - ParsedType Type; - OMPIteratorExpr::IteratorRange Range; - SourceLocation AssignLoc; - SourceLocation ColonLoc; - SourceLocation SecColonLoc; - }; - - ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, - SourceLocation LLoc, SourceLocation RLoc, - ArrayRef Data); - bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef Args, SourceLocation RParenLoc, @@ -6751,19 +6518,6 @@ class Sema final : public SemaBase { } } - /// keepInMaterializeTemporaryObjectContext - Pull down - /// InMaterializeTemporaryObjectContext flag from previous context. 
- void keepInMaterializeTemporaryObjectContext() { - if (ExprEvalContexts.size() > 2 && - ExprEvalContexts[ExprEvalContexts.size() - 2] - .InMaterializeTemporaryObjectContext) { - auto &LastRecord = ExprEvalContexts.back(); - auto &PrevRecord = ExprEvalContexts[ExprEvalContexts.size() - 2]; - LastRecord.InMaterializeTemporaryObjectContext = - PrevRecord.InMaterializeTemporaryObjectContext; - } - } - DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } @@ -6872,14 +6626,6 @@ class Sema final : public SemaBase { void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); -public: - ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, Expr *E); - ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, Expr *E); - ///@} // @@ -6915,20 +6661,14 @@ class Sema final : public SemaBase { /// used in initializer of the field. llvm::MapVector DeleteExprs; - bool isInMaterializeTemporaryObjectContext() const { - assert(!ExprEvalContexts.empty() && - "Must be in an expression evaluation context"); - return ExprEvalContexts.back().InMaterializeTemporaryObjectContext; - } - ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, - IdentifierInfo &Name); + const IdentifierInfo &Name); - ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, - Scope *S, CXXScopeSpec &SS, - bool EnteringContext); - ParsedType getDestructorName(IdentifierInfo &II, SourceLocation NameLoc, + ParsedType getConstructorName(const IdentifierInfo &II, + SourceLocation NameLoc, Scope *S, + CXXScopeSpec &SS, bool EnteringContext); + ParsedType getDestructorName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); @@ -6956,10 +6696,7 @@ class Sema final : public SemaBase { SourceLocation RParenLoc); //// ActOnCXXThis - Parse 'this' pointer. - ExprResult ActOnCXXThis(SourceLocation Loc); - - /// Check whether the type of 'this' is valid in the current context. - bool CheckCXXThisType(SourceLocation Loc, QualType Type); + ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. 
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); @@ -7186,11 +6923,10 @@ class Sema final : public SemaBase { bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); - ExprResult - PerformImplicitConversion(Expr *From, QualType ToType, - const ImplicitConversionSequence &ICS, - AssignmentAction Action, - CheckedConversionKind CCK = CCK_ImplicitConversion); + ExprResult PerformImplicitConversion( + Expr *From, QualType ToType, const ImplicitConversionSequence &ICS, + AssignmentAction Action, + CheckedConversionKind CCK = CheckedConversionKind::Implicit); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence &SCS, AssignmentAction Action, @@ -7331,7 +7067,7 @@ class Sema final : public SemaBase { concepts::Requirement *ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, - IdentifierInfo *TypeName, + const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); @@ -7511,7 +7247,7 @@ class Sema final : public SemaBase { ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, - CheckedConversionKind CCK = CCK_ImplicitConversion); + CheckedConversionKind CCK = CheckedConversionKind::Implicit); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); @@ -7869,7 +7605,7 @@ class Sema final : public SemaBase { }; SpecialMemberOverloadResult - LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, + LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMemberKind SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); @@ -7877,40 +7613,17 @@ class Sema final : public SemaBase { typedef std::function TypoRecoveryCallback; - /// Specifies whether (or how) name lookup is being performed for a - /// redeclaration (vs. a reference). - enum RedeclarationKind { - /// The lookup is a reference to this name that is not for the - /// purpose of redeclaring the name. - NotForRedeclaration = 0, - /// The lookup results will be used for redeclaration of a name, - /// if an entity by that name already exists and is visible. - ForVisibleRedeclaration, - /// The lookup results will be used for redeclaration of a name - /// with external linkage; non-visible lookup results with external linkage - /// may also be found. - ForExternalRedeclaration - }; - - RedeclarationKind forRedeclarationInCurContext() const { - // A declaration with an owning module for linkage can never link against - // anything that is not visible. We don't need to check linkage here; if - // the context has internal linkage, redeclaration lookup won't find things - // from other TUs, and we can't safely compute linkage yet in general. - if (cast(CurContext) - ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true)) - return ForVisibleRedeclaration; - return ForExternalRedeclaration; - } + RedeclarationKind forRedeclarationInCurContext() const; /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. 
- NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, - SourceLocation Loc, LookupNameKind NameKind, - RedeclarationKind Redecl = NotForRedeclaration); + NamedDecl *LookupSingleName( + Scope *S, DeclarationName Name, SourceLocation Loc, + LookupNameKind NameKind, + RedeclarationKind Redecl = RedeclarationKind::NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false, @@ -7922,9 +7635,9 @@ class Sema final : public SemaBase { bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); - ObjCProtocolDecl * - LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, - RedeclarationKind Redecl = NotForRedeclaration); + ObjCProtocolDecl *LookupProtocol( + IdentifierInfo *II, SourceLocation IdLoc, + RedeclarationKind Redecl = RedeclarationKind::NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, @@ -8455,6 +8168,11 @@ class Sema final : public SemaBase { bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); + void DiagnoseUseOfDeletedFunction(SourceLocation Loc, SourceRange Range, + DeclarationName Name, + OverloadCandidateSet &CandidateSet, + FunctionDecl *Fn, MultiExprArg Args, + bool IsMember = false); ExprResult InitializeExplicitObjectArgument(Sema &S, Expr *Obj, FunctionDecl *Fun); @@ -9457,7 +9175,7 @@ class Sema final : public SemaBase { Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter( Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, - SourceLocation EllipsisLoc, IdentifierInfo *ParamName, + bool Typename, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); @@ -9511,7 +9229,7 @@ class Sema final : public SemaBase { TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, - TemplateTy Template, IdentifierInfo *TemplateII, + TemplateTy Template, const IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false, @@ -9852,7 +9570,7 @@ class Sema final : public SemaBase { TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, - TemplateTy TemplateName, IdentifierInfo *TemplateII, + TemplateTy TemplateName, const IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); @@ -9930,14 +9648,15 @@ class Sema final : public SemaBase { Decl *ActOnConceptDefinition(Scope *S, MultiTemplateParamsArg TemplateParameterLists, - IdentifierInfo *Name, SourceLocation NameLoc, - Expr *ConstraintExpr); + const IdentifierInfo *Name, + SourceLocation NameLoc, Expr *ConstraintExpr); void CheckConceptRedefinition(ConceptDecl *NewDecl, LookupResult &Previous, bool &AddToScope); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, - const CXXScopeSpec &SS, IdentifierInfo *Name, + const CXXScopeSpec &SS, + const IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void 
MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, @@ -10413,7 +10132,7 @@ class Sema final : public SemaBase { unsigned NumCallArgs; /// The special member being declared or defined. - CXXSpecialMember SpecialMember; + CXXSpecialMemberKind SpecialMember; }; ArrayRef template_arguments() const { @@ -12383,22 +12102,22 @@ class Sema final : public SemaBase { SkipBodyInfo *SkipBody); ObjCCategoryDecl *ActOnStartCategoryInterface( - SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, + SourceLocation AtInterfaceLoc, const IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, - IdentifierInfo *CategoryName, SourceLocation CategoryLoc, + const IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); ObjCImplementationDecl *ActOnStartClassImplementation( - SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, - SourceLocation ClassLoc, IdentifierInfo *SuperClassname, + SourceLocation AtClassImplLoc, const IdentifierInfo *ClassName, + SourceLocation ClassLoc, const IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); ObjCCategoryImplDecl *ActOnStartCategoryImplementation( - SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, - SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, - const ParsedAttributesView &AttrList); + SourceLocation AtCatImplLoc, const IdentifierInfo *ClassName, + SourceLocation ClassLoc, const IdentifierInfo *CatName, + SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef Decls); @@ -12581,11 +12300,13 @@ class Sema final : public SemaBase { bool CheckObjCDeclScope(Decl *D); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, - IdentifierInfo *ClassName, SmallVectorImpl &Decls); + const IdentifierInfo *ClassName, + SmallVectorImpl &Decls); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, bool Invalid = false); + const IdentifierInfo *Id, + bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); @@ -12702,8 +12423,8 @@ class Sema final : public SemaBase { SourceLocation SuperLoc, QualType SuperType, bool Super); - ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, - IdentifierInfo &propertyName, + ExprResult ActOnClassPropertyRefExpr(const IdentifierInfo &receiverName, + const IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); @@ -13178,18 +12899,18 @@ class Sema final : public SemaBase { bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, - ArrayRef SelIdents, + ArrayRef SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, - ArrayRef SelIdents, + ArrayRef SelIdents, bool AtArgumentExpression, bool IsSuper = false); - void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, - ArrayRef SelIdents, - bool AtArgumentExpression, - ObjCInterfaceDecl *Super = nullptr); + void CodeCompleteObjCInstanceMessage( + Scope *S, Expr *Receiver, ArrayRef SelIdents, + bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); - void 
CodeCompleteObjCSelector(Scope *S, ArrayRef SelIdents); + void CodeCompleteObjCSelector(Scope *S, + ArrayRef SelIdents); void CodeCompleteObjCProtocolReferences(ArrayRef Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); @@ -13209,11 +12930,11 @@ class Sema final : public SemaBase { void CodeCompleteObjCMethodDecl(Scope *S, std::optional IsInstanceMethod, ParsedType ReturnType); - void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, - bool AtParameterName, - ParsedType ReturnType, - ArrayRef SelIdents); - void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, + void CodeCompleteObjCMethodDeclSelector( + Scope *S, bool IsInstanceMethod, bool AtParameterName, + ParsedType ReturnType, ArrayRef SelIdents); + void CodeCompleteObjCClassPropertyRefExpr(Scope *S, + const IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); @@ -13290,1795 +13011,6 @@ class Sema final : public SemaBase { std::unique_ptr RVIntrinsicManager; ///@} - - // - // - // ------------------------------------------------------------------------- - // - // - - /// \name CUDA - /// Implementations are in SemaCUDA.cpp - ///@{ - -public: - /// Increments our count of the number of times we've seen a pragma forcing - /// functions to be __host__ __device__. So long as this count is greater - /// than zero, all functions encountered will be __host__ __device__. - void PushForceCUDAHostDevice(); - - /// Decrements our count of the number of times we've seen a pragma forcing - /// functions to be __host__ __device__. Returns false if the count is 0 - /// before incrementing, so you can emit an error. - bool PopForceCUDAHostDevice(); - - ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, - MultiExprArg ExecConfig, - SourceLocation GGGLoc); - - /// Diagnostics that are emitted only if we discover that the given function - /// must be codegen'ed. Because handling these correctly adds overhead to - /// compilation, this is currently only enabled for CUDA compilations. - SemaDiagnosticBuilder::DeferredDiagnosticsType DeviceDeferredDiags; - - /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the - /// key in a hashtable, both the FD and location are hashed. - struct FunctionDeclAndLoc { - CanonicalDeclPtr FD; - SourceLocation Loc; - }; - - /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a - /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the - /// same deferred diag twice. - llvm::DenseSet LocsWithCUDACallDiags; - - /// An inverse call graph, mapping known-emitted functions to one of their - /// known-emitted callers (plus the location of the call). - /// - /// Functions that we can tell a priori must be emitted aren't added to this - /// map. - llvm::DenseMap, - /* Caller = */ FunctionDeclAndLoc> - DeviceKnownEmittedFns; - - /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current - /// context is "used as device code". - /// - /// - If CurContext is a __host__ function, does not emit any diagnostics - /// unless \p EmitOnBothSides is true. - /// - If CurContext is a __device__ or __global__ function, emits the - /// diagnostics immediately. - /// - If CurContext is a __host__ __device__ function and we are compiling for - /// the device, creates a diagnostic which is emitted if and when we realize - /// that the function will be codegen'ed. 
- /// - /// Example usage: - /// - /// // Variable-length arrays are not allowed in CUDA device code. - /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) - /// return ExprError(); - /// // Otherwise, continue parsing as normal. - SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc, - unsigned DiagID); - - /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current - /// context is "used as host code". - /// - /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. - SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); - - /// Determines whether the given function is a CUDA device/host/kernel/etc. - /// function. - /// - /// Use this rather than examining the function's attributes yourself -- you - /// will get it wrong. Returns CFT_Host if D is null. - CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, - bool IgnoreImplicitHDAttr = false); - CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); - - enum CUDAVariableTarget { - CVT_Device, /// Emitted on device side with a shadow variable on host side - CVT_Host, /// Emitted on host side only - CVT_Both, /// Emitted on both sides with different addresses - CVT_Unified, /// Emitted as a unified address, e.g. managed variables - }; - /// Determines whether the given variable is emitted on host or device side. - CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D); - - /// Defines kinds of CUDA global host/device context where a function may be - /// called. - enum CUDATargetContextKind { - CTCK_Unknown, /// Unknown context - CTCK_InitGlobalVar, /// Function called during global variable - /// initialization - }; - - /// Define the current global CUDA host/device context where a function may be - /// called. Only used when a function is called outside of any functions. - struct CUDATargetContext { - CUDAFunctionTarget Target = CFT_HostDevice; - CUDATargetContextKind Kind = CTCK_Unknown; - Decl *D = nullptr; - } CurCUDATargetCtx; - - struct CUDATargetContextRAII { - Sema &S; - CUDATargetContext SavedCtx; - CUDATargetContextRAII(Sema &S_, CUDATargetContextKind K, Decl *D); - ~CUDATargetContextRAII() { S.CurCUDATargetCtx = SavedCtx; } - }; - - /// Gets the CUDA target for the current context. - CUDAFunctionTarget CurrentCUDATarget() { - return IdentifyCUDATarget(dyn_cast(CurContext)); - } - - static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); - - // CUDA function call preference. Must be ordered numerically from - // worst to best. - enum CUDAFunctionPreference { - CFP_Never, // Invalid caller/callee combination. - CFP_WrongSide, // Calls from host-device to host or device - // function that do not match current compilation - // mode. - CFP_HostDevice, // Any calls to host/device functions. - CFP_SameSide, // Calls from host-device to host or device - // function matching current compilation mode. - CFP_Native, // host-to-host or device-to-device calls. - }; - - /// Identifies relative preference of a given Caller/Callee - /// combination, based on their host/device attributes. - /// \param Caller function which needs address of \p Callee. - /// nullptr in case of global context. - /// \param Callee target function - /// - /// \returns preference value for particular Caller/Callee combination. - CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, - const FunctionDecl *Callee); - - /// Determines whether Caller may invoke Callee, based on their CUDA - /// host/device attributes. 
Returns false if the call is not allowed. - /// - /// Note: Will return true for CFP_WrongSide calls. These may appear in - /// semantically correct CUDA programs, but only if they're never codegen'ed. - bool IsAllowedCUDACall(const FunctionDecl *Caller, - const FunctionDecl *Callee) { - return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; - } - - /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, - /// depending on FD and the current compilation settings. - void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, - const LookupResult &Previous); - - /// May add implicit CUDAConstantAttr attribute to VD, depending on VD - /// and current compilation settings. - void MaybeAddCUDAConstantAttr(VarDecl *VD); - - /// Check whether we're allowed to call Callee from the current context. - /// - /// - If the call is never allowed in a semantically-correct program - /// (CFP_Never), emits an error and returns false. - /// - /// - If the call is allowed in semantically-correct programs, but only if - /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to - /// be emitted if and when the caller is codegen'ed, and returns true. - /// - /// Will only create deferred diagnostics for a given SourceLocation once, - /// so you can safely call this multiple times without generating duplicate - /// deferred errors. - /// - /// - Otherwise, returns true without emitting any diagnostics. - bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); - - void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); - - /// Set __device__ or __host__ __device__ attributes on the given lambda - /// operator() method. - /// - /// CUDA lambdas by default is host device function unless it has explicit - /// host or device attribute. - void CUDASetLambdaAttrs(CXXMethodDecl *Method); - - /// Record \p FD if it is a CUDA/HIP implicit host device function used on - /// device side in device compilation. - void CUDARecordImplicitHostDeviceFuncUsedByDevice(const FunctionDecl *FD); - - /// Finds a function in \p Matches with highest calling priority - /// from \p Caller context and erases all functions with lower - /// calling priority. - void EraseUnwantedCUDAMatches( - const FunctionDecl *Caller, - SmallVectorImpl> &Matches); - - /// Given a implicit special member, infer its CUDA target from the - /// calls it needs to make to underlying base/field special members. - /// \param ClassDecl the class for which the member is being created. - /// \param CSM the kind of special member. - /// \param MemberDecl the special member itself. - /// \param ConstRHS true if this is a copy operation with a const object on - /// its RHS. - /// \param Diagnose true if this call should emit diagnostics. - /// \return true if there was an error inferring. - /// The result of this call is implicit CUDA target attribute(s) attached to - /// the member declaration. - bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, - CXXSpecialMember CSM, - CXXMethodDecl *MemberDecl, - bool ConstRHS, bool Diagnose); - - /// \return true if \p CD can be considered empty according to CUDA - /// (E.2.3.1 in CUDA 7.5 Programming guide). - bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); - bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); - - // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In - // case of error emits appropriate diagnostic and invalidates \p Var. 
- // - // \details CUDA allows only empty constructors as initializers for global - // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all - // __shared__ variables whether they are local or not (they all are implicitly - // static in CUDA). One exception is that CUDA allows constant initializers - // for __constant__ and __device__ variables. - void checkAllowedCUDAInitializer(VarDecl *VD); - - /// Check whether NewFD is a valid overload for CUDA. Emits - /// diagnostics and invalidates NewFD if not. - void checkCUDATargetOverload(FunctionDecl *NewFD, - const LookupResult &Previous); - /// Copies target attributes from the template TD to the function FD. - void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); - - /// Returns the name of the launch configuration function. This is the name - /// of the function that will be called to configure kernel call, with the - /// parameters specified via <<<>>>. - std::string getCudaConfigureFuncName() const; - -private: - unsigned ForceCUDAHostDeviceDepth = 0; - - ///@} - - // - // - // ------------------------------------------------------------------------- - // - // - - /// \name OpenMP Directives and Clauses - /// Implementations are in SemaOpenMP.cpp - ///@{ - -public: - /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current - /// context is "used as device code". - /// - /// - If CurContext is a `declare target` function or it is known that the - /// function is emitted for the device, emits the diagnostics immediately. - /// - If CurContext is a non-`declare target` function and we are compiling - /// for the device, creates a diagnostic which is emitted if and when we - /// realize that the function will be codegen'ed. - /// - /// Example usage: - /// - /// // Variable-length arrays are not allowed in NVPTX device code. - /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) - /// return ExprError(); - /// // Otherwise, continue parsing as normal. - SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, - unsigned DiagID, - const FunctionDecl *FD); - - /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current - /// context is "used as host code". - /// - /// - If CurContext is a `declare target` function or it is known that the - /// function is emitted for the host, emits the diagnostics immediately. - /// - If CurContext is a non-host function, just ignore it. - /// - /// Example usage: - /// - /// // Variable-length arrays are not allowed in NVPTX device code. - /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) - /// return ExprError(); - /// // Otherwise, continue parsing as normal. - SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, - unsigned DiagID, - const FunctionDecl *FD); - - /// Register \p D as specialization of all base functions in \p Bases in the - /// current `omp begin/end declare variant` scope. - void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( - Decl *D, SmallVectorImpl &Bases); - - /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. - void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); - - /// Can we exit an OpenMP declare variant scope at the moment. 
- bool isInOpenMPDeclareVariantScope() const { - return !OMPDeclareVariantScopes.empty(); - } - - ExprResult - VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, - bool StrictlyPositive = true, - bool SuppressExprDiags = false); - - /// Given the potential call expression \p Call, determine if there is a - /// specialization via the OpenMP declare variant mechanism available. If - /// there is, return the specialized call expression, otherwise return the - /// original \p Call. - ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, - SourceLocation LParenLoc, MultiExprArg ArgExprs, - SourceLocation RParenLoc, Expr *ExecConfig); - - /// Handle a `omp begin declare variant`. - void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); - - /// Handle a `omp end declare variant`. - void ActOnOpenMPEndDeclareVariant(); - - /// Function tries to capture lambda's captured variables in the OpenMP region - /// before the original lambda is captured. - void tryCaptureOpenMPLambdas(ValueDecl *V); - - /// Return true if the provided declaration \a VD should be captured by - /// reference. - /// \param Level Relative level of nested OpenMP construct for that the check - /// is performed. - /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. - bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, - unsigned OpenMPCaptureLevel) const; - - /// Check if the specified variable is used in one of the private - /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP - /// constructs. - VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, - unsigned StopAt = 0); - - /// The member expression(this->fd) needs to be rebuilt in the template - /// instantiation to generate private copy for OpenMP when default - /// clause is used. The function will return true if default - /// cluse is used. - bool isOpenMPRebuildMemberExpr(ValueDecl *D); - - ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, - ExprObjectKind OK, SourceLocation Loc); - - /// If the current region is a loop-based region, mark the start of the loop - /// construct. - void startOpenMPLoop(); - - /// If the current region is a range loop-based region, mark the start of the - /// loop construct. - void startOpenMPCXXRangeFor(); - - /// Check if the specified variable is used in 'private' clause. - /// \param Level Relative level of nested OpenMP construct for that the check - /// is performed. - OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, - unsigned CapLevel) const; - - /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) - /// for \p FD based on DSA for the provided corresponding captured declaration - /// \p D. - void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); - - /// Check if the specified variable is captured by 'target' directive. - /// \param Level Relative level of nested OpenMP construct for that the check - /// is performed. - bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, - unsigned CaptureLevel) const; - - /// Check if the specified global variable must be captured by outer capture - /// regions. - /// \param Level Relative level of nested OpenMP construct for that - /// the check is performed. 
- bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, - unsigned CaptureLevel) const; - - ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, - Expr *Op); - /// Called on start of new data sharing attribute block. - void StartOpenMPDSABlock(OpenMPDirectiveKind K, - const DeclarationNameInfo &DirName, Scope *CurScope, - SourceLocation Loc); - /// Start analysis of clauses. - void StartOpenMPClause(OpenMPClauseKind K); - /// End analysis of clauses. - void EndOpenMPClause(); - /// Called on end of data sharing attribute block. - void EndOpenMPDSABlock(Stmt *CurDirective); - - /// Check if the current region is an OpenMP loop region and if it is, - /// mark loop control variable, used in \p Init for loop initialization, as - /// private by default. - /// \param Init First part of the for loop. - void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); - - /// Called on well-formed '\#pragma omp metadirective' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPMetaDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - - // OpenMP directives and clauses. - /// Called on correct id-expression from the '#pragma omp - /// threadprivate'. - ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, - const DeclarationNameInfo &Id, - OpenMPDirectiveKind Kind); - /// Called on well-formed '#pragma omp threadprivate'. - DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, - ArrayRef VarList); - /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. - OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, - ArrayRef VarList); - /// Called on well-formed '#pragma omp allocate'. - DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, - ArrayRef VarList, - ArrayRef Clauses, - DeclContext *Owner = nullptr); - - /// Called on well-formed '#pragma omp [begin] assume[s]'. - void ActOnOpenMPAssumesDirective(SourceLocation Loc, - OpenMPDirectiveKind DKind, - ArrayRef Assumptions, - bool SkippedClauses); - - /// Check if there is an active global `omp begin assumes` directive. - bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } - - /// Check if there is an active global `omp assumes` directive. - bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } - - /// Called on well-formed '#pragma omp end assumes'. - void ActOnOpenMPEndAssumesDirective(); - - /// Called on well-formed '#pragma omp requires'. - DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, - ArrayRef ClauseList); - /// Check restrictions on Requires directive - OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, - ArrayRef Clauses); - /// Check if the specified type is allowed to be used in 'omp declare - /// reduction' construct. - QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, - TypeResult ParsedType); - /// Called on start of '#pragma omp declare reduction'. - DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( - Scope *S, DeclContext *DC, DeclarationName Name, - ArrayRef> ReductionTypes, - AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); - /// Initialize declare reduction construct initializer. - void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); - /// Finish current declare reduction construct initializer. - void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); - /// Initialize declare reduction construct initializer. - /// \return omp_priv variable. 
- VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); - /// Finish current declare reduction construct initializer. - void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, - VarDecl *OmpPrivParm); - /// Called at the end of '#pragma omp declare reduction'. - DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( - Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); - - /// Check variable declaration in 'omp declare mapper' construct. - TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); - /// Check if the specified type is allowed to be used in 'omp declare - /// mapper' construct. - QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, - TypeResult ParsedType); - /// Called on start of '#pragma omp declare mapper'. - DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( - Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, - SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, - Expr *MapperVarRef, ArrayRef Clauses, - Decl *PrevDeclInScope = nullptr); - /// Build the mapper variable of '#pragma omp declare mapper'. - ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, - QualType MapperType, - SourceLocation StartLoc, - DeclarationName VN); - void ActOnOpenMPIteratorVarDecl(VarDecl *VD); - bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; - const ValueDecl *getOpenMPDeclareMapperVarName() const; - - struct DeclareTargetContextInfo { - struct MapInfo { - OMPDeclareTargetDeclAttr::MapTypeTy MT; - SourceLocation Loc; - }; - /// Explicitly listed variables and functions in a 'to' or 'link' clause. - llvm::DenseMap ExplicitlyMapped; - - /// The 'device_type' as parsed from the clause. - OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; - - /// The directive kind, `begin declare target` or `declare target`. - OpenMPDirectiveKind Kind; - - /// The directive with indirect clause. - std::optional Indirect; - - /// The directive location. - SourceLocation Loc; - - DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) - : Kind(Kind), Loc(Loc) {} - }; - - /// Called on the start of target region i.e. '#pragma omp declare target'. - bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); - - /// Called at the end of target region i.e. '#pragma omp end declare target'. - const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); - - /// Called once a target context is completed, that can be when a - /// '#pragma omp end declare target' was encountered or when a - /// '#pragma omp declare target' without declaration-definition-seq was - /// encountered. - void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); - - /// Report unterminated 'omp declare target' or 'omp begin declare target' at - /// the end of a compilation unit. - void DiagnoseUnterminatedOpenMPDeclareTarget(); - - /// Searches for the provided declaration name for OpenMP declare target - /// directive. - NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, - CXXScopeSpec &ScopeSpec, - const DeclarationNameInfo &Id); - - /// Called on correct id-expression from the '#pragma omp declare target'. - void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, - OMPDeclareTargetDeclAttr::MapTypeTy MT, - DeclareTargetContextInfo &DTCI); - - /// Check declaration inside target region. 
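For orientation, this is the kind of source construct the declare reduction hooks above are driven by; the parser invokes the Start/CombinerStart/CombinerEnd/InitializerStart/InitializerEnd/End callbacks piecewise while walking the pragma. The snippet is plain OpenMP C++ with made-up type and reduction-identifier names, not Clang-internal code.

```cpp
#include <cstddef>
#include <vector>

struct Vec2 {
  double x = 0.0, y = 0.0;
};

// A user-defined reduction: the combiner uses omp_out/omp_in, the optional
// initializer uses omp_priv (and may reference omp_orig).
#pragma omp declare reduction(vsum : Vec2 :                                    \
        omp_out.x += omp_in.x, omp_out.y += omp_in.y)                          \
    initializer(omp_priv = Vec2{})

Vec2 sum(const std::vector<Vec2> &Points) {
  Vec2 Acc;
#pragma omp parallel for reduction(vsum : Acc)
  for (std::size_t I = 0; I < Points.size(); ++I) {
    Acc.x += Points[I].x;
    Acc.y += Points[I].y;
  }
  return Acc;
}
```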
- void - checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, - SourceLocation IdLoc = SourceLocation()); - - /// Adds OMPDeclareTargetDeclAttr to referenced variables in declare target - /// directive. - void ActOnOpenMPDeclareTargetInitializer(Decl *D); - - /// Finishes analysis of the deferred functions calls that may be declared as - /// host/nohost during device/host compilation. - void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, - const FunctionDecl *Callee, - SourceLocation Loc); - - /// Return true if currently in OpenMP task with untied clause context. - bool isInOpenMPTaskUntiedContext() const; - - /// Return true inside OpenMP declare target region. - bool isInOpenMPDeclareTargetContext() const { - return !DeclareTargetNesting.empty(); - } - /// Return true inside OpenMP target region. - bool isInOpenMPTargetExecutionDirective() const; - - /// Return the number of captured regions created for an OpenMP directive. - static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); - - /// Initialization of captured region for OpenMP region. - void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); - - /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to - /// an OpenMP loop directive. - StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); - - /// Process a canonical OpenMP loop nest that can either be a canonical - /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an - /// OpenMP loop transformation construct. - StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); - - /// End of OpenMP region. - /// - /// \param S Statement associated with the current OpenMP region. - /// \param Clauses List of clauses for the current OpenMP region. - /// - /// \returns Statement for finished OpenMP region. - StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef Clauses); - StmtResult ActOnOpenMPExecutableDirective( - OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, - OpenMPDirectiveKind CancelRegion, ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, - OpenMPDirectiveKind PrevMappedDirective = llvm::omp::OMPD_unknown); - /// Called on well-formed '\#pragma omp parallel' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPParallelDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - using VarsWithInheritedDSAType = - llvm::SmallDenseMap; - /// Called on well-formed '\#pragma omp simd' after parsing - /// of the associated statement. - StmtResult - ActOnOpenMPSimdDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '#pragma omp tile' after parsing of its clauses and - /// the associated statement. - StmtResult ActOnOpenMPTileDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '#pragma omp unroll' after parsing of its clauses - /// and the associated statement. - StmtResult ActOnOpenMPUnrollDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp for' after parsing - /// of the associated statement. - StmtResult - ActOnOpenMPForDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp for simd' after parsing - /// of the associated statement. 
- StmtResult - ActOnOpenMPForSimdDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp sections' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPSectionsDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp section' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp scope' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPScopeDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp single' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPSingleDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp master' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp critical' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, - ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp parallel for' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPParallelForDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel for simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelForSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel master' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp parallel masked' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMaskedDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp parallel sections' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp task' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPTaskDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp taskyield'. - StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp error'. - /// Error direcitive is allowed in both declared and excutable contexts. - /// Adding InExContext to identify which context is called from. 
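Most ActOnOpenMP*Directive entry points in this stretch map one-to-one onto source-level pragmas. A short orientation sample in plain OpenMP C++ (identifiers invented) covering a few of them:

```cpp
#include <cstdio>

void demo(int N) {
  int Sum = 0;

// '#pragma omp parallel for' -> ActOnOpenMPParallelForDirective
#pragma omp parallel for reduction(+ : Sum)
  for (int I = 0; I < N; ++I)
    Sum += I;

// 'sections' / 'section' -> ActOnOpenMPSectionsDirective / SectionDirective
#pragma omp parallel sections
  {
#pragma omp section
    std::printf("first section\n");
#pragma omp section
    std::printf("second section\n");
  }

// 'task' plus 'taskwait' (handled just below) -> ActOnOpenMPTaskDirective
#pragma omp parallel
#pragma omp single
  {
#pragma omp task
    std::printf("a deferred piece of work\n");
#pragma omp taskwait
  }

// Named 'critical' -> ActOnOpenMPCriticalDirective
#pragma omp critical(io)
  std::printf("Sum = %d\n", Sum);
}
```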
- StmtResult ActOnOpenMPErrorDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - bool InExContext = true); - /// Called on well-formed '\#pragma omp barrier'. - StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp taskwait'. - StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp taskgroup'. - StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp flush'. - StmtResult ActOnOpenMPFlushDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp depobj'. - StmtResult ActOnOpenMPDepobjDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp scan'. - StmtResult ActOnOpenMPScanDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp ordered' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPOrderedDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp atomic' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPAtomicDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp target' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPTargetDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp target data' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPTargetDataDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp target enter data' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - Stmt *AStmt); - /// Called on well-formed '\#pragma omp target exit data' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - Stmt *AStmt); - /// Called on well-formed '\#pragma omp target parallel' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp target parallel for' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetParallelForDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp teams' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPTeamsDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp teams loop' after parsing of the - /// associated statement. 
- StmtResult ActOnOpenMPTeamsGenericLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target teams loop' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel loop' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPParallelGenericLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target parallel loop' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPTargetParallelGenericLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp cancellation point'. - StmtResult - ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, - SourceLocation EndLoc, - OpenMPDirectiveKind CancelRegion); - /// Called on well-formed '\#pragma omp cancel'. - StmtResult ActOnOpenMPCancelDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - OpenMPDirectiveKind CancelRegion); - /// Called on well-formed '\#pragma omp taskloop' after parsing of the - /// associated statement. - StmtResult - ActOnOpenMPTaskLoopDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp taskloop simd' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPTaskLoopSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp master taskloop' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPMasterTaskLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel master taskloop' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel master taskloop simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp masked taskloop' after parsing of the - /// associated statement. 
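The taskloop family above tiles an iteration space into explicit tasks; the grainsize and num_tasks clauses handled further below control how many iterations each generated task receives. A small plain-OpenMP example of the usual single-inside-parallel idiom:

```cpp
#include <cstddef>
#include <vector>

// 'taskloop' (ActOnOpenMPTaskLoopDirective) splits the loop into tasks;
// grainsize(256) asks for roughly 256 iterations per generated task.
void scale(std::vector<double> &V, double Factor) {
#pragma omp parallel
#pragma omp single
  {
#pragma omp taskloop grainsize(256)
    for (std::size_t I = 0; I < V.size(); ++I)
      V[I] *= Factor;
  }
}
```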
- StmtResult ActOnOpenMPMaskedTaskLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp masked taskloop simd' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPMaskedTaskLoopSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel masked taskloop' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMaskedTaskLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp parallel masked taskloop simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPParallelMaskedTaskLoopSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp distribute' after parsing - /// of the associated statement. - StmtResult - ActOnOpenMPDistributeDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target update'. - StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - Stmt *AStmt); - /// Called on well-formed '\#pragma omp distribute parallel for' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPDistributeParallelForDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp distribute parallel for simd' - /// after parsing of the associated statement. - StmtResult ActOnOpenMPDistributeParallelForSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp distribute simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPDistributeSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target parallel for simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetParallelForSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target simd' after parsing of - /// the associated statement. - StmtResult - ActOnOpenMPTargetSimdDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp teams distribute' after parsing of - /// the associated statement. - StmtResult ActOnOpenMPTeamsDistributeDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp teams distribute simd' after parsing - /// of the associated statement. 
- StmtResult ActOnOpenMPTeamsDistributeSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp teams distribute parallel for simd' - /// after parsing of the associated statement. - StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp teams distribute parallel for' - /// after parsing of the associated statement. - StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target teams' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp target teams distribute' after parsing - /// of the associated statement. - StmtResult ActOnOpenMPTargetTeamsDistributeDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target teams distribute parallel for' - /// after parsing of the associated statement. - StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target teams distribute parallel for - /// simd' after parsing of the associated statement. - StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp target teams distribute simd' after - /// parsing of the associated statement. - StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - /// Called on well-formed '\#pragma omp interop'. - StmtResult ActOnOpenMPInteropDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp dispatch' after parsing of the - // /associated statement. - StmtResult ActOnOpenMPDispatchDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed '\#pragma omp masked' after parsing of the - // /associated statement. - StmtResult ActOnOpenMPMaskedDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed '\#pragma omp loop' after parsing of the - /// associated statement. - StmtResult ActOnOpenMPGenericLoopDirective( - ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); - - /// Checks correctness of linear modifiers. - bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, - SourceLocation LinLoc); - /// Checks that the specified declaration matches requirements for the linear - /// decls. 
- bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, - OpenMPLinearClauseKind LinKind, QualType Type, - bool IsDeclareSimd = false); - - /// Called on well-formed '\#pragma omp declare simd' after parsing of - /// the associated method/function. - DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( - DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, - Expr *Simdlen, ArrayRef Uniforms, ArrayRef Aligneds, - ArrayRef Alignments, ArrayRef Linears, - ArrayRef LinModifiers, ArrayRef Steps, SourceRange SR); - - /// Checks '\#pragma omp declare variant' variant function and original - /// functions after parsing of the associated method/function. - /// \param DG Function declaration to which declare variant directive is - /// applied to. - /// \param VariantRef Expression that references the variant function, which - /// must be used instead of the original one, specified in \p DG. - /// \param TI The trait info object representing the match clause. - /// \param NumAppendArgs The number of omp_interop_t arguments to account for - /// in checking. - /// \returns std::nullopt, if the function/variant function are not compatible - /// with the pragma, pair of original function/variant ref expression - /// otherwise. - std::optional> - checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, - OMPTraitInfo &TI, unsigned NumAppendArgs, - SourceRange SR); - - /// Called on well-formed '\#pragma omp declare variant' after parsing of - /// the associated method/function. - /// \param FD Function declaration to which declare variant directive is - /// applied to. - /// \param VariantRef Expression that references the variant function, which - /// must be used instead of the original one, specified in \p DG. - /// \param TI The context traits associated with the function variant. - /// \param AdjustArgsNothing The list of 'nothing' arguments. - /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. - /// \param AppendArgs The list of 'append_args' arguments. - /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. - /// \param AppendArgsLoc The Location of an 'append_args' clause. - /// \param SR The SourceRange of the 'declare variant' directive. - void ActOnOpenMPDeclareVariantDirective( - FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, - ArrayRef AdjustArgsNothing, - ArrayRef AdjustArgsNeedDevicePtr, - ArrayRef AppendArgs, SourceLocation AdjustArgsLoc, - SourceLocation AppendArgsLoc, SourceRange SR); - - OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'allocator' clause. - OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'if' clause. - OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, - Expr *Condition, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation NameModifierLoc, - SourceLocation ColonLoc, - SourceLocation EndLoc); - /// Called on well-formed 'final' clause. - OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'num_threads' clause. - OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'align' clause. 
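The declare simd and declare variant hooks above (ActOnOpenMPDeclareSimdDirective, checkOpenMPDeclareVariantFunction, ActOnOpenMPDeclareVariantDirective) process function-level annotations such as the following. The function names and the context selector are illustrative only; the syntax itself is standard OpenMP.

```cpp
#include <cmath>

// Declare simd: ask for a SIMD clone with the given simdlen, uniform and
// linear properties of the parameters.
#pragma omp declare simd simdlen(8) uniform(Scale) linear(I:1)
double scaled(const double *Data, int I, double Scale) {
  return Data[I] * Scale;
}

double fast_sin(double X) { return X - X * X * X / 6.0; } // crude variant

// Declare variant: substitute 'fast_sin' for 'accurate_sin' whenever the
// call site matches the context selector (here: inside a parallel construct).
#pragma omp declare variant(fast_sin) match(construct={parallel})
double accurate_sin(double X) { return std::sin(X); }
```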
- OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'safelen' clause. - OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'simdlen' clause. - OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-form 'sizes' clause. - OMPClause *ActOnOpenMPSizesClause(ArrayRef SizeExprs, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-form 'full' clauses. - OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-form 'partial' clauses. - OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'collapse' clause. - OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'ordered' clause. - OMPClause * - ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, - SourceLocation LParenLoc = SourceLocation(), - Expr *NumForLoops = nullptr); - /// Called on well-formed 'grainsize' clause. - OMPClause *ActOnOpenMPGrainsizeClause(OpenMPGrainsizeClauseModifier Modifier, - Expr *Size, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ModifierLoc, - SourceLocation EndLoc); - /// Called on well-formed 'num_tasks' clause. - OMPClause *ActOnOpenMPNumTasksClause(OpenMPNumTasksClauseModifier Modifier, - Expr *NumTasks, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ModifierLoc, - SourceLocation EndLoc); - /// Called on well-formed 'hint' clause. - OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'detach' clause. - OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, - SourceLocation ArgumentLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'when' clause. - OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'default' clause. - OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'proc_bind' clause. - OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'order' clause. - OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseModifier Modifier, - OpenMPOrderClauseKind Kind, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation MLoc, SourceLocation KindLoc, - SourceLocation EndLoc); - /// Called on well-formed 'update' clause. 
- OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - OMPClause *ActOnOpenMPSingleExprWithArgClause( - OpenMPClauseKind Kind, ArrayRef Arguments, Expr *Expr, - SourceLocation StartLoc, SourceLocation LParenLoc, - ArrayRef ArgumentsLoc, SourceLocation DelimLoc, - SourceLocation EndLoc); - /// Called on well-formed 'schedule' clause. - OMPClause *ActOnOpenMPScheduleClause( - OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, - OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, - SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, - SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); - - OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'nowait' clause. - OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'untied' clause. - OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'mergeable' clause. - OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'read' clause. - OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'write' clause. - OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'update' clause. - OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'capture' clause. - OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'compare' clause. - OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'fail' clause. - OMPClause *ActOnOpenMPFailClause(SourceLocation StartLoc, - SourceLocation EndLoc); - OMPClause *ActOnOpenMPFailClause(OpenMPClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'seq_cst' clause. - OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'acq_rel' clause. - OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'acquire' clause. - OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'release' clause. - OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'relaxed' clause. - OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'weak' clause. - OMPClause *ActOnOpenMPWeakClause(SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'init' clause. - OMPClause * - ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo, - SourceLocation StartLoc, SourceLocation LParenLoc, - SourceLocation VarLoc, SourceLocation EndLoc); - - /// Called on well-formed 'use' clause. - OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation VarLoc, SourceLocation EndLoc); - - /// Called on well-formed 'destroy' clause. 
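The read/write/update/capture and memory-order clause callbacks here correspond to the atomic construct's clause set. A plain OpenMP C++ sample using the OpenMP 5.0 memory-order clauses (relaxed, seq_cst):

```cpp
#include <cstdio>

int Counter = 0; // shared across threads

void atomics() {
#pragma omp parallel
  {
    int Seen = 0; // private to each thread

// 'update' + 'relaxed' -> ActOnOpenMPUpdateClause / ActOnOpenMPRelaxedClause
#pragma omp atomic update relaxed
    Counter += 1;

// 'capture' + 'seq_cst' -> ActOnOpenMPCaptureClause / ActOnOpenMPSeqCstClause
#pragma omp atomic capture seq_cst
    {
      Seen = Counter;
      Counter += 1;
    }

// 'read' -> ActOnOpenMPReadClause
#pragma omp atomic read
    Seen = Counter;

    (void)Seen;
  }
  std::printf("Counter = %d\n", Counter);
}
```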
- OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation VarLoc, - SourceLocation EndLoc); - /// Called on well-formed 'novariants' clause. - OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'nocontext' clause. - OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'filter' clause. - OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'threads' clause. - OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'simd' clause. - OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'nogroup' clause. - OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, - SourceLocation EndLoc); - /// Called on well-formed 'unified_address' clause. - OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'unified_address' clause. - OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'reverse_offload' clause. - OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'dynamic_allocators' clause. - OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'atomic_default_mem_order' clause. - OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( - OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, - SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); - - /// Called on well-formed 'at' clause. - OMPClause *ActOnOpenMPAtClause(OpenMPAtClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'severity' clause. - OMPClause *ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'message' clause. - /// passing string for message. - OMPClause *ActOnOpenMPMessageClause(Expr *MS, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Data used for processing a list of variables in OpenMP clauses. - struct OpenMPVarListDataTy final { - Expr *DepModOrTailExpr = nullptr; - Expr *IteratorExpr = nullptr; - SourceLocation ColonLoc; - SourceLocation RLoc; - CXXScopeSpec ReductionOrMapperIdScopeSpec; - DeclarationNameInfo ReductionOrMapperId; - int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or - ///< lastprivate clause. 
- SmallVector - MapTypeModifiers; - SmallVector - MapTypeModifiersLoc; - SmallVector - MotionModifiers; - SmallVector MotionModifiersLoc; - bool IsMapTypeImplicit = false; - SourceLocation ExtraModifierLoc; - SourceLocation OmpAllMemoryLoc; - SourceLocation - StepModifierLoc; /// 'step' modifier location for linear clause - }; - - OMPClause *ActOnOpenMPVarListClause(OpenMPClauseKind Kind, - ArrayRef Vars, - const OMPVarListLocTy &Locs, - OpenMPVarListDataTy &Data); - /// Called on well-formed 'inclusive' clause. - OMPClause *ActOnOpenMPInclusiveClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'exclusive' clause. - OMPClause *ActOnOpenMPExclusiveClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'allocate' clause. - OMPClause * - ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef VarList, - SourceLocation StartLoc, SourceLocation ColonLoc, - SourceLocation LParenLoc, SourceLocation EndLoc); - /// Called on well-formed 'private' clause. - OMPClause *ActOnOpenMPPrivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'firstprivate' clause. - OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'lastprivate' clause. - OMPClause *ActOnOpenMPLastprivateClause( - ArrayRef VarList, OpenMPLastprivateModifier LPKind, - SourceLocation LPKindLoc, SourceLocation ColonLoc, - SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); - /// Called on well-formed 'shared' clause. - OMPClause *ActOnOpenMPSharedClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'reduction' clause. - OMPClause *ActOnOpenMPReductionClause( - ArrayRef VarList, OpenMPReductionClauseModifier Modifier, - SourceLocation StartLoc, SourceLocation LParenLoc, - SourceLocation ModifierLoc, SourceLocation ColonLoc, - SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, - const DeclarationNameInfo &ReductionId, - ArrayRef UnresolvedReductions = std::nullopt); - /// Called on well-formed 'task_reduction' clause. - OMPClause *ActOnOpenMPTaskReductionClause( - ArrayRef VarList, SourceLocation StartLoc, - SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, - CXXScopeSpec &ReductionIdScopeSpec, - const DeclarationNameInfo &ReductionId, - ArrayRef UnresolvedReductions = std::nullopt); - /// Called on well-formed 'in_reduction' clause. - OMPClause *ActOnOpenMPInReductionClause( - ArrayRef VarList, SourceLocation StartLoc, - SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, - CXXScopeSpec &ReductionIdScopeSpec, - const DeclarationNameInfo &ReductionId, - ArrayRef UnresolvedReductions = std::nullopt); - /// Called on well-formed 'linear' clause. - OMPClause *ActOnOpenMPLinearClause( - ArrayRef VarList, Expr *Step, SourceLocation StartLoc, - SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, - SourceLocation LinLoc, SourceLocation ColonLoc, - SourceLocation StepModifierLoc, SourceLocation EndLoc); - /// Called on well-formed 'aligned' clause. 
- OMPClause *ActOnOpenMPAlignedClause(ArrayRef VarList, Expr *Alignment, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ColonLoc, - SourceLocation EndLoc); - /// Called on well-formed 'copyin' clause. - OMPClause *ActOnOpenMPCopyinClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'copyprivate' clause. - OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'flush' pseudo clause. - OMPClause *ActOnOpenMPFlushClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'depobj' pseudo clause. - OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'depend' clause. - OMPClause *ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, - Expr *DepModifier, - ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'device' clause. - OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, - Expr *Device, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ModifierLoc, - SourceLocation EndLoc); - /// Called on well-formed 'map' clause. - OMPClause *ActOnOpenMPMapClause( - Expr *IteratorModifier, ArrayRef MapTypeModifiers, - ArrayRef MapTypeModifiersLoc, - CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, - OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, - SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef VarList, - const OMPVarListLocTy &Locs, bool NoDiagnose = false, - ArrayRef UnresolvedMappers = std::nullopt); - /// Called on well-formed 'num_teams' clause. - OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'thread_limit' clause. - OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'priority' clause. - OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - /// Called on well-formed 'dist_schedule' clause. - OMPClause *ActOnOpenMPDistScheduleClause( - OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, - SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, - SourceLocation CommaLoc, SourceLocation EndLoc); - /// Called on well-formed 'defaultmap' clause. - OMPClause *ActOnOpenMPDefaultmapClause( - OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, - SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, - SourceLocation KindLoc, SourceLocation EndLoc); - /// Called on well-formed 'to' clause. - OMPClause * - ActOnOpenMPToClause(ArrayRef MotionModifiers, - ArrayRef MotionModifiersLoc, - CXXScopeSpec &MapperIdScopeSpec, - DeclarationNameInfo &MapperId, SourceLocation ColonLoc, - ArrayRef VarList, const OMPVarListLocTy &Locs, - ArrayRef UnresolvedMappers = std::nullopt); - /// Called on well-formed 'from' clause. 
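The variable-list clause callbacks in this region (firstprivate, lastprivate, reduction, map with array sections, and so on) consume the clause lists on directives such as these; the array-section bounds and names are illustrative:

```cpp
#include <cstddef>

// firstprivate/lastprivate/reduction clauses on a worksharing loop.
double weighted_sum(const double *W, const double *X, std::size_t N) {
  double Sum = 0.0;
  double Last = 0.0;
  double Bias = 1.5;
#pragma omp parallel for reduction(+ : Sum) firstprivate(Bias) lastprivate(Last)
  for (std::size_t I = 0; I < N; ++I) {
    Last = W[I] * X[I] + Bias;
    Sum += Last;
  }
  return Sum + Last;
}

// map(to:)/map(tofrom:) with array sections on a combined target construct;
// 'target teams distribute parallel for' is one of the combined directives
// handled earlier in this interface.
void saxpy(float A, const float *X, float *Y, std::size_t N) {
#pragma omp target teams distribute parallel for \
    map(to : X[0:N]) map(tofrom : Y[0:N])
  for (std::size_t I = 0; I < N; ++I)
    Y[I] = A * X[I] + Y[I];
}
```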
- OMPClause * - ActOnOpenMPFromClause(ArrayRef MotionModifiers, - ArrayRef MotionModifiersLoc, - CXXScopeSpec &MapperIdScopeSpec, - DeclarationNameInfo &MapperId, SourceLocation ColonLoc, - ArrayRef VarList, const OMPVarListLocTy &Locs, - ArrayRef UnresolvedMappers = std::nullopt); - /// Called on well-formed 'use_device_ptr' clause. - OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs); - /// Called on well-formed 'use_device_addr' clause. - OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs); - /// Called on well-formed 'is_device_ptr' clause. - OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs); - /// Called on well-formed 'has_device_addr' clause. - OMPClause *ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs); - /// Called on well-formed 'nontemporal' clause. - OMPClause *ActOnOpenMPNontemporalClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Data for list of allocators. - struct UsesAllocatorsData { - /// Allocator. - Expr *Allocator = nullptr; - /// Allocator traits. - Expr *AllocatorTraits = nullptr; - /// Locations of '(' and ')' symbols. - SourceLocation LParenLoc, RParenLoc; - }; - /// Called on well-formed 'uses_allocators' clause. - OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc, - ArrayRef Data); - /// Called on well-formed 'affinity' clause. - OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ColonLoc, - SourceLocation EndLoc, Expr *Modifier, - ArrayRef Locators); - /// Called on a well-formed 'bind' clause. - OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on a well-formed 'ompx_dyn_cgroup_mem' clause. - OMPClause *ActOnOpenMPXDynCGroupMemClause(Expr *Size, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on well-formed 'doacross' clause. - OMPClause * - ActOnOpenMPDoacrossClause(OpenMPDoacrossClauseModifier DepType, - SourceLocation DepLoc, SourceLocation ColonLoc, - ArrayRef VarList, SourceLocation StartLoc, - SourceLocation LParenLoc, SourceLocation EndLoc); - - /// Called on a well-formed 'ompx_attribute' clause. - OMPClause *ActOnOpenMPXAttributeClause(ArrayRef Attrs, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc); - - /// Called on a well-formed 'ompx_bare' clause. - OMPClause *ActOnOpenMPXBareClause(SourceLocation StartLoc, - SourceLocation EndLoc); - -private: - void *VarDataSharingAttributesStack; - - /// Number of nested '#pragma omp declare target' directives. - SmallVector DeclareTargetNesting; - - /// Initialization of data-sharing attributes stack. - void InitDataSharingAttributesStack(); - void DestroyDataSharingAttributesStack(); - - /// Returns OpenMP nesting level for current directive. - unsigned getOpenMPNestingLevel() const; - - /// Adjusts the function scopes index for the target-based regions. - void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, - unsigned Level) const; - - /// Returns the number of scopes associated with the construct on the given - /// OpenMP level. - int getNumberOfConstructScopes(unsigned Level) const; - - /// Push new OpenMP function region for non-capturing function. 
- void pushOpenMPFunctionRegion(); - - /// Pop OpenMP function region for non-capturing function. - void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); - - /// Analyzes and checks a loop nest for use by a loop transformation. - /// - /// \param Kind The loop transformation directive kind. - /// \param NumLoops How many nested loops the directive is expecting. - /// \param AStmt Associated statement of the transformation directive. - /// \param LoopHelpers [out] The loop analysis result. - /// \param Body [out] The body code nested in \p NumLoops loop. - /// \param OriginalInits [out] Collection of statements and declarations that - /// must have been executed/declared before entering the - /// loop. - /// - /// \return Whether there was any error. - bool checkTransformableLoopNest( - OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, - SmallVectorImpl &LoopHelpers, - Stmt *&Body, - SmallVectorImpl, 0>> - &OriginalInits); - - /// Helper to keep information about the current `omp begin/end declare - /// variant` nesting. - struct OMPDeclareVariantScope { - /// The associated OpenMP context selector. - OMPTraitInfo *TI; - - /// The associated OpenMP context selector mangling. - std::string NameSuffix; - - OMPDeclareVariantScope(OMPTraitInfo &TI); - }; - - /// Return the OMPTraitInfo for the surrounding scope, if any. - OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { - return OMPDeclareVariantScopes.empty() ? nullptr - : OMPDeclareVariantScopes.back().TI; - } - - /// The current `omp begin/end declare variant` scopes. - SmallVector OMPDeclareVariantScopes; - - /// The current `omp begin/end assumes` scopes. - SmallVector OMPAssumeScoped; - - /// All `omp assumes` we encountered so far. - SmallVector OMPAssumeGlobal; - - /// OMPD_loop is mapped to OMPD_for, OMPD_distribute or OMPD_simd depending - /// on the parameter of the bind clause. In the methods for the - /// mapped directives, check the parameters of the lastprivate clause. - bool checkLastPrivateForMappedDirectives(ArrayRef Clauses); - /// Depending on the bind clause of OMPD_loop map the directive to new - /// directives. - /// 1) loop bind(parallel) --> OMPD_for - /// 2) loop bind(teams) --> OMPD_distribute - /// 3) loop bind(thread) --> OMPD_simd - /// This is being handled in Sema instead of Codegen because of the need for - /// rigorous semantic checking in the new mapped directives. - bool mapLoopConstruct(llvm::SmallVector &ClausesWithoutBind, - ArrayRef Clauses, - OpenMPBindClauseKind &BindKind, - OpenMPDirectiveKind &Kind, - OpenMPDirectiveKind &PrevMappedDirective, - SourceLocation StartLoc, SourceLocation EndLoc, - const DeclarationNameInfo &DirName, - OpenMPDirectiveKind CancelRegion); - - ///@} - // - // - // ------------------------------------------------------------------------- - // - // - - /// \name SYCL Constructs - /// Implementations are in SemaSYCL.cpp - ///@{ - -private: - - void CheckSYCLKernelCall(FunctionDecl *CallerFunc, - ArrayRef Args); - - // We store SYCL Kernels here and handle separately -- which is a hack. - // FIXME: It would be best to refactor this. - llvm::SetVector SyclDeviceDecls; - // SYCL integration header instance for current compilation unit this Sema - // is associated with. - std::unique_ptr SyclIntHeader; - std::unique_ptr SyclIntFooter; - - // We need to store the list of the sycl_kernel functions and their associated - // generated OpenCL Kernels so we can go back and re-name these after the - // fact. 
- llvm::SmallVector> - SyclKernelsToOpenCLKernels; - - // Used to suppress diagnostics during kernel construction, since these were - // already emitted earlier. Diagnosing during Kernel emissions also skips the - // useful notes that shows where the kernel was called. - bool DiagnosingSYCLKernel = false; - -public: - void addSyclOpenCLKernel(const FunctionDecl *SyclKernel, - FunctionDecl *OpenCLKernel) { - SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel); - } - - void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); } - llvm::SetVector &syclDeviceDecls() { return SyclDeviceDecls; } - - /// Lazily creates and returns SYCL integration header instance. - SYCLIntegrationHeader &getSyclIntegrationHeader() { - if (SyclIntHeader == nullptr) - SyclIntHeader = std::make_unique(*this); - return *SyclIntHeader.get(); - } - - SYCLIntegrationFooter &getSyclIntegrationFooter() { - if (SyclIntFooter == nullptr) - SyclIntFooter = std::make_unique(*this); - return *SyclIntFooter.get(); - } - - void addSyclVarDecl(VarDecl *VD) { - if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty()) - getSyclIntegrationFooter().addVarDecl(VD); - } - - enum SYCLRestrictKind { - KernelGlobalVariable, - KernelRTTI, - KernelNonConstStaticDataVariable, - KernelCallVirtualFunction, - KernelUseExceptions, - KernelCallRecursiveFunction, - KernelCallFunctionPointer, - KernelAllocateStorage, - KernelUseAssembly, - KernelCallDllimportFunction, - KernelCallVariadicFunction, - KernelCallUndefinedFunction, - KernelConstStaticVariable - }; - - bool isDeclAllowedInSYCLDeviceCode(const Decl *D); - void checkSYCLDeviceVarDecl(VarDecl *Var); - void copySYCLKernelAttrs(CXXMethodDecl *CallOperator); - void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC); - void SetSYCLKernelNames(); - void MarkDevices(); - - /// Get the number of fields or captures within the parsed type. - ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT); - ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc, - QualType SourceTy); - - /// Get a value based on the type of the given field number so that callers - /// can wrap it in a decltype() to get the actual type of the field. - ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx); - ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, - QualType SourceTy, Expr *Idx); - - /// Get the number of base classes within the parsed type. - ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT); - ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc, - QualType SourceTy); - - /// Get a value based on the type of the given base number so that callers - /// can wrap it in a decltype() to get the actual type of the base class. - ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx); - ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy, - Expr *Idx); - - bool checkAllowedSYCLInitializer(VarDecl *VD); - - /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current - /// context is "used as device code". - /// - /// - If CurLexicalContext is a kernel function or it is known that the - /// function will be emitted for the device, emits the diagnostics - /// immediately. - /// - If CurLexicalContext is a function and we are compiling - /// for the device, but we don't know that this function will be codegen'ed - /// for device yet, creates a diagnostic which is emitted if and when we - /// realize that the function will be codegen'ed. 
- /// - /// Example usage: - /// - /// Diagnose __float128 type usage only from SYCL device code if the current - /// target doesn't support it - /// if (!S.Context.getTargetInfo().hasFloat128Type() && - /// S.getLangOpts().SYCLIsDevice) - /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; - SemaDiagnosticBuilder SYCLDiagIfDeviceCode( - SourceLocation Loc, unsigned DiagID, - DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl | - DeviceDiagnosticReason::Esimd); - - void deepTypeCheckForSYCLDevice(SourceLocation UsedAt, - llvm::DenseSet Visited, - ValueDecl *DeclToCheck); - - /// Finishes analysis of the deferred functions calls that may be not - /// properly declared for device compilation. - void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller, - const FunctionDecl *Callee, - SourceLocation Loc, - DeviceDiagnosticReason Reason); - - /// Tells whether given variable is a SYCL explicit SIMD extension's "private - /// global" variable - global variable in the private address space. - bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) { - return getLangOpts().SYCLIsDevice && VDecl->hasAttr() && - VDecl->hasGlobalStorage() && - (VDecl->getType().getAddressSpace() == LangAS::sycl_private); - } - - template - static bool isTypeDecoratedWithDeclAttribute(QualType Ty) { - const CXXRecordDecl *RecTy = Ty->getAsCXXRecordDecl(); - if (!RecTy) - return false; - - if (RecTy->hasAttr()) - return true; - - if (auto *CTSD = dyn_cast(RecTy)) { - ClassTemplateDecl *Template = CTSD->getSpecializedTemplate(); - if (CXXRecordDecl *RD = Template->getTemplatedDecl()) - return RD->hasAttr(); - } - return false; - } - - /// Check whether \p Ty corresponds to a SYCL type of name \p TypeName. - static bool isSyclType(QualType Ty, SYCLTypeAttr::SYCLType TypeName); - - ///@} }; DeductionFailureInfo @@ -15105,32 +13037,4 @@ std::unique_ptr CreateRISCVIntrinsicManager(Sema &S); } // end namespace clang -namespace llvm { -// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its -// SourceLocation. -template <> struct DenseMapInfo { - using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; - using FDBaseInfo = - DenseMapInfo>; - - static FunctionDeclAndLoc getEmptyKey() { - return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; - } - - static FunctionDeclAndLoc getTombstoneKey() { - return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; - } - - static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { - return hash_combine(FDBaseInfo::getHashValue(FDL.FD), - FDL.Loc.getHashValue()); - } - - static bool isEqual(const FunctionDeclAndLoc &LHS, - const FunctionDeclAndLoc &RHS) { - return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; - } -}; -} // namespace llvm - #endif diff --git a/clang/include/clang/Sema/SemaBase.h b/clang/include/clang/Sema/SemaBase.h index b62e39998228a..06850afce4247 100644 --- a/clang/include/clang/Sema/SemaBase.h +++ b/clang/include/clang/Sema/SemaBase.h @@ -211,7 +211,7 @@ class SemaBase { /// if (SemaDiagnosticBuilder(...) << foo << bar) /// return ExprError(); /// - /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably + /// But see DiagIfDeviceCode() and DiagIfHostCode() -- you probably /// want to use these instead of creating a SemaDiagnosticBuilder yourself. 
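// Illustrative sketch (not from the patch): the deferred-diagnostic pattern the
// comment above recommends, written against the DiagIfDeviceCode() helper that
// this series introduces in SemaCUDA.h below. The wrapper name and parameters
// are assumptions made for the example; the diagnostic ID and the
// '<< CurrentTarget()' usage mirror the doc comment in SemaCUDA.h.
static ExprResult checkCUDADeviceVLA(SemaCUDA &CUDA, SourceLocation Loc) {
  // Emits immediately in __device__/__global__ code, defers the error in
  // __host__ __device__ code until the function is known to be emitted for
  // the device, and is a no-op in plain __host__ code.
  if (CUDA.DiagIfDeviceCode(Loc, diag::err_cuda_vla) << CUDA.CurrentTarget())
    return ExprError();
  // Otherwise, continue as normal.
  return ExprEmpty();
}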
operator bool() const { return isImmediate(); } diff --git a/clang/include/clang/Sema/SemaCUDA.h b/clang/include/clang/Sema/SemaCUDA.h new file mode 100644 index 0000000000000..63dc3f4da240b --- /dev/null +++ b/clang/include/clang/Sema/SemaCUDA.h @@ -0,0 +1,304 @@ +//===----- SemaCUDA.h ----- Semantic Analysis for CUDA constructs ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file declares semantic analysis for CUDA constructs. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_SEMA_SEMACUDA_H +#define LLVM_CLANG_SEMA_SEMACUDA_H + +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Redeclarable.h" +#include "clang/Basic/Cuda.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Sema/Lookup.h" +#include "clang/Sema/Ownership.h" +#include "clang/Sema/ParsedAttr.h" +#include "clang/Sema/Scope.h" +#include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaBase.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include + +namespace clang { + +enum class CUDAFunctionTarget; + +class SemaCUDA : public SemaBase { +public: + SemaCUDA(Sema &S); + + /// Increments our count of the number of times we've seen a pragma forcing + /// functions to be __host__ __device__. So long as this count is greater + /// than zero, all functions encountered will be __host__ __device__. + void PushForceHostDevice(); + + /// Decrements our count of the number of times we've seen a pragma forcing + /// functions to be __host__ __device__. Returns false if the count is 0 + /// before incrementing, so you can emit an error. + bool PopForceHostDevice(); + + ExprResult ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc, + MultiExprArg ExecConfig, + SourceLocation GGGLoc); + + /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the + /// key in a hashtable, both the FD and location are hashed. + struct FunctionDeclAndLoc { + CanonicalDeclPtr FD; + SourceLocation Loc; + }; + + /// FunctionDecls and SourceLocations for which CheckCall has emitted a + /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the + /// same deferred diag twice. + llvm::DenseSet LocsWithCUDACallDiags; + + /// An inverse call graph, mapping known-emitted functions to one of their + /// known-emitted callers (plus the location of the call). + /// + /// Functions that we can tell a priori must be emitted aren't added to this + /// map. + llvm::DenseMap, + /* Caller = */ FunctionDeclAndLoc> + DeviceKnownEmittedFns; + + /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current + /// context is "used as device code". + /// + /// - If CurContext is a __host__ function, does not emit any diagnostics + /// unless \p EmitOnBothSides is true. + /// - If CurContext is a __device__ or __global__ function, emits the + /// diagnostics immediately. + /// - If CurContext is a __host__ __device__ function and we are compiling for + /// the device, creates a diagnostic which is emitted if and when we realize + /// that the function will be codegen'ed. + /// + /// Example usage: + /// + /// // Variable-length arrays are not allowed in CUDA device code. 
+ /// if (DiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentTarget()) + /// return ExprError(); + /// // Otherwise, continue parsing as normal. + SemaDiagnosticBuilder DiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); + + /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current + /// context is "used as host code". + /// + /// Same as DiagIfDeviceCode, with "host" and "device" switched. + SemaDiagnosticBuilder DiagIfHostCode(SourceLocation Loc, unsigned DiagID); + + /// Determines whether the given function is a CUDA device/host/kernel/etc. + /// function. + /// + /// Use this rather than examining the function's attributes yourself -- you + /// will get it wrong. Returns CUDAFunctionTarget::Host if D is null. + CUDAFunctionTarget IdentifyTarget(const FunctionDecl *D, + bool IgnoreImplicitHDAttr = false); + CUDAFunctionTarget IdentifyTarget(const ParsedAttributesView &Attrs); + + enum CUDAVariableTarget { + CVT_Device, /// Emitted on device side with a shadow variable on host side + CVT_Host, /// Emitted on host side only + CVT_Both, /// Emitted on both sides with different addresses + CVT_Unified, /// Emitted as a unified address, e.g. managed variables + }; + /// Determines whether the given variable is emitted on host or device side. + CUDAVariableTarget IdentifyTarget(const VarDecl *D); + + /// Defines kinds of CUDA global host/device context where a function may be + /// called. + enum CUDATargetContextKind { + CTCK_Unknown, /// Unknown context + CTCK_InitGlobalVar, /// Function called during global variable + /// initialization + }; + + /// Define the current global CUDA host/device context where a function may be + /// called. Only used when a function is called outside of any functions. + struct CUDATargetContext { + CUDAFunctionTarget Target = CUDAFunctionTarget::HostDevice; + CUDATargetContextKind Kind = CTCK_Unknown; + Decl *D = nullptr; + } CurCUDATargetCtx; + + struct CUDATargetContextRAII { + SemaCUDA &S; + SemaCUDA::CUDATargetContext SavedCtx; + CUDATargetContextRAII(SemaCUDA &S_, SemaCUDA::CUDATargetContextKind K, + Decl *D); + ~CUDATargetContextRAII() { S.CurCUDATargetCtx = SavedCtx; } + }; + + /// Gets the CUDA target for the current context. + CUDAFunctionTarget CurrentTarget() { + return IdentifyTarget(dyn_cast(SemaRef.CurContext)); + } + + static bool isImplicitHostDeviceFunction(const FunctionDecl *D); + + // CUDA function call preference. Must be ordered numerically from + // worst to best. + enum CUDAFunctionPreference { + CFP_Never, // Invalid caller/callee combination. + CFP_WrongSide, // Calls from host-device to host or device + // function that do not match current compilation + // mode. + CFP_HostDevice, // Any calls to host/device functions. + CFP_SameSide, // Calls from host-device to host or device + // function matching current compilation mode. + CFP_Native, // host-to-host or device-to-device calls. + }; + + /// Identifies relative preference of a given Caller/Callee + /// combination, based on their host/device attributes. + /// \param Caller function which needs address of \p Callee. + /// nullptr in case of global context. + /// \param Callee target function + /// + /// \returns preference value for particular Caller/Callee combination. + CUDAFunctionPreference IdentifyPreference(const FunctionDecl *Caller, + const FunctionDecl *Callee); + + /// Determines whether Caller may invoke Callee, based on their CUDA + /// host/device attributes. Returns false if the call is not allowed. 
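// Illustrative sketch (not from the patch): what the CUDAFunctionPreference
// ordering above means at the source level. The functions are made up; the
// preferences in the comments follow the enum documentation.
__device__ int devImpl() { return 1; }          // device-only callee
__host__ int hostImpl() { return 2; }           // host-only callee
__host__ __device__ int hdImpl() { return 3; }  // host-device callee

__device__ int deviceCaller() {
  int A = devImpl();  // device -> device: CFP_Native (best)
  int B = hdImpl();   // call to a host-device callee: CFP_HostDevice
  // hostImpl();      // device -> host: CFP_Never, rejected by CheckCall()
  return A + B;
}

__host__ __device__ int hdCaller() {
  // A call to hostImpl() here would be CFP_SameSide in host compilation but
  // CFP_WrongSide in device compilation: Sema accepts it, and the error is
  // deferred until this function is actually codegen'ed for the device.
  return hdImpl();    // CFP_HostDevice from either compilation side
}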
+ /// + /// Note: Will return true for CFP_WrongSide calls. These may appear in + /// semantically correct CUDA programs, but only if they're never codegen'ed. + bool IsAllowedCall(const FunctionDecl *Caller, const FunctionDecl *Callee) { + return IdentifyPreference(Caller, Callee) != CFP_Never; + } + + /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, + /// depending on FD and the current compilation settings. + void maybeAddHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); + + /// May add implicit CUDAConstantAttr attribute to VD, depending on VD + /// and current compilation settings. + void MaybeAddConstantAttr(VarDecl *VD); + + /// Check whether we're allowed to call Callee from the current context. + /// + /// - If the call is never allowed in a semantically-correct program + /// (CFP_Never), emits an error and returns false. + /// + /// - If the call is allowed in semantically-correct programs, but only if + /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to + /// be emitted if and when the caller is codegen'ed, and returns true. + /// + /// Will only create deferred diagnostics for a given SourceLocation once, + /// so you can safely call this multiple times without generating duplicate + /// deferred errors. + /// + /// - Otherwise, returns true without emitting any diagnostics. + bool CheckCall(SourceLocation Loc, FunctionDecl *Callee); + + void CheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); + + /// Set __device__ or __host__ __device__ attributes on the given lambda + /// operator() method. + /// + /// CUDA lambdas by default is host device function unless it has explicit + /// host or device attribute. + void SetLambdaAttrs(CXXMethodDecl *Method); + + /// Record \p FD if it is a CUDA/HIP implicit host device function used on + /// device side in device compilation. + void RecordImplicitHostDeviceFuncUsedByDevice(const FunctionDecl *FD); + + /// Finds a function in \p Matches with highest calling priority + /// from \p Caller context and erases all functions with lower + /// calling priority. + void EraseUnwantedMatches( + const FunctionDecl *Caller, + llvm::SmallVectorImpl> + &Matches); + + /// Given a implicit special member, infer its CUDA target from the + /// calls it needs to make to underlying base/field special members. + /// \param ClassDecl the class for which the member is being created. + /// \param CSM the kind of special member. + /// \param MemberDecl the special member itself. + /// \param ConstRHS true if this is a copy operation with a const object on + /// its RHS. + /// \param Diagnose true if this call should emit diagnostics. + /// \return true if there was an error inferring. + /// The result of this call is implicit CUDA target attribute(s) attached to + /// the member declaration. + bool inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, + CXXSpecialMemberKind CSM, + CXXMethodDecl *MemberDecl, + bool ConstRHS, bool Diagnose); + + /// \return true if \p CD can be considered empty according to CUDA + /// (E.2.3.1 in CUDA 7.5 Programming guide). + bool isEmptyConstructor(SourceLocation Loc, CXXConstructorDecl *CD); + bool isEmptyDestructor(SourceLocation Loc, CXXDestructorDecl *CD); + + // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In + // case of error emits appropriate diagnostic and invalidates \p Var. + // + // \details CUDA allows only empty constructors as initializers for global + // variables (see E.2.3.1, CUDA 7.5). 
The same restriction also applies to all + // __shared__ variables whether they are local or not (they all are implicitly + // static in CUDA). One exception is that CUDA allows constant initializers + // for __constant__ and __device__ variables. + void checkAllowedInitializer(VarDecl *VD); + + /// Check whether NewFD is a valid overload for CUDA. Emits + /// diagnostics and invalidates NewFD if not. + void checkTargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); + /// Copies target attributes from the template TD to the function FD. + void inheritTargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); + + /// Returns the name of the launch configuration function. This is the name + /// of the function that will be called to configure kernel call, with the + /// parameters specified via <<<>>>. + std::string getConfigureFuncName() const; + +private: + unsigned ForceHostDeviceDepth = 0; + + friend class ASTReader; + friend class ASTWriter; +}; + +} // namespace clang + +namespace llvm { +// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its +// SourceLocation. +template <> struct DenseMapInfo { + using FunctionDeclAndLoc = clang::SemaCUDA::FunctionDeclAndLoc; + using FDBaseInfo = + DenseMapInfo>; + + static FunctionDeclAndLoc getEmptyKey() { + return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; + } + + static FunctionDeclAndLoc getTombstoneKey() { + return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; + } + + static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { + return hash_combine(FDBaseInfo::getHashValue(FDL.FD), + FDL.Loc.getHashValue()); + } + + static bool isEqual(const FunctionDeclAndLoc &LHS, + const FunctionDeclAndLoc &RHS) { + return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; + } +}; +} // namespace llvm + +#endif // LLVM_CLANG_SEMA_SEMACUDA_H diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h index acc675963c23a..34acaf19517f2 100644 --- a/clang/include/clang/Sema/SemaHLSL.h +++ b/clang/include/clang/Sema/SemaHLSL.h @@ -13,12 +13,16 @@ #ifndef LLVM_CLANG_SEMA_SEMAHLSL_H #define LLVM_CLANG_SEMA_SEMAHLSL_H +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" #include "clang/AST/Expr.h" +#include "clang/Basic/AttributeCommonInfo.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/SourceLocation.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaBase.h" +#include namespace clang { @@ -26,10 +30,25 @@ class SemaHLSL : public SemaBase { public: SemaHLSL(Sema &S); - Decl *ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer, - SourceLocation KwLoc, IdentifierInfo *Ident, - SourceLocation IdentLoc, SourceLocation LBrace); - void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); + Decl *ActOnStartBuffer(Scope *BufferScope, bool CBuffer, SourceLocation KwLoc, + IdentifierInfo *Ident, SourceLocation IdentLoc, + SourceLocation LBrace); + void ActOnFinishBuffer(Decl *Dcl, SourceLocation RBrace); + HLSLNumThreadsAttr *mergeNumThreadsAttr(Decl *D, + const AttributeCommonInfo &AL, int X, + int Y, int Z); + HLSLShaderAttr *mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL, + HLSLShaderAttr::ShaderType ShaderType); + HLSLParamModifierAttr * + mergeParamModifierAttr(Decl *D, const AttributeCommonInfo &AL, + HLSLParamModifierAttr::Spelling Spelling); + void ActOnTopLevelFunction(FunctionDecl *FD); + void CheckEntryPoint(FunctionDecl *FD); + void CheckSemanticAnnotation(FunctionDecl *EntryPoint, const Decl *Param, + const 
HLSLAnnotationAttr *AnnotationAttr); + void DiagnoseAttrStageMismatch( + const Attr *A, HLSLShaderAttr::ShaderType Stage, + std::initializer_list AllowedStages); }; } // namespace clang diff --git a/clang/include/clang/Sema/SemaOpenACC.h b/clang/include/clang/Sema/SemaOpenACC.h index 45929e4a9db3f..329dc3945fa2a 100644 --- a/clang/include/clang/Sema/SemaOpenACC.h +++ b/clang/include/clang/Sema/SemaOpenACC.h @@ -19,6 +19,7 @@ #include "clang/Basic/SourceLocation.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/SemaBase.h" +#include namespace clang { class OpenACCClause; @@ -35,7 +36,16 @@ class SemaOpenACC : public SemaBase { SourceRange ClauseRange; SourceLocation LParenLoc; - // TODO OpenACC: Add variant here to store details of individual clauses. + struct DefaultDetails { + OpenACCDefaultClauseKind DefaultClauseKind; + }; + + struct ConditionDetails { + Expr *ConditionExpr; + }; + + std::variant Details = + std::monostate{}; public: OpenACCParsedClause(OpenACCDirectiveKind DirKind, @@ -52,8 +62,53 @@ class SemaOpenACC : public SemaBase { SourceLocation getEndLoc() const { return ClauseRange.getEnd(); } + OpenACCDefaultClauseKind getDefaultClauseKind() const { + assert(ClauseKind == OpenACCClauseKind::Default && + "Parsed clause is not a default clause"); + return std::get(Details).DefaultClauseKind; + } + + const Expr *getConditionExpr() const { + return const_cast(this)->getConditionExpr(); + } + + Expr *getConditionExpr() { + assert((ClauseKind == OpenACCClauseKind::If || + (ClauseKind == OpenACCClauseKind::Self && + DirKind != OpenACCDirectiveKind::Update)) && + "Parsed clause kind does not have a condition expr"); + + // 'self' has an optional ConditionExpr, so be tolerant of that. This will + // assert in variant otherwise. + if (ClauseKind == OpenACCClauseKind::Self && + std::holds_alternative(Details)) + return nullptr; + + return std::get(Details).ConditionExpr; + } + void setLParenLoc(SourceLocation EndLoc) { LParenLoc = EndLoc; } void setEndLoc(SourceLocation EndLoc) { ClauseRange.setEnd(EndLoc); } + + void setDefaultDetails(OpenACCDefaultClauseKind DefKind) { + assert(ClauseKind == OpenACCClauseKind::Default && + "Parsed clause is not a default clause"); + Details = DefaultDetails{DefKind}; + } + + void setConditionDetails(Expr *ConditionExpr) { + assert((ClauseKind == OpenACCClauseKind::If || + (ClauseKind == OpenACCClauseKind::Self && + DirKind != OpenACCDirectiveKind::Update)) && + "Parsed clause kind does not have a condition expr"); + // In C++ we can count on this being a 'bool', but in C this gets left as + // some sort of scalar that codegen will have to take care of converting. + assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() || + ConditionExpr->getType()->isScalarType()) && + "Condition expression type not scalar/dependent"); + + Details = ConditionDetails{ConditionExpr}; + } }; SemaOpenACC(Sema &S); diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h new file mode 100644 index 0000000000000..9927459bbc594 --- /dev/null +++ b/clang/include/clang/Sema/SemaOpenMP.h @@ -0,0 +1,1447 @@ +//===----- SemaOpenMP.h -- Semantic Analysis for OpenMP constructs -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file declares semantic analysis for OpenMP constructs and +/// clauses. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_SEMA_SEMAOPENMP_H +#define LLVM_CLANG_SEMA_SEMAOPENMP_H + +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclOpenMP.h" +#include "clang/AST/DeclarationName.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprOpenMP.h" +#include "clang/AST/OpenMPClause.h" +#include "clang/AST/Stmt.h" +#include "clang/AST/StmtOpenMP.h" +#include "clang/AST/Type.h" +#include "clang/Basic/IdentifierTable.h" +#include "clang/Basic/LLVM.h" +#include "clang/Basic/OpenMPKinds.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/Specifiers.h" +#include "clang/Sema/DeclSpec.h" +#include "clang/Sema/Ownership.h" +#include "clang/Sema/Scope.h" +#include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaBase.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/PointerUnion.h" +#include +#include +#include + +namespace clang { + +class SemaOpenMP : public SemaBase { +public: + SemaOpenMP(Sema &S); + + friend class Parser; + friend class Sema; + + using DeclGroupPtrTy = OpaquePtr; + using CapturedParamNameType = std::pair; + + /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current + /// context is "used as device code". + /// + /// - If CurContext is a `declare target` function or it is known that the + /// function is emitted for the device, emits the diagnostics immediately. + /// - If CurContext is a non-`declare target` function and we are compiling + /// for the device, creates a diagnostic which is emitted if and when we + /// realize that the function will be codegen'ed. + /// + /// Example usage: + /// + /// // Variable-length arrays are not allowed in NVPTX device code. + /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) + /// return ExprError(); + /// // Otherwise, continue parsing as normal. + SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, + unsigned DiagID, + const FunctionDecl *FD); + + /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current + /// context is "used as host code". + /// + /// - If CurContext is a `declare target` function or it is known that the + /// function is emitted for the host, emits the diagnostics immediately. + /// - If CurContext is a non-host function, just ignore it. + /// + /// Example usage: + /// + /// // Variable-length arrays are not allowed in NVPTX device code. + /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) + /// return ExprError(); + /// // Otherwise, continue parsing as normal. + SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc, + unsigned DiagID, + const FunctionDecl *FD); + + /// The declarator \p D defines a function in the scope \p S which is nested + /// in an `omp begin/end declare variant` scope. In this method we create a + /// declaration for \p D and rename \p D according to the OpenMP context + /// selector of the surrounding scope. Return all base functions in \p Bases. 
+ void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( + Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, + SmallVectorImpl &Bases); + + /// Register \p D as specialization of all base functions in \p Bases in the + /// current `omp begin/end declare variant` scope. + void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( + Decl *D, SmallVectorImpl &Bases); + + /// Act on \p D, a function definition inside of an `omp [begin/end] assumes`. + void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); + + /// Can we exit an OpenMP declare variant scope at the moment. + bool isInOpenMPDeclareVariantScope() const { + return !OMPDeclareVariantScopes.empty(); + } + + ExprResult + VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, + bool StrictlyPositive = true, + bool SuppressExprDiags = false); + + /// Given the potential call expression \p Call, determine if there is a + /// specialization via the OpenMP declare variant mechanism available. If + /// there is, return the specialized call expression, otherwise return the + /// original \p Call. + ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, + SourceLocation LParenLoc, MultiExprArg ArgExprs, + SourceLocation RParenLoc, Expr *ExecConfig); + + /// Handle a `omp begin declare variant`. + void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); + + /// Handle a `omp end declare variant`. + void ActOnOpenMPEndDeclareVariant(); + + /// Function tries to capture lambda's captured variables in the OpenMP region + /// before the original lambda is captured. + void tryCaptureOpenMPLambdas(ValueDecl *V); + + /// Return true if the provided declaration \a VD should be captured by + /// reference. + /// \param Level Relative level of nested OpenMP construct for that the check + /// is performed. + /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. + bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, + unsigned OpenMPCaptureLevel) const; + + /// Check if the specified variable is used in one of the private + /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP + /// constructs. + VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, + unsigned StopAt = 0); + + /// The member expression(this->fd) needs to be rebuilt in the template + /// instantiation to generate private copy for OpenMP when default + /// clause is used. The function will return true if default + /// cluse is used. + bool isOpenMPRebuildMemberExpr(ValueDecl *D); + + ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, + ExprObjectKind OK, SourceLocation Loc); + + /// If the current region is a loop-based region, mark the start of the loop + /// construct. + void startOpenMPLoop(); + + /// If the current region is a range loop-based region, mark the start of the + /// loop construct. + void startOpenMPCXXRangeFor(); + + /// Check if the specified variable is used in 'private' clause. + /// \param Level Relative level of nested OpenMP construct for that the check + /// is performed. + OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, + unsigned CapLevel) const; + + /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) + /// for \p FD based on DSA for the provided corresponding captured declaration + /// \p D. + void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); + + /// Check if the specified variable is captured by 'target' directive. 
+ /// \param Level Relative level of nested OpenMP construct for that the check + /// is performed. + bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, + unsigned CaptureLevel) const; + + /// Check if the specified global variable must be captured by outer capture + /// regions. + /// \param Level Relative level of nested OpenMP construct for that + /// the check is performed. + bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, + unsigned CaptureLevel) const; + + ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, + Expr *Op); + /// Called on start of new data sharing attribute block. + void StartOpenMPDSABlock(OpenMPDirectiveKind K, + const DeclarationNameInfo &DirName, Scope *CurScope, + SourceLocation Loc); + /// Start analysis of clauses. + void StartOpenMPClause(OpenMPClauseKind K); + /// End analysis of clauses. + void EndOpenMPClause(); + /// Called on end of data sharing attribute block. + void EndOpenMPDSABlock(Stmt *CurDirective); + + /// Check if the current region is an OpenMP loop region and if it is, + /// mark loop control variable, used in \p Init for loop initialization, as + /// private by default. + /// \param Init First part of the for loop. + void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); + + /// Called on well-formed '\#pragma omp metadirective' after parsing + /// of the associated statement. + StmtResult ActOnOpenMPMetaDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + + // OpenMP directives and clauses. + /// Called on correct id-expression from the '#pragma omp + /// threadprivate'. + ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, + const DeclarationNameInfo &Id, + OpenMPDirectiveKind Kind); + /// Called on well-formed '#pragma omp threadprivate'. + DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, + ArrayRef VarList); + /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. + OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, + ArrayRef VarList); + /// Called on well-formed '#pragma omp allocate'. + DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, + ArrayRef VarList, + ArrayRef Clauses, + DeclContext *Owner = nullptr); + + /// Called on well-formed '#pragma omp [begin] assume[s]'. + void ActOnOpenMPAssumesDirective(SourceLocation Loc, + OpenMPDirectiveKind DKind, + ArrayRef Assumptions, + bool SkippedClauses); + + /// Check if there is an active global `omp begin assumes` directive. + bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } + + /// Check if there is an active global `omp assumes` directive. + bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } + + /// Called on well-formed '#pragma omp end assumes'. + void ActOnOpenMPEndAssumesDirective(); + + /// Called on well-formed '#pragma omp requires'. + DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, + ArrayRef ClauseList); + /// Check restrictions on Requires directive + OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, + ArrayRef Clauses); + /// Check if the specified type is allowed to be used in 'omp declare + /// reduction' construct. + QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, + TypeResult ParsedType); + /// Called on start of '#pragma omp declare reduction'. 
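// Illustrative sketch (not from the patch): the source form handled by the
// ActOnOpenMPDeclareReduction* entry points declared below. The Start hook
// opens the construct, ActOnOpenMPDeclareReductionCombinerEnd() records the
// omp_out/omp_in combiner, and ActOnOpenMPDeclareReductionInitializerEnd()
// records the optional omp_priv initializer. The type and reduction
// identifier here are invented for the example.
struct Acc {
  double Sum = 0.0;
};
#pragma omp declare reduction(accplus : Acc : omp_out.Sum += omp_in.Sum) \
    initializer(omp_priv = Acc())

double total(const double *X, int N) {
  Acc A;
#pragma omp parallel for reduction(accplus : A)
  for (int I = 0; I < N; ++I)
    A.Sum += X[I];
  return A.Sum;
}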
+ DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( + Scope *S, DeclContext *DC, DeclarationName Name, + ArrayRef> ReductionTypes, + AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); + /// Initialize declare reduction construct initializer. + void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); + /// Finish current declare reduction construct initializer. + void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); + /// Initialize declare reduction construct initializer. + /// \return omp_priv variable. + VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); + /// Finish current declare reduction construct initializer. + void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, + VarDecl *OmpPrivParm); + /// Called at the end of '#pragma omp declare reduction'. + DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( + Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); + + /// Check variable declaration in 'omp declare mapper' construct. + TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); + /// Check if the specified type is allowed to be used in 'omp declare + /// mapper' construct. + QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, + TypeResult ParsedType); + /// Called on start of '#pragma omp declare mapper'. + DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( + Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, + SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, + Expr *MapperVarRef, ArrayRef Clauses, + Decl *PrevDeclInScope = nullptr); + /// Build the mapper variable of '#pragma omp declare mapper'. + ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, + QualType MapperType, + SourceLocation StartLoc, + DeclarationName VN); + void ActOnOpenMPIteratorVarDecl(VarDecl *VD); + bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; + const ValueDecl *getOpenMPDeclareMapperVarName() const; + + struct DeclareTargetContextInfo { + struct MapInfo { + OMPDeclareTargetDeclAttr::MapTypeTy MT; + SourceLocation Loc; + }; + /// Explicitly listed variables and functions in a 'to' or 'link' clause. + llvm::DenseMap ExplicitlyMapped; + + /// The 'device_type' as parsed from the clause. + OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; + + /// The directive kind, `begin declare target` or `declare target`. + OpenMPDirectiveKind Kind; + + /// The directive with indirect clause. + std::optional Indirect; + + /// The directive location. + SourceLocation Loc; + + DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) + : Kind(Kind), Loc(Loc) {} + }; + + /// Called on the start of target region i.e. '#pragma omp declare target'. + bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); + + /// Called at the end of target region i.e. '#pragma omp end declare target'. + const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); + + /// Called once a target context is completed, that can be when a + /// '#pragma omp end declare target' was encountered or when a + /// '#pragma omp declare target' without declaration-definition-seq was + /// encountered. + void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); + + /// Report unterminated 'omp declare target' or 'omp begin declare target' at + /// the end of a compilation unit. 
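// Illustrative sketch (not from the patch): the bracketed construct tracked by
// the DeclareTargetContextInfo machinery above. The parser opens a context via
// ActOnStartOpenMPDeclareTargetContext(), declarations seen until the matching
// 'end declare target' belong to it, ActOnOpenMPEndDeclareTargetDirective()
// closes it, and a missing terminator is reported at the end of the TU by
// DiagnoseUnterminatedOpenMPDeclareTarget() (declared just below).
#pragma omp declare target
static int DeviceCounter = 0;   // gets a device copy
int bump() { return ++DeviceCounter; }
#pragma omp end declare target

int useOnDevice() {
  int Result = 0;
#pragma omp target map(tofrom : Result)
  Result = bump();              // offloaded call into the declare-target code
  return Result;
}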
+ void DiagnoseUnterminatedOpenMPDeclareTarget(); + + /// Searches for the provided declaration name for OpenMP declare target + /// directive. + NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, + CXXScopeSpec &ScopeSpec, + const DeclarationNameInfo &Id); + + /// Called on correct id-expression from the '#pragma omp declare target'. + void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, + OMPDeclareTargetDeclAttr::MapTypeTy MT, + DeclareTargetContextInfo &DTCI); + + /// Check declaration inside target region. + void + checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, + SourceLocation IdLoc = SourceLocation()); + + /// Adds OMPDeclareTargetDeclAttr to referenced variables in declare target + /// directive. + void ActOnOpenMPDeclareTargetInitializer(Decl *D); + + /// Finishes analysis of the deferred functions calls that may be declared as + /// host/nohost during device/host compilation. + void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, + const FunctionDecl *Callee, + SourceLocation Loc); + + /// Return true if currently in OpenMP task with untied clause context. + bool isInOpenMPTaskUntiedContext() const; + + /// Return true inside OpenMP declare target region. + bool isInOpenMPDeclareTargetContext() const { + return !DeclareTargetNesting.empty(); + } + /// Return true inside OpenMP target region. + bool isInOpenMPTargetExecutionDirective() const; + + /// Return the number of captured regions created for an OpenMP directive. + static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); + + /// Initialization of captured region for OpenMP region. + void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); + + /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to + /// an OpenMP loop directive. + StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); + + /// Process a canonical OpenMP loop nest that can either be a canonical + /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an + /// OpenMP loop transformation construct. + StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); + + /// End of OpenMP region. + /// + /// \param S Statement associated with the current OpenMP region. + /// \param Clauses List of clauses for the current OpenMP region. + /// + /// \returns Statement for finished OpenMP region. + StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef Clauses); + StmtResult ActOnOpenMPExecutableDirective( + OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, + OpenMPDirectiveKind CancelRegion, ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, + OpenMPDirectiveKind PrevMappedDirective = llvm::omp::OMPD_unknown); + /// Called on well-formed '\#pragma omp parallel' after parsing + /// of the associated statement. + StmtResult ActOnOpenMPParallelDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + using VarsWithInheritedDSAType = + llvm::SmallDenseMap; + /// Called on well-formed '\#pragma omp simd' after parsing + /// of the associated statement. + StmtResult + ActOnOpenMPSimdDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '#pragma omp tile' after parsing of its clauses and + /// the associated statement. 
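// Illustrative sketch (not from the patch): a loop-transformation directive of
// the kind ActOnOpenMPTileDirective() below accepts; its sizes clause goes
// through ActOnOpenMPSizesClause() further down. The tile sizes are arbitrary.
void blockedCopy(float (&Dst)[128][128], const float (&Src)[128][128]) {
#pragma omp tile sizes(16, 16)
  for (int I = 0; I < 128; ++I)
    for (int J = 0; J < 128; ++J)
      Dst[I][J] = Src[I][J];
}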
+ StmtResult ActOnOpenMPTileDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '#pragma omp unroll' after parsing of its clauses + /// and the associated statement. + StmtResult ActOnOpenMPUnrollDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp for' after parsing + /// of the associated statement. + StmtResult + ActOnOpenMPForDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp for simd' after parsing + /// of the associated statement. + StmtResult + ActOnOpenMPForSimdDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp sections' after parsing + /// of the associated statement. + StmtResult ActOnOpenMPSectionsDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp section' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp scope' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPScopeDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp single' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPSingleDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp master' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp critical' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, + ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp parallel for' after parsing + /// of the associated statement. + StmtResult ActOnOpenMPParallelForDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel for simd' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelForSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel master' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp parallel masked' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelMaskedDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp parallel sections' after + /// parsing of the associated statement. 
+  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef Clauses,
+                                                  Stmt *AStmt,
+                                                  SourceLocation StartLoc,
+                                                  SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp task' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPTaskDirective(ArrayRef Clauses,
+                                      Stmt *AStmt, SourceLocation StartLoc,
+                                      SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp taskyield'.
+  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
+                                           SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp error'.
+  /// The error directive is allowed in both declarative and executable
+  /// contexts. InExContext identifies which context it is called from.
+  StmtResult ActOnOpenMPErrorDirective(ArrayRef Clauses,
+                                       SourceLocation StartLoc,
+                                       SourceLocation EndLoc,
+                                       bool InExContext = true);
+  /// Called on well-formed '\#pragma omp barrier'.
+  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
+                                         SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp taskwait'.
+  StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef Clauses,
+                                          SourceLocation StartLoc,
+                                          SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp taskgroup'.
+  StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef Clauses,
+                                           Stmt *AStmt, SourceLocation StartLoc,
+                                           SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp flush'.
+  StmtResult ActOnOpenMPFlushDirective(ArrayRef Clauses,
+                                       SourceLocation StartLoc,
+                                       SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp depobj'.
+  StmtResult ActOnOpenMPDepobjDirective(ArrayRef Clauses,
+                                        SourceLocation StartLoc,
+                                        SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp scan'.
+  StmtResult ActOnOpenMPScanDirective(ArrayRef Clauses,
+                                      SourceLocation StartLoc,
+                                      SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp ordered' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPOrderedDirective(ArrayRef Clauses,
+                                         Stmt *AStmt, SourceLocation StartLoc,
+                                         SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp atomic' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPAtomicDirective(ArrayRef Clauses,
+                                        Stmt *AStmt, SourceLocation StartLoc,
+                                        SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp target' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPTargetDirective(ArrayRef Clauses,
+                                        Stmt *AStmt, SourceLocation StartLoc,
+                                        SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp target data' after parsing of
+  /// the associated statement.
+  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef Clauses,
+                                            Stmt *AStmt,
+                                            SourceLocation StartLoc,
+                                            SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp target enter data' after
+  /// parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef Clauses,
+                                                 SourceLocation StartLoc,
+                                                 SourceLocation EndLoc,
+                                                 Stmt *AStmt);
+  /// Called on well-formed '\#pragma omp target exit data' after
+  /// parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef Clauses,
+                                                SourceLocation StartLoc,
+                                                SourceLocation EndLoc,
+                                                Stmt *AStmt);
+  /// Called on well-formed '\#pragma omp target parallel' after
+  /// parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef Clauses,
+                                                Stmt *AStmt,
+                                                SourceLocation StartLoc,
+                                                SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp target parallel for' after
+  /// parsing of the associated statement.
+ StmtResult ActOnOpenMPTargetParallelForDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp teams' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPTeamsDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp teams loop' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPTeamsGenericLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp target teams loop' after parsing of + /// the associated statement. + StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel loop' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPParallelGenericLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp target parallel loop' after parsing + /// of the associated statement. + StmtResult ActOnOpenMPTargetParallelGenericLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp cancellation point'. + StmtResult + ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, + SourceLocation EndLoc, + OpenMPDirectiveKind CancelRegion); + /// Called on well-formed '\#pragma omp cancel'. + StmtResult ActOnOpenMPCancelDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc, + OpenMPDirectiveKind CancelRegion); + /// Called on well-formed '\#pragma omp taskloop' after parsing of the + /// associated statement. + StmtResult + ActOnOpenMPTaskLoopDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp taskloop simd' after parsing of + /// the associated statement. + StmtResult ActOnOpenMPTaskLoopSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp master taskloop' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPMasterTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of + /// the associated statement. + StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel master taskloop' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel master taskloop simd' after + /// parsing of the associated statement. 
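// Illustrative sketch (not from the patch): a combined tasking construct of the
// kind ActOnOpenMPParallelMasterTaskLoopSimdDirective() below accepts; the
// grainsize clause is handled by ActOnOpenMPGrainsizeClause() further down.
// The grainsize value is arbitrary.
void doubleAll(double *X, int N) {
#pragma omp parallel master taskloop simd grainsize(64)
  for (int I = 0; I < N; ++I)
    X[I] *= 2.0;
}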
+ StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp masked taskloop' after parsing of the + /// associated statement. + StmtResult ActOnOpenMPMaskedTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp masked taskloop simd' after parsing of + /// the associated statement. + StmtResult ActOnOpenMPMaskedTaskLoopSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel masked taskloop' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelMaskedTaskLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp parallel masked taskloop simd' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPParallelMaskedTaskLoopSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp distribute' after parsing + /// of the associated statement. + StmtResult + ActOnOpenMPDistributeDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp target update'. + StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc, + Stmt *AStmt); + /// Called on well-formed '\#pragma omp distribute parallel for' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPDistributeParallelForDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp distribute parallel for simd' + /// after parsing of the associated statement. + StmtResult ActOnOpenMPDistributeParallelForSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp distribute simd' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPDistributeSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp target parallel for simd' after + /// parsing of the associated statement. + StmtResult ActOnOpenMPTargetParallelForSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp target simd' after parsing of + /// the associated statement. + StmtResult + ActOnOpenMPTargetSimdDirective(ArrayRef Clauses, Stmt *AStmt, + SourceLocation StartLoc, SourceLocation EndLoc, + VarsWithInheritedDSAType &VarsWithImplicitDSA); + /// Called on well-formed '\#pragma omp teams distribute' after parsing of + /// the associated statement. 
+  StmtResult ActOnOpenMPTeamsDistributeDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp teams distribute simd' after parsing
+  /// of the associated statement.
+  StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp teams distribute parallel for simd'
+  /// after parsing of the associated statement.
+  StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp teams distribute parallel for'
+  /// after parsing of the associated statement.
+  StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp target teams' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses,
+                                             Stmt *AStmt,
+                                             SourceLocation StartLoc,
+                                             SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp target teams distribute' after parsing
+  /// of the associated statement.
+  StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp target teams distribute parallel for'
+  /// after parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp target teams distribute parallel for
+  /// simd' after parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp target teams distribute simd' after
+  /// parsing of the associated statement.
+  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
+      ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp interop'.
+  StmtResult ActOnOpenMPInteropDirective(ArrayRef Clauses,
+                                         SourceLocation StartLoc,
+                                         SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp dispatch' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPDispatchDirective(ArrayRef Clauses,
+                                          Stmt *AStmt, SourceLocation StartLoc,
+                                          SourceLocation EndLoc);
+  /// Called on well-formed '\#pragma omp masked' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPMaskedDirective(ArrayRef Clauses,
+                                        Stmt *AStmt, SourceLocation StartLoc,
+                                        SourceLocation EndLoc);
+
+  /// Called on well-formed '\#pragma omp loop' after parsing of the
+  /// associated statement.
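// Illustrative sketch (not from the patch): the generic 'loop' construct that
// ActOnOpenMPGenericLoopDirective() below handles. With bind(parallel) the
// directive is mapped onto a worksharing 'for', per the mapLoopConstruct()
// mapping removed from Sema.h earlier in this diff.
void scaleAll(float *A, int N, float F) {
#pragma omp parallel
  {
#pragma omp loop bind(parallel)
    for (int I = 0; I < N; ++I)
      A[I] *= F;
  }
}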
+ StmtResult ActOnOpenMPGenericLoopDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); + + /// Checks correctness of linear modifiers. + bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, + SourceLocation LinLoc); + /// Checks that the specified declaration matches requirements for the linear + /// decls. + bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, + OpenMPLinearClauseKind LinKind, QualType Type, + bool IsDeclareSimd = false); + + /// Called on well-formed '\#pragma omp declare simd' after parsing of + /// the associated method/function. + DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( + DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, + Expr *Simdlen, ArrayRef Uniforms, ArrayRef Aligneds, + ArrayRef Alignments, ArrayRef Linears, + ArrayRef LinModifiers, ArrayRef Steps, SourceRange SR); + + /// Checks '\#pragma omp declare variant' variant function and original + /// functions after parsing of the associated method/function. + /// \param DG Function declaration to which declare variant directive is + /// applied to. + /// \param VariantRef Expression that references the variant function, which + /// must be used instead of the original one, specified in \p DG. + /// \param TI The trait info object representing the match clause. + /// \param NumAppendArgs The number of omp_interop_t arguments to account for + /// in checking. + /// \returns std::nullopt, if the function/variant function are not compatible + /// with the pragma, pair of original function/variant ref expression + /// otherwise. + std::optional> + checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, + OMPTraitInfo &TI, unsigned NumAppendArgs, + SourceRange SR); + + /// Called on well-formed '\#pragma omp declare variant' after parsing of + /// the associated method/function. + /// \param FD Function declaration to which declare variant directive is + /// applied to. + /// \param VariantRef Expression that references the variant function, which + /// must be used instead of the original one, specified in \p DG. + /// \param TI The context traits associated with the function variant. + /// \param AdjustArgsNothing The list of 'nothing' arguments. + /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. + /// \param AppendArgs The list of 'append_args' arguments. + /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. + /// \param AppendArgsLoc The Location of an 'append_args' clause. + /// \param SR The SourceRange of the 'declare variant' directive. + void ActOnOpenMPDeclareVariantDirective( + FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, + ArrayRef AdjustArgsNothing, + ArrayRef AdjustArgsNeedDevicePtr, + ArrayRef AppendArgs, SourceLocation AdjustArgsLoc, + SourceLocation AppendArgsLoc, SourceRange SR); + + OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'allocator' clause. + OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'if' clause. 
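// (Editor's illustration, not part of the patch.) The 'declare variant'
// entry points documented above correspond to user code of roughly this
// shape; 'fast_impl', 'base' and the selector operands are hypothetical:

#include <omp.h>

// Variant: append_args(interop(target)) appends one omp_interop_t parameter
// relative to the base function's signature.
void fast_impl(int *p, omp_interop_t i);

#pragma omp declare variant(fast_impl) match(construct={dispatch}) \
    adjust_args(need_device_ptr : p) append_args(interop(target))
void base(int *p);

// checkOpenMPDeclareVariantFunction verifies that 'fast_impl' is compatible
// with 'base' given NumAppendArgs == 1, and ActOnOpenMPDeclareVariantDirective
// then records the adjust_args/append_args information for 'base'.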
+ OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
+ Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation NameModifierLoc,
+ SourceLocation ColonLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'final' clause.
+ OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'num_threads' clause.
+ OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'align' clause.
+ OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'safelen' clause.
+ OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'simdlen' clause.
+ OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'sizes' clause.
+ OMPClause *ActOnOpenMPSizesClause(ArrayRef SizeExprs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'full' clause.
+ OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'partial' clause.
+ OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'collapse' clause.
+ OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'ordered' clause.
+ OMPClause *
+ ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
+ SourceLocation LParenLoc = SourceLocation(),
+ Expr *NumForLoops = nullptr);
+ /// Called on well-formed 'grainsize' clause.
+ OMPClause *ActOnOpenMPGrainsizeClause(OpenMPGrainsizeClauseModifier Modifier,
+ Expr *Size, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'num_tasks' clause.
+ OMPClause *ActOnOpenMPNumTasksClause(OpenMPNumTasksClauseModifier Modifier,
+ Expr *NumTasks, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'hint' clause.
+ OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'detach' clause.
+ OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
+ SourceLocation ArgumentLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'when' clause.
+ OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'default' clause.
+ OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'proc_bind' clause.
+ OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, + SourceLocation KindLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'order' clause. + OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseModifier Modifier, + OpenMPOrderClauseKind Kind, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation MLoc, SourceLocation KindLoc, + SourceLocation EndLoc); + /// Called on well-formed 'update' clause. + OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, + SourceLocation KindLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + OMPClause *ActOnOpenMPSingleExprWithArgClause( + OpenMPClauseKind Kind, ArrayRef Arguments, Expr *Expr, + SourceLocation StartLoc, SourceLocation LParenLoc, + ArrayRef ArgumentsLoc, SourceLocation DelimLoc, + SourceLocation EndLoc); + /// Called on well-formed 'schedule' clause. + OMPClause *ActOnOpenMPScheduleClause( + OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, + OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, + SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); + + OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'nowait' clause. + OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'untied' clause. + OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'mergeable' clause. + OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'read' clause. + OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'write' clause. + OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'update' clause. + OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'capture' clause. + OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'compare' clause. + OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'fail' clause. + OMPClause *ActOnOpenMPFailClause(SourceLocation StartLoc, + SourceLocation EndLoc); + OMPClause *ActOnOpenMPFailClause(OpenMPClauseKind Kind, + SourceLocation KindLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + /// Called on well-formed 'seq_cst' clause. + OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'acq_rel' clause. + OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'acquire' clause. + OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'release' clause. + OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'relaxed' clause. + OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed 'weak' clause. 
+ OMPClause *ActOnOpenMPWeakClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'init' clause.
+ OMPClause *
+ ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc);
+
+ /// Called on well-formed 'use' clause.
+ OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc);
+
+ /// Called on well-formed 'destroy' clause.
+ OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'novariants' clause.
+ OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'nocontext' clause.
+ OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'filter' clause.
+ OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'threads' clause.
+ OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'simd' clause.
+ OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'nogroup' clause.
+ OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'unified_address' clause.
+ OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'unified_shared_memory' clause.
+ OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'reverse_offload' clause.
+ OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'dynamic_allocators' clause.
+ OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'atomic_default_mem_order' clause.
+ OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
+ OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
+
+ /// Called on well-formed 'at' clause.
+ OMPClause *ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'severity' clause.
+ OMPClause *ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'message' clause,
+ /// passing a string for the message.
+ OMPClause *ActOnOpenMPMessageClause(Expr *MS, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Data used for processing a list of variables in OpenMP clauses.
+ struct OpenMPVarListDataTy final { + Expr *DepModOrTailExpr = nullptr; + Expr *IteratorExpr = nullptr; + SourceLocation ColonLoc; + SourceLocation RLoc; + CXXScopeSpec ReductionOrMapperIdScopeSpec; + DeclarationNameInfo ReductionOrMapperId; + int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or + ///< lastprivate clause. + SmallVector + MapTypeModifiers; + SmallVector + MapTypeModifiersLoc; + SmallVector + MotionModifiers; + SmallVector MotionModifiersLoc; + bool IsMapTypeImplicit = false; + SourceLocation ExtraModifierLoc; + SourceLocation OmpAllMemoryLoc; + SourceLocation + StepModifierLoc; /// 'step' modifier location for linear clause + }; + + OMPClause *ActOnOpenMPVarListClause(OpenMPClauseKind Kind, + ArrayRef Vars, + const OMPVarListLocTy &Locs, + OpenMPVarListDataTy &Data); + /// Called on well-formed 'inclusive' clause. + OMPClause *ActOnOpenMPInclusiveClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'exclusive' clause. + OMPClause *ActOnOpenMPExclusiveClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'allocate' clause. + OMPClause * + ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef VarList, + SourceLocation StartLoc, SourceLocation ColonLoc, + SourceLocation LParenLoc, SourceLocation EndLoc); + /// Called on well-formed 'private' clause. + OMPClause *ActOnOpenMPPrivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'firstprivate' clause. + OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'lastprivate' clause. + OMPClause *ActOnOpenMPLastprivateClause( + ArrayRef VarList, OpenMPLastprivateModifier LPKind, + SourceLocation LPKindLoc, SourceLocation ColonLoc, + SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); + /// Called on well-formed 'shared' clause. + OMPClause *ActOnOpenMPSharedClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'reduction' clause. + OMPClause *ActOnOpenMPReductionClause( + ArrayRef VarList, OpenMPReductionClauseModifier Modifier, + SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation ModifierLoc, SourceLocation ColonLoc, + SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, + const DeclarationNameInfo &ReductionId, + ArrayRef UnresolvedReductions = std::nullopt); + /// Called on well-formed 'task_reduction' clause. + OMPClause *ActOnOpenMPTaskReductionClause( + ArrayRef VarList, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, + CXXScopeSpec &ReductionIdScopeSpec, + const DeclarationNameInfo &ReductionId, + ArrayRef UnresolvedReductions = std::nullopt); + /// Called on well-formed 'in_reduction' clause. + OMPClause *ActOnOpenMPInReductionClause( + ArrayRef VarList, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, + CXXScopeSpec &ReductionIdScopeSpec, + const DeclarationNameInfo &ReductionId, + ArrayRef UnresolvedReductions = std::nullopt); + /// Called on well-formed 'linear' clause. 
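// (Editor's illustration, not part of the patch.) A small loop exercising a
// few of the var-list clauses declared above (firstprivate, lastprivate,
// reduction); 'scale' and its variables are made-up names:

void scale(int n, float a, float *x) {
  float sum = 0.0f;
  int last_i = 0;
#pragma omp parallel for firstprivate(a) lastprivate(last_i) reduction(+ : sum)
  for (int i = 0; i < n; ++i) {
    x[i] *= a;   // 'a' is firstprivate: copied into each thread, not copied back
    sum += x[i]; // partial sums are combined by the reduction clause
    last_i = i;  // value from the sequentially last iteration is kept
  }
}

// Each clause reaches Sema through the matching callback declared above, e.g.
// ActOnOpenMPReductionClause receives the parsed variable list for 'sum'.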
+ OMPClause *ActOnOpenMPLinearClause( + ArrayRef VarList, Expr *Step, SourceLocation StartLoc, + SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, + SourceLocation LinLoc, SourceLocation ColonLoc, + SourceLocation StepModifierLoc, SourceLocation EndLoc); + /// Called on well-formed 'aligned' clause. + OMPClause *ActOnOpenMPAlignedClause(ArrayRef VarList, Expr *Alignment, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation ColonLoc, + SourceLocation EndLoc); + /// Called on well-formed 'copyin' clause. + OMPClause *ActOnOpenMPCopyinClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'copyprivate' clause. + OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'flush' pseudo clause. + OMPClause *ActOnOpenMPFlushClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'depobj' pseudo clause. + OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'depend' clause. + OMPClause *ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, + Expr *DepModifier, + ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'device' clause. + OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, + Expr *Device, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation ModifierLoc, + SourceLocation EndLoc); + /// Called on well-formed 'map' clause. + OMPClause *ActOnOpenMPMapClause( + Expr *IteratorModifier, ArrayRef MapTypeModifiers, + ArrayRef MapTypeModifiersLoc, + CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, + OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, + SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef VarList, + const OMPVarListLocTy &Locs, bool NoDiagnose = false, + ArrayRef UnresolvedMappers = std::nullopt); + /// Called on well-formed 'num_teams' clause. + OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'thread_limit' clause. + OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'priority' clause. + OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + /// Called on well-formed 'dist_schedule' clause. + OMPClause *ActOnOpenMPDistScheduleClause( + OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, + SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, + SourceLocation CommaLoc, SourceLocation EndLoc); + /// Called on well-formed 'defaultmap' clause. + OMPClause *ActOnOpenMPDefaultmapClause( + OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, + SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, + SourceLocation KindLoc, SourceLocation EndLoc); + /// Called on well-formed 'to' clause. 
+ OMPClause * + ActOnOpenMPToClause(ArrayRef MotionModifiers, + ArrayRef MotionModifiersLoc, + CXXScopeSpec &MapperIdScopeSpec, + DeclarationNameInfo &MapperId, SourceLocation ColonLoc, + ArrayRef VarList, const OMPVarListLocTy &Locs, + ArrayRef UnresolvedMappers = std::nullopt); + /// Called on well-formed 'from' clause. + OMPClause * + ActOnOpenMPFromClause(ArrayRef MotionModifiers, + ArrayRef MotionModifiersLoc, + CXXScopeSpec &MapperIdScopeSpec, + DeclarationNameInfo &MapperId, SourceLocation ColonLoc, + ArrayRef VarList, const OMPVarListLocTy &Locs, + ArrayRef UnresolvedMappers = std::nullopt); + /// Called on well-formed 'use_device_ptr' clause. + OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs); + /// Called on well-formed 'use_device_addr' clause. + OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs); + /// Called on well-formed 'is_device_ptr' clause. + OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs); + /// Called on well-formed 'has_device_addr' clause. + OMPClause *ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs); + /// Called on well-formed 'nontemporal' clause. + OMPClause *ActOnOpenMPNontemporalClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + /// Data for list of allocators. + struct UsesAllocatorsData { + /// Allocator. + Expr *Allocator = nullptr; + /// Allocator traits. + Expr *AllocatorTraits = nullptr; + /// Locations of '(' and ')' symbols. + SourceLocation LParenLoc, RParenLoc; + }; + /// Called on well-formed 'uses_allocators' clause. + OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc, + ArrayRef Data); + /// Called on well-formed 'affinity' clause. + OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation ColonLoc, + SourceLocation EndLoc, Expr *Modifier, + ArrayRef Locators); + /// Called on a well-formed 'bind' clause. + OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, + SourceLocation KindLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + /// Called on a well-formed 'ompx_dyn_cgroup_mem' clause. + OMPClause *ActOnOpenMPXDynCGroupMemClause(Expr *Size, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + /// Called on well-formed 'doacross' clause. + OMPClause * + ActOnOpenMPDoacrossClause(OpenMPDoacrossClauseModifier DepType, + SourceLocation DepLoc, SourceLocation ColonLoc, + ArrayRef VarList, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation EndLoc); + + /// Called on a well-formed 'ompx_attribute' clause. + OMPClause *ActOnOpenMPXAttributeClause(ArrayRef Attrs, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc); + + /// Called on a well-formed 'ompx_bare' clause. + OMPClause *ActOnOpenMPXBareClause(SourceLocation StartLoc, + SourceLocation EndLoc); + + ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, + Expr *LowerBound, + SourceLocation ColonLocFirst, + SourceLocation ColonLocSecond, + Expr *Length, Expr *Stride, + SourceLocation RBLoc); + ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, + SourceLocation RParenLoc, + ArrayRef Dims, + ArrayRef Brackets); + + /// Data structure for iterator expression. 
+ struct OMPIteratorData { + IdentifierInfo *DeclIdent = nullptr; + SourceLocation DeclIdentLoc; + ParsedType Type; + OMPIteratorExpr::IteratorRange Range; + SourceLocation AssignLoc; + SourceLocation ColonLoc; + SourceLocation SecColonLoc; + }; + + ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, + SourceLocation LLoc, SourceLocation RLoc, + ArrayRef Data); + +private: + void *VarDataSharingAttributesStack; + + /// Number of nested '#pragma omp declare target' directives. + SmallVector DeclareTargetNesting; + + /// Initialization of data-sharing attributes stack. + void InitDataSharingAttributesStack(); + void DestroyDataSharingAttributesStack(); + + /// Returns OpenMP nesting level for current directive. + unsigned getOpenMPNestingLevel() const; + + /// Adjusts the function scopes index for the target-based regions. + void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, + unsigned Level) const; + + /// Returns the number of scopes associated with the construct on the given + /// OpenMP level. + int getNumberOfConstructScopes(unsigned Level) const; + + /// Push new OpenMP function region for non-capturing function. + void pushOpenMPFunctionRegion(); + + /// Pop OpenMP function region for non-capturing function. + void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); + + /// Analyzes and checks a loop nest for use by a loop transformation. + /// + /// \param Kind The loop transformation directive kind. + /// \param NumLoops How many nested loops the directive is expecting. + /// \param AStmt Associated statement of the transformation directive. + /// \param LoopHelpers [out] The loop analysis result. + /// \param Body [out] The body code nested in \p NumLoops loop. + /// \param OriginalInits [out] Collection of statements and declarations that + /// must have been executed/declared before entering the + /// loop. + /// + /// \return Whether there was any error. + bool checkTransformableLoopNest( + OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, + SmallVectorImpl &LoopHelpers, + Stmt *&Body, + SmallVectorImpl, 0>> + &OriginalInits); + + /// Helper to keep information about the current `omp begin/end declare + /// variant` nesting. + struct OMPDeclareVariantScope { + /// The associated OpenMP context selector. + OMPTraitInfo *TI; + + /// The associated OpenMP context selector mangling. + std::string NameSuffix; + + OMPDeclareVariantScope(OMPTraitInfo &TI); + }; + + /// Return the OMPTraitInfo for the surrounding scope, if any. + OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { + return OMPDeclareVariantScopes.empty() ? nullptr + : OMPDeclareVariantScopes.back().TI; + } + + /// The current `omp begin/end declare variant` scopes. + SmallVector OMPDeclareVariantScopes; + + /// The current `omp begin/end assumes` scopes. + SmallVector OMPAssumeScoped; + + /// All `omp assumes` we encountered so far. + SmallVector OMPAssumeGlobal; + + /// OMPD_loop is mapped to OMPD_for, OMPD_distribute or OMPD_simd depending + /// on the parameter of the bind clause. In the methods for the + /// mapped directives, check the parameters of the lastprivate clause. + bool checkLastPrivateForMappedDirectives(ArrayRef Clauses); + /// Depending on the bind clause of OMPD_loop map the directive to new + /// directives. 
+ /// 1) loop bind(parallel) --> OMPD_for + /// 2) loop bind(teams) --> OMPD_distribute + /// 3) loop bind(thread) --> OMPD_simd + /// This is being handled in Sema instead of Codegen because of the need for + /// rigorous semantic checking in the new mapped directives. + bool mapLoopConstruct(llvm::SmallVector &ClausesWithoutBind, + ArrayRef Clauses, + OpenMPBindClauseKind &BindKind, + OpenMPDirectiveKind &Kind, + OpenMPDirectiveKind &PrevMappedDirective, + SourceLocation StartLoc, SourceLocation EndLoc, + const DeclarationNameInfo &DirName, + OpenMPDirectiveKind CancelRegion); +}; + +} // namespace clang + +#endif // LLVM_CLANG_SEMA_SEMAOPENMP_H diff --git a/clang/include/clang/Sema/SemaSYCL.h b/clang/include/clang/Sema/SemaSYCL.h new file mode 100644 index 0000000000000..6a16be6b94818 --- /dev/null +++ b/clang/include/clang/Sema/SemaSYCL.h @@ -0,0 +1,399 @@ +//===----- SemaSYCL.h ------- Semantic Analysis for SYCL constructs -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file declares semantic analysis for SYCL constructs. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_SEMA_SEMASYCL_H +#define LLVM_CLANG_SEMA_SEMASYCL_H + +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/Type.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Sema/Ownership.h" +#include "clang/Sema/SemaBase.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/SetVector.h" + +namespace clang { + +class CXXMethodDecl; +class MangleContext; +class SemaSYCL; + +// TODO SYCL Integration header approach relies on an assumption that kernel +// lambda objects created by the host compiler and any of the device compilers +// will be identical wrt to field types, order and offsets. Some verification +// mechanism should be developed to enforce that. + +// TODO FIXME SYCL Support for SYCL in FE should be refactored: +// - kernel identification and generation should be made a separate pass over +// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl + +// FunctionTemplateDecl::getSpecializations() mechanism could be used for that. +// - All SYCL stuff on Sema level should be encapsulated into a single Sema +// field +// - Move SYCL stuff into a separate header + +// Represents contents of a SYCL integration header file produced by a SYCL +// device compiler and used by SYCL host compiler (via forced inclusion into +// compiled SYCL source): +// - SYCL kernel names +// - SYCL kernel parameters and offsets of corresponding actual arguments +class SYCLIntegrationHeader { +public: + // Kind of kernel's parameters as captured by the compiler in the + // kernel lambda or function object + enum kernel_param_kind_t { + kind_first, + kind_accessor = kind_first, + kind_std_layout, + kind_sampler, + kind_pointer, + kind_specialization_constants_buffer, + kind_stream, + kind_last = kind_stream + }; + +public: + SYCLIntegrationHeader(SemaSYCL &S); + + /// Emits contents of the header into given stream. + void emit(raw_ostream &Out); + + /// Emits contents of the header into a file with given name. + /// Returns true/false on success/failure. 
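// (Editor's illustration, not part of the patch.) A rough sketch of the
// intended call sequence for SYCLIntegrationHeader, assuming a SemaSYCL
// instance 'S' and an already resolved kernel caller 'KernelFD' with name
// type 'NameTy' and location 'Loc' (all hypothetical):

void sketchEmitOneKernel(SemaSYCL &S, const FunctionDecl *KernelFD,
                         QualType NameTy, SourceLocation Loc) {
  SYCLIntegrationHeader &H = S.getSyclIntegrationHeader();
  H.startKernel(KernelFD, NameTy, Loc, /*IsESIMD=*/false,
                /*IsUnnamedKernel=*/false, /*ObjSize=*/32);
  // One descriptor per captured kernel argument, e.g. a 4-byte scalar at
  // offset 0 of the lambda/function object.
  H.addParamDesc(SYCLIntegrationHeader::kind_std_layout, /*Info=*/4,
                 /*Offset=*/0);
  H.endKernel();
  H.emit("integration_header.h"); // emits the accumulated descriptors
}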
+ bool emit(StringRef MainSrc); + + /// Signals that subsequent parameter descriptor additions will go to + /// the kernel with given name. Starts new kernel invocation descriptor. + void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType, + SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel, + int64_t ObjSize); + + /// Adds a kernel parameter descriptor to current kernel invocation + /// descriptor. + void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset); + + /// Signals that addition of parameter descriptors to current kernel + /// invocation descriptor has finished. + void endKernel(); + + /// Registers a specialization constant to emit info for it into the header. + void addSpecConstant(StringRef IDName, QualType IDType); + + /// Update the names of a kernel description based on its SyclKernel. + void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name, + StringRef StableName) { + auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) { + return KD.SyclKernel == SyclKernel; + }); + + assert(Itr != KernelDescs.end() && "Unknown kernel description"); + Itr->updateKernelNames(Name, StableName); + } + + /// Signals that emission of __sycl_device_global_registration type and + /// declaration of variable __sycl_device_global_registrar of this type in + /// integration header is required. + void addDeviceGlobalRegistration() { + NeedToEmitDeviceGlobalRegistration = true; + } + + /// Signals that emission of __sycl_host_pipe_registration type and + /// declaration of variable __sycl_host_pipe_registrar of this type in + /// integration header is required. + void addHostPipeRegistration() { NeedToEmitHostPipeRegistration = true; } + +private: + // Kernel actual parameter descriptor. + struct KernelParamDesc { + // Represents a parameter kind. + kernel_param_kind_t Kind = kind_last; + // If Kind is kind_scalar or kind_struct, then + // denotes parameter size in bytes (includes padding for structs) + // If Kind is kind_accessor + // denotes access target; possible access targets are defined in + // access/access.hpp + int Info = 0; + // Offset of the captured parameter value in the lambda or function object. + unsigned Offset = 0; + + KernelParamDesc() = default; + }; + + // Kernel invocation descriptor + struct KernelDesc { + /// sycl_kernel function associated with this kernel. + const FunctionDecl *SyclKernel; + + /// Kernel name. + std::string Name; + + /// Kernel name type. + QualType NameType; + + /// Kernel name with stable lambda name mangling + std::string StableName; + + SourceLocation KernelLocation; + + /// Whether this kernel is an ESIMD one. + bool IsESIMDKernel; + + /// Descriptor of kernel actual parameters. + SmallVector Params; + + // If we are in unnamed kernel/lambda mode AND this is one that the user + // hasn't provided an explicit name for. + bool IsUnnamedKernel; + + /// Size of the kernel object. 
+ int64_t ObjSize = 0; + + KernelDesc(const FunctionDecl *SyclKernel, QualType NameType, + SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel, + int64_t ObjSize) + : SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc), + IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel), + ObjSize(ObjSize) {} + + void updateKernelNames(StringRef Name, StringRef StableName) { + this->Name = Name.str(); + this->StableName = StableName.str(); + } + }; + + /// Returns the latest invocation descriptor started by + /// SYCLIntegrationHeader::startKernel + KernelDesc *getCurKernelDesc() { + return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1] + : nullptr; + } + +private: + /// Keeps invocation descriptors for each kernel invocation started by + /// SYCLIntegrationHeader::startKernel + SmallVector KernelDescs; + + using SpecConstID = std::pair; + + /// Keeps specialization constants met in the translation unit. Maps spec + /// constant's ID type to generated unique name. Duplicates are removed at + /// integration header emission time. + llvm::SmallVector SpecConsts; + + SemaSYCL &S; + + /// Keeps track of whether declaration of __sycl_device_global_registration + /// type and __sycl_device_global_registrar variable are required to emit. + bool NeedToEmitDeviceGlobalRegistration = false; + + /// Keeps track of whether declaration of __sycl_host_pipe_registration + /// type and __sycl_host_pipe_registrar variable are required to emit. + bool NeedToEmitHostPipeRegistration = false; +}; + +class SYCLIntegrationFooter { +public: + SYCLIntegrationFooter(SemaSYCL &S) : S(S) {} + bool emit(StringRef MainSrc); + void addVarDecl(const VarDecl *VD); + +private: + bool emit(raw_ostream &O); + SemaSYCL &S; + llvm::SmallVector GlobalVars; + void emitSpecIDName(raw_ostream &O, const VarDecl *VD); +}; + +class SemaSYCL : public SemaBase { +private: + // We store SYCL Kernels here and handle separately -- which is a hack. + // FIXME: It would be best to refactor this. + llvm::SetVector SyclDeviceDecls; + // SYCL integration header instance for current compilation unit this Sema + // is associated with. + std::unique_ptr SyclIntHeader; + std::unique_ptr SyclIntFooter; + + // We need to store the list of the sycl_kernel functions and their associated + // generated OpenCL Kernels so we can go back and re-name these after the + // fact. + llvm::SmallVector> + SyclKernelsToOpenCLKernels; + + // Used to suppress diagnostics during kernel construction, since these were + // already emitted earlier. Diagnosing during Kernel emissions also skips the + // useful notes that shows where the kernel was called. + bool DiagnosingSYCLKernel = false; + +public: + SemaSYCL(Sema &S); + + void CheckSYCLKernelCall(FunctionDecl *CallerFunc, + ArrayRef Args); + + /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current + /// context is "used as device code". + /// + /// - If CurLexicalContext is a kernel function or it is known that the + /// function will be emitted for the device, emits the diagnostics + /// immediately. + /// - If CurLexicalContext is a function and we are compiling + /// for the device, but we don't know that this function will be codegen'ed + /// for device yet, creates a diagnostic which is emitted if and when we + /// realize that the function will be codegen'ed. 
+ /// + /// Example usage: + /// + /// Diagnose __float128 type usage only from SYCL device code if the current + /// target doesn't support it + /// if (!S.Context.getTargetInfo().hasFloat128Type() && + /// S.getLangOpts().SYCLIsDevice) + /// DiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; + SemaDiagnosticBuilder DiagIfDeviceCode( + SourceLocation Loc, unsigned DiagID, + DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl | + DeviceDiagnosticReason::Esimd); + + void deepTypeCheckForDevice(SourceLocation UsedAt, + llvm::DenseSet Visited, + ValueDecl *DeclToCheck); + + void addSyclOpenCLKernel(const FunctionDecl *SyclKernel, + FunctionDecl *OpenCLKernel) { + SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel); + } + + void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); } + llvm::SetVector &syclDeviceDecls() { return SyclDeviceDecls; } + + /// Lazily creates and returns SYCL integration header instance. + SYCLIntegrationHeader &getSyclIntegrationHeader() { + if (SyclIntHeader == nullptr) + SyclIntHeader = std::make_unique(*this); + return *SyclIntHeader.get(); + } + + SYCLIntegrationFooter &getSyclIntegrationFooter() { + if (SyclIntFooter == nullptr) + SyclIntFooter = std::make_unique(*this); + return *SyclIntFooter.get(); + } + + void addSyclVarDecl(VarDecl *VD) { + if (getLangOpts().SYCLIsDevice && !getLangOpts().SYCLIntFooter.empty()) + getSyclIntegrationFooter().addVarDecl(VD); + } + + bool hasSyclIntegrationHeader() { return SyclIntHeader != nullptr; } + bool hasSyclIntegrationFooter() { return SyclIntFooter != nullptr; } + + enum SYCLRestrictKind { + KernelGlobalVariable, + KernelRTTI, + KernelNonConstStaticDataVariable, + KernelCallVirtualFunction, + KernelUseExceptions, + KernelCallRecursiveFunction, + KernelCallFunctionPointer, + KernelAllocateStorage, + KernelUseAssembly, + KernelCallDllimportFunction, + KernelCallVariadicFunction, + KernelCallUndefinedFunction, + KernelConstStaticVariable + }; + + bool isDeclAllowedInSYCLDeviceCode(const Decl *D); + void checkSYCLDeviceVarDecl(VarDecl *Var); + void copySYCLKernelAttrs(CXXMethodDecl *CallOperator); + void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC); + void SetSYCLKernelNames(); + void MarkDevices(); + + /// Get the number of fields or captures within the parsed type. + ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT); + ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc, + QualType SourceTy); + + /// Get a value based on the type of the given field number so that callers + /// can wrap it in a decltype() to get the actual type of the field. + ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx); + ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, + QualType SourceTy, Expr *Idx); + + /// Get the number of base classes within the parsed type. + ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT); + ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc, + QualType SourceTy); + + /// Get a value based on the type of the given base number so that callers + /// can wrap it in a decltype() to get the actual type of the base class. + ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx); + ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy, + Expr *Idx); + + bool checkAllowedSYCLInitializer(VarDecl *VD); + + /// Finishes analysis of the deferred functions calls that may be not + /// properly declared for device compilation. 
+ void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller, + const FunctionDecl *Callee, + SourceLocation Loc, + DeviceDiagnosticReason Reason); + + /// Tells whether given variable is a SYCL explicit SIMD extension's "private + /// global" variable - global variable in the private address space. + bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) { + return getLangOpts().SYCLIsDevice && VDecl->hasAttr() && + VDecl->hasGlobalStorage() && + (VDecl->getType().getAddressSpace() == LangAS::sycl_private); + } + + template + static bool isTypeDecoratedWithDeclAttribute(QualType Ty) { + const CXXRecordDecl *RecTy = Ty->getAsCXXRecordDecl(); + if (!RecTy) + return false; + + if (RecTy->hasAttr()) + return true; + + if (auto *CTSD = dyn_cast(RecTy)) { + ClassTemplateDecl *Template = CTSD->getSpecializedTemplate(); + if (CXXRecordDecl *RD = Template->getTemplatedDecl()) + return RD->hasAttr(); + } + return false; + } + + /// Check whether \p Ty corresponds to a SYCL type of name \p TypeName. + static bool isSyclType(QualType Ty, SYCLTypeAttr::SYCLType TypeName); + + ExprResult BuildUniqueStableIdExpr(SourceLocation OpLoc, + SourceLocation LParen, + SourceLocation RParen, Expr *E); + ExprResult ActOnUniqueStableIdExpr(SourceLocation OpLoc, + SourceLocation LParen, + SourceLocation RParen, Expr *E); + ExprResult BuildUniqueStableNameExpr(SourceLocation OpLoc, + SourceLocation LParen, + SourceLocation RParen, + TypeSourceInfo *TSI); + ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, + SourceLocation LParen, + SourceLocation RParen, + ParsedType ParsedTy); +}; + +} // namespace clang + +#endif // LLVM_CLANG_SEMA_SEMASYCL_H diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index 3bddc20c5d78e..c2fa2f1363ec6 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -698,6 +698,10 @@ enum ASTRecordTypes { /// Record code for an unterminated \#pragma clang assume_nonnull begin /// recorded in a preamble. PP_ASSUME_NONNULL_LOC = 67, + + /// Record code for lexical and visible block for delayed namespace in + /// reduced BMI. + DELAYED_NAMESPACE_LEXICAL_VISIBLE_RECORD = 68, }; /// Record types used within a source manager block. diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h index 1911252b34cd1..43ee06c524b3a 100644 --- a/clang/include/clang/Serialization/ASTReader.h +++ b/clang/include/clang/Serialization/ASTReader.h @@ -517,6 +517,20 @@ class ASTReader /// in the chain. DeclUpdateOffsetsMap DeclUpdateOffsets; + using DelayedNamespaceOffsetMapTy = llvm::DenseMap< + serialization::DeclID, + std::pair>; + + /// Mapping from global declaration IDs to the lexical and visible block + /// offset for delayed namespace in reduced BMI. + /// + /// We can't use the existing DeclUpdate mechanism since the DeclUpdate + /// may only be applied in an outer most read. However, we need to know + /// whether or not a DeclContext has external storage during the recursive + /// reading. So we need to apply the offset immediately after we read the + /// namespace as if it is not delayed. + DelayedNamespaceOffsetMapTy DelayedNamespaceOffsetMap; + struct PendingUpdateRecord { Decl *D; serialization::GlobalDeclID ID; @@ -859,7 +873,7 @@ class ASTReader /// Our current depth in #pragma cuda force_host_device begin/end /// macros. 
- unsigned ForceCUDAHostDeviceDepth = 0;
+ unsigned ForceHostDeviceDepth = 0;
 /// The IDs of the declarations Sema stores directly.
 ///
@@ -1082,12 +1096,12 @@ class ASTReader
 /// The set of lookup results that we have faked in order to support
 /// merging of partially deserialized decls but that we have not yet removed.
- llvm::SmallMapVector, 16>
- PendingFakeLookupResults;
+ llvm::SmallMapVector, 16>
+ PendingFakeLookupResults;
 /// The generation number of each identifier, which keeps track of
 /// the last time we loaded information about this identifier.
- llvm::DenseMap IdentifierGeneration;
+ llvm::DenseMap IdentifierGeneration;
 /// Contains declarations and definitions that could be
 /// "interesting" to the ASTConsumer, when we get that AST consumer.
@@ -1492,6 +1506,7 @@ class ASTReader
 getModuleFileLevelDecls(ModuleFile &Mod);
 private:
+ bool isConsumerInterestedIn(Decl *D);
 void PassInterestingDeclsToConsumer();
 void PassInterestingDeclToConsumer(Decl *D);
@@ -2330,10 +2345,10 @@ class ASTReader
 void ReadDefinedMacros() override;
 /// Update an out-of-date identifier.
- void updateOutOfDateIdentifier(IdentifierInfo &II) override;
+ void updateOutOfDateIdentifier(const IdentifierInfo &II) override;
 /// Note that this identifier is up-to-date.
- void markIdentifierUpToDate(IdentifierInfo *II);
+ void markIdentifierUpToDate(const IdentifierInfo *II);
 /// Load all external visible decls in the given DeclContext.
 void completeVisibleDeclsMap(const DeclContext *DC) override;
@@ -2442,6 +2457,12 @@ class BitsUnpacker {
 uint32_t Value;
 uint32_t CurrentBitsIndex = ~0;
};
+
+inline bool shouldSkipCheckingODR(const Decl *D) {
+ return D->getASTContext().getLangOpts().SkipODRCheckInGMF &&
+ D->isFromExplicitGlobalModule();
+}
+
} // namespace clang
#endif // LLVM_CLANG_SERIALIZATION_ASTREADER_H
diff --git a/clang/include/clang/Serialization/ASTWriter.h b/clang/include/clang/Serialization/ASTWriter.h
index 214eb3601148b..13b4ad4ad2953 100644
--- a/clang/include/clang/Serialization/ASTWriter.h
+++ b/clang/include/clang/Serialization/ASTWriter.h
@@ -201,6 +201,16 @@ class ASTWriter : public ASTDeserializationListener,
 /// The declarations and types to emit.
 std::queue DeclTypesToEmit;
+ /// The delayed namespace to emit. Only meaningful for reduced BMI.
+ ///
+ /// In reduced BMI, we want to elide the unreachable declarations in
+ /// the global module fragment. However, in ASTWriterDecl, when we see
+ /// a namespace, all the declarations in the namespace would be emitted.
+ /// So the optimization becomes meaningless. To solve the issue, we
+ /// delay recording the declarations until we have emitted all the
+ /// reachable declarations. Then we can safely record only the reached ones.
+ llvm::SmallVector DelayedNamespace;
+
 /// The first ID number we can use for our own declarations.
 serialization::DeclID FirstDeclID = serialization::NUM_PREDEF_DECL_IDS;
@@ -389,6 +399,11 @@ class ASTWriter : public ASTDeserializationListener,
 /// record containing modifications to them.
 DeclUpdateMap DeclUpdates;
+ /// DeclUpdates added during parsing the GMF. We split these from
+ /// DeclUpdates since we want to add the updates from the GMF only when
+ /// they are needed. Only meaningful for reduced BMI.
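// (Editor's illustration, not part of the patch.) A minimal module interface,
// say 'm.cppm' exporting a made-up function 'greet', that makes the
// reduced-BMI comments above concrete:

module;                      // global module fragment (GMF)
#include <string>            // GMF declarations, most of them never used
export module m;
export std::string greet();  // only what 'greet' reaches from <string>
                             // needs to survive in a reduced BMI

// Unreachable GMF declarations can be elided, which is why namespace contents
// are recorded lazily (DelayedNamespace above) and why updates collected while
// parsing the GMF are kept in the separate map declared below.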
+ DeclUpdateMap DeclUpdatesFromGMF; + using FirstLatestDeclMap = llvm::DenseMap; /// Map of first declarations from a chained PCH that point to the @@ -529,7 +544,8 @@ class ASTWriter : public ASTDeserializationListener, void WriteType(QualType T); bool isLookupResultExternal(StoredDeclsList &Result, DeclContext *DC); - bool isLookupResultEntirelyExternal(StoredDeclsList &Result, DeclContext *DC); + bool isLookupResultEntirelyExternalOrUnreachable(StoredDeclsList &Result, + DeclContext *DC); void GenerateNameLookupTable(const DeclContext *DC, llvm::SmallVectorImpl &LookupTable); @@ -543,6 +559,8 @@ class ASTWriter : public ASTDeserializationListener, void WriteIdentifierTable(Preprocessor &PP, IdentifierResolver &IdResolver, bool IsModule); void WriteDeclAndTypes(ASTContext &Context); + void PrepareWritingSpecialDecls(Sema &SemaRef); + void WriteSpecialDeclRecords(Sema &SemaRef); void WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord); void WriteDeclContextVisibleUpdate(const DeclContext *DC); void WriteFPPragmaOptions(const FPOptionsOverride &Opts); @@ -696,6 +714,8 @@ class ASTWriter : public ASTDeserializationListener, /// Emit a reference to a declaration. void AddDeclRef(const Decl *D, RecordDataImpl &Record); + // Emit a reference to a declaration if the declaration was emitted. + void AddEmittedDeclRef(const Decl *D, RecordDataImpl &Record); /// Force a declaration to be emitted and get its ID. serialization::DeclID GetDeclRef(const Decl *D); @@ -704,6 +724,15 @@ class ASTWriter : public ASTDeserializationListener, /// declaration. serialization::DeclID getDeclID(const Decl *D); + /// Whether or not the declaration got emitted. If not, it wouldn't be + /// emitted. + /// + /// This may only be called after we've done the job to write the + /// declarations (marked by DoneWritingDeclsAndTypes). + /// + /// A declaration may only be omitted in reduced BMI. + bool wasDeclEmitted(const Decl *D) const; + unsigned getAnonymousDeclarationNumber(const NamedDecl *D); /// Add a string to the given record. 
@@ -798,6 +827,10 @@ class ASTWriter : public ASTDeserializationListener, return WritingModule && WritingModule->isNamedModule(); } + bool isGeneratingReducedBMI() const { return GeneratingReducedBMI; } + + bool getDoneWritingDeclsAndTypes() const { return DoneWritingDeclsAndTypes; } + private: // ASTDeserializationListener implementation void ReaderInitialized(ASTReader *Reader) override; @@ -842,6 +875,11 @@ class ASTWriter : public ASTDeserializationListener, void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override; void AddedAttributeToRecord(const Attr *Attr, const RecordDecl *Record) override; + void EnteringModulePurview() override; + void AddedManglingNumber(const Decl *D, unsigned) override; + void AddedStaticLocalNumbers(const Decl *D, unsigned) override; + void AddedAnonymousNamespace(const TranslationUnitDecl *, + NamespaceDecl *AnonNamespace) override; }; /// AST and semantic-analysis consumer that generates a diff --git a/clang/include/clang/Serialization/ModuleFileExtension.h b/clang/include/clang/Serialization/ModuleFileExtension.h index d7d456c8b5db8..50ce401516275 100644 --- a/clang/include/clang/Serialization/ModuleFileExtension.h +++ b/clang/include/clang/Serialization/ModuleFileExtension.h @@ -9,7 +9,6 @@ #ifndef LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H #define LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H -#include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/Support/ExtensibleRTTI.h" #include "llvm/Support/HashBuilder.h" #include "llvm/Support/MD5.h" diff --git a/clang/include/clang/Serialization/PCHContainerOperations.h b/clang/include/clang/Serialization/PCHContainerOperations.h index ddfddf2dafadf..c9a7e334ce6eb 100644 --- a/clang/include/clang/Serialization/PCHContainerOperations.h +++ b/clang/include/clang/Serialization/PCHContainerOperations.h @@ -12,7 +12,7 @@ #include "clang/Basic/Module.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" -#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/MemoryBufferRef.h" #include namespace llvm { diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h index 9a522a3e2fe25..f7b4510d7f7be 100644 --- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h +++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h @@ -142,6 +142,8 @@ class CachedFileSystemEntry { CachedFileContents *Contents; }; +using CachedRealPath = llvm::ErrorOr; + /// This class is a shared cache, that caches the 'stat' and 'open' calls to the /// underlying real file system, and the scanned preprocessor directives of /// files. @@ -154,9 +156,11 @@ class DependencyScanningFilesystemSharedCache { /// The mutex that needs to be locked before mutation of any member. mutable std::mutex CacheLock; - /// Map from filenames to cached entries. - llvm::StringMap - EntriesByFilename; + /// Map from filenames to cached entries and real paths. + llvm::StringMap< + std::pair, + llvm::BumpPtrAllocator> + CacheByFilename; /// Map from unique IDs to cached entries. llvm::DenseMap @@ -168,6 +172,9 @@ class DependencyScanningFilesystemSharedCache { /// The backing storage for cached contents. llvm::SpecificBumpPtrAllocator ContentsStorage; + /// The backing storage for cached real paths. + llvm::SpecificBumpPtrAllocator RealPathStorage; + /// Returns entry associated with the filename or nullptr if none is found. 
const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const; @@ -194,6 +201,17 @@ class DependencyScanningFilesystemSharedCache { const CachedFileSystemEntry & getOrInsertEntryForFilename(StringRef Filename, const CachedFileSystemEntry &Entry); + + /// Returns the real path associated with the filename or nullptr if none is + /// found. + const CachedRealPath *findRealPathByFilename(StringRef Filename) const; + + /// Returns the real path associated with the filename if there is some. + /// Otherwise, constructs new one with the given one, associates it with the + /// filename and returns the result. + const CachedRealPath & + getOrEmplaceRealPathForFilename(StringRef Filename, + llvm::ErrorOr RealPath); }; DependencyScanningFilesystemSharedCache(); @@ -210,14 +228,17 @@ class DependencyScanningFilesystemSharedCache { /// This class is a local cache, that caches the 'stat' and 'open' calls to the /// underlying real file system. class DependencyScanningFilesystemLocalCache { - llvm::StringMap Cache; + llvm::StringMap< + std::pair, + llvm::BumpPtrAllocator> + Cache; public: /// Returns entry associated with the filename or nullptr if none is found. const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const { assert(llvm::sys::path::is_absolute_gnu(Filename)); auto It = Cache.find(Filename); - return It == Cache.end() ? nullptr : It->getValue(); + return It == Cache.end() ? nullptr : It->getValue().first; } /// Associates the given entry with the filename and returns the given entry @@ -226,9 +247,40 @@ class DependencyScanningFilesystemLocalCache { insertEntryForFilename(StringRef Filename, const CachedFileSystemEntry &Entry) { assert(llvm::sys::path::is_absolute_gnu(Filename)); - const auto *InsertedEntry = Cache.insert({Filename, &Entry}).first->second; - assert(InsertedEntry == &Entry && "entry already present"); - return *InsertedEntry; + auto [It, Inserted] = Cache.insert({Filename, {&Entry, nullptr}}); + auto &[CachedEntry, CachedRealPath] = It->getValue(); + if (!Inserted) { + // The file is already present in the local cache. If we got here, it only + // contains the real path. Let's make sure the entry is populated too. + assert((!CachedEntry && CachedRealPath) && "entry already present"); + CachedEntry = &Entry; + } + return *CachedEntry; + } + + /// Returns real path associated with the filename or nullptr if none is + /// found. + const CachedRealPath *findRealPathByFilename(StringRef Filename) const { + assert(llvm::sys::path::is_absolute_gnu(Filename)); + auto It = Cache.find(Filename); + return It == Cache.end() ? nullptr : It->getValue().second; + } + + /// Associates the given real path with the filename and returns the given + /// entry pointer (for convenience). + const CachedRealPath & + insertRealPathForFilename(StringRef Filename, + const CachedRealPath &RealPath) { + assert(llvm::sys::path::is_absolute_gnu(Filename)); + auto [It, Inserted] = Cache.insert({Filename, {nullptr, &RealPath}}); + auto &[CachedEntry, CachedRealPath] = It->getValue(); + if (!Inserted) { + // The file is already present in the local cache. If we got here, it only + // contains the entry. Let's make sure the real path is populated too. 
+ assert((!CachedRealPath && CachedEntry) && "real path already present"); + CachedRealPath = &RealPath; + } + return *CachedRealPath; } }; @@ -296,6 +348,9 @@ class DependencyScanningWorkerFilesystem llvm::ErrorOr> openFileForRead(const Twine &Path) override; + std::error_code getRealPath(const Twine &Path, + SmallVectorImpl &Output) override; + std::error_code setCurrentWorkingDirectory(const Twine &Path) override; /// Returns entry for the given filename. @@ -310,6 +365,10 @@ class DependencyScanningWorkerFilesystem /// false if not (i.e. this entry is not a file or its scan fails). bool ensureDirectiveTokensArePopulated(EntryRef Entry); + /// Check whether \p Path exists. By default checks cached result of \c + /// status(), and falls back on FS if unable to do so. + bool exists(const Twine &Path) override; + private: /// For a filename that's not yet associated with any entry in the caches, /// uses the underlying filesystem to either look up the entry based in the @@ -402,6 +461,10 @@ class DependencyScanningWorkerFilesystem llvm::ErrorOr WorkingDirForCacheLookup; void updateWorkingDirForCacheLookup(); + + llvm::ErrorOr + tryGetFilenameForLookup(StringRef OriginalFilename, + llvm::SmallVectorImpl &PathBuf) const; }; } // end namespace dependencies diff --git a/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h index 081899cc2c850..da51292296a90 100644 --- a/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h +++ b/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h @@ -308,6 +308,11 @@ class ModuleDepCollector final : public DependencyCollector { ModuleDeps &Deps); }; +/// Resets codegen options that don't affect modules/PCH. 
+void resetBenignCodeGenOptions(frontend::ActionKind ProgramAction, + const LangOptions &LangOpts, + CodeGenOptions &CGOpts); + } // end namespace dependencies } // end namespace tooling } // end namespace clang diff --git a/clang/lib/APINotes/APINotesReader.cpp b/clang/lib/APINotes/APINotesReader.cpp index fbbe9c32ce125..dfc3beb6fa13e 100644 --- a/clang/lib/APINotes/APINotesReader.cpp +++ b/clang/lib/APINotes/APINotesReader.cpp @@ -30,23 +30,20 @@ namespace { llvm::VersionTuple ReadVersionTuple(const uint8_t *&Data) { uint8_t NumVersions = (*Data++) & 0x03; - unsigned Major = - endian::readNext(Data); + unsigned Major = endian::readNext(Data); if (NumVersions == 0) return llvm::VersionTuple(Major); - unsigned Minor = - endian::readNext(Data); + unsigned Minor = endian::readNext(Data); if (NumVersions == 1) return llvm::VersionTuple(Major, Minor); unsigned Subminor = - endian::readNext(Data); + endian::readNext(Data); if (NumVersions == 2) return llvm::VersionTuple(Major, Minor, Subminor); - unsigned Build = - endian::readNext(Data); + unsigned Build = endian::readNext(Data); return llvm::VersionTuple(Major, Minor, Subminor, Build); } @@ -71,16 +68,16 @@ class VersionedTableInfo { static std::pair ReadKeyDataLength(const uint8_t *&Data) { unsigned KeyLength = - endian::readNext(Data); + endian::readNext(Data); unsigned DataLength = - endian::readNext(Data); + endian::readNext(Data); return {KeyLength, DataLength}; } static data_type ReadData(internal_key_type Key, const uint8_t *Data, unsigned Length) { unsigned NumElements = - endian::readNext(Data); + endian::readNext(Data); data_type Result; Result.reserve(NumElements); for (unsigned i = 0; i != NumElements; ++i) { @@ -105,14 +102,14 @@ void ReadCommonEntityInfo(const uint8_t *&Data, CommonEntityInfo &Info) { Info.setSwiftPrivate(static_cast((UnavailableBits >> 3) & 0x01)); unsigned MsgLength = - endian::readNext(Data); + endian::readNext(Data); Info.UnavailableMsg = std::string(reinterpret_cast(Data), reinterpret_cast(Data) + MsgLength); Data += MsgLength; unsigned SwiftNameLength = - endian::readNext(Data); + endian::readNext(Data); Info.SwiftName = std::string(reinterpret_cast(Data), reinterpret_cast(Data) + SwiftNameLength); @@ -124,7 +121,7 @@ void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) { ReadCommonEntityInfo(Data, Info); unsigned SwiftBridgeLength = - endian::readNext(Data); + endian::readNext(Data); if (SwiftBridgeLength > 0) { Info.setSwiftBridge(std::string(reinterpret_cast(Data), SwiftBridgeLength - 1)); @@ -132,7 +129,7 @@ void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) { } unsigned ErrorDomainLength = - endian::readNext(Data); + endian::readNext(Data); if (ErrorDomainLength > 0) { Info.setNSErrorDomain(std::optional(std::string( reinterpret_cast(Data), ErrorDomainLength - 1))); @@ -163,9 +160,9 @@ class IdentifierTableInfo { static std::pair ReadKeyDataLength(const uint8_t *&Data) { unsigned KeyLength = - endian::readNext(Data); + endian::readNext(Data); unsigned DataLength = - endian::readNext(Data); + endian::readNext(Data); return {KeyLength, DataLength}; } @@ -175,8 +172,7 @@ class IdentifierTableInfo { static data_type ReadData(internal_key_type key, const uint8_t *Data, unsigned Length) { - return endian::readNext( - Data); + return endian::readNext(Data); } }; @@ -203,26 +199,24 @@ class ObjCContextIDTableInfo { static std::pair ReadKeyDataLength(const uint8_t *&Data) { unsigned KeyLength = - endian::readNext(Data); + endian::readNext(Data); unsigned DataLength = - 
endian::readNext(Data); + endian::readNext(Data); return {KeyLength, DataLength}; } static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { auto ParentCtxID = - endian::readNext(Data); + endian::readNext(Data); auto ContextKind = - endian::readNext(Data); - auto NameID = - endian::readNext(Data); + endian::readNext(Data); + auto NameID = endian::readNext(Data); return {ParentCtxID, ContextKind, NameID}; } static data_type ReadData(internal_key_type Key, const uint8_t *Data, unsigned Length) { - return endian::readNext( - Data); + return endian::readNext(Data); } }; @@ -232,8 +226,7 @@ class ObjCContextInfoTableInfo ObjCContextInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - return endian::readNext( - Data); + return endian::readNext(Data); } hash_value_type ComputeHash(internal_key_type Key) { @@ -273,8 +266,7 @@ void ReadVariableInfo(const uint8_t *&Data, VariableInfo &Info) { } ++Data; - auto TypeLen = - endian::readNext(Data); + auto TypeLen = endian::readNext(Data); Info.setType(std::string(Data, Data + TypeLen)); Data += TypeLen; } @@ -286,12 +278,9 @@ class ObjCPropertyTableInfo ObjCPropertyInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto ClassID = - endian::readNext(Data); - auto NameID = - endian::readNext(Data); - char IsInstance = - endian::readNext(Data); + auto ClassID = endian::readNext(Data); + auto NameID = endian::readNext(Data); + char IsInstance = endian::readNext(Data); return {ClassID, NameID, IsInstance}; } @@ -314,8 +303,7 @@ class ObjCPropertyTableInfo void ReadParamInfo(const uint8_t *&Data, ParamInfo &Info) { ReadVariableInfo(Data, Info); - uint8_t Payload = - endian::readNext(Data); + uint8_t Payload = endian::readNext(Data); if (auto RawConvention = Payload & 0x7) { auto Convention = static_cast(RawConvention - 1); Info.setRetainCountConvention(Convention); @@ -331,8 +319,7 @@ void ReadParamInfo(const uint8_t *&Data, ParamInfo &Info) { void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) { ReadCommonEntityInfo(Data, Info); - uint8_t Payload = - endian::readNext(Data); + uint8_t Payload = endian::readNext(Data); if (auto RawConvention = Payload & 0x7) { auto Convention = static_cast(RawConvention - 1); Info.setRetainCountConvention(Convention); @@ -343,12 +330,12 @@ void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) { assert(Payload == 0 && "Bad API notes"); Info.NumAdjustedNullable = - endian::readNext(Data); + endian::readNext(Data); Info.NullabilityPayload = - endian::readNext(Data); + endian::readNext(Data); unsigned NumParams = - endian::readNext(Data); + endian::readNext(Data); while (NumParams > 0) { ParamInfo pi; ReadParamInfo(Data, pi); @@ -357,7 +344,7 @@ void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) { } unsigned ResultTypeLen = - endian::readNext(Data); + endian::readNext(Data); Info.ResultType = std::string(Data, Data + ResultTypeLen); Data += ResultTypeLen; } @@ -369,12 +356,10 @@ class ObjCMethodTableInfo ObjCMethodInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto ClassID = - endian::readNext(Data); + auto ClassID = endian::readNext(Data); auto SelectorID = - endian::readNext(Data); - auto IsInstance = - endian::readNext(Data); + endian::readNext(Data); + auto IsInstance = endian::readNext(Data); return {ClassID, SelectorID, IsInstance}; } @@ -419,29 +404,26 @@ class ObjCSelectorTableInfo { static std::pair ReadKeyDataLength(const uint8_t *&Data) { unsigned 
KeyLength = - endian::readNext(Data); + endian::readNext(Data); unsigned DataLength = - endian::readNext(Data); + endian::readNext(Data); return {KeyLength, DataLength}; } static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { internal_key_type Key; - Key.NumArgs = - endian::readNext(Data); + Key.NumArgs = endian::readNext(Data); unsigned NumIdents = (Length - sizeof(uint16_t)) / sizeof(uint32_t); for (unsigned i = 0; i != NumIdents; ++i) { Key.Identifiers.push_back( - endian::readNext( - Data)); + endian::readNext(Data)); } return Key; } static data_type ReadData(internal_key_type Key, const uint8_t *Data, unsigned Length) { - return endian::readNext( - Data); + return endian::readNext(Data); } }; @@ -451,12 +433,10 @@ class GlobalVariableTableInfo GlobalVariableInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto CtxID = - endian::readNext(Data); + auto CtxID = endian::readNext(Data); auto ContextKind = - endian::readNext(Data); - auto NameID = - endian::readNext(Data); + endian::readNext(Data); + auto NameID = endian::readNext(Data); return {CtxID, ContextKind, NameID}; } @@ -478,12 +458,10 @@ class GlobalFunctionTableInfo GlobalFunctionInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto CtxID = - endian::readNext(Data); + auto CtxID = endian::readNext(Data); auto ContextKind = - endian::readNext(Data); - auto NameID = - endian::readNext(Data); + endian::readNext(Data); + auto NameID = endian::readNext(Data); return {CtxID, ContextKind, NameID}; } @@ -505,8 +483,7 @@ class EnumConstantTableInfo EnumConstantInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto NameID = - endian::readNext(Data); + auto NameID = endian::readNext(Data); return NameID; } @@ -527,13 +504,11 @@ class TagTableInfo : public VersionedTableInfo { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto CtxID = - endian::readNext(Data); + auto CtxID = endian::readNext(Data); auto ContextKind = - endian::readNext(Data); + endian::readNext(Data); auto NameID = - endian::readNext( - Data); + endian::readNext(Data); return {CtxID, ContextKind, NameID}; } @@ -553,21 +528,21 @@ class TagTableInfo static_cast((Payload & 0x3) - 1); unsigned ImportAsLength = - endian::readNext(Data); + endian::readNext(Data); if (ImportAsLength > 0) { Info.SwiftImportAs = std::string(reinterpret_cast(Data), ImportAsLength - 1); Data += ImportAsLength - 1; } unsigned RetainOpLength = - endian::readNext(Data); + endian::readNext(Data); if (RetainOpLength > 0) { Info.SwiftRetainOp = std::string(reinterpret_cast(Data), RetainOpLength - 1); Data += RetainOpLength - 1; } unsigned ReleaseOpLength = - endian::readNext(Data); + endian::readNext(Data); if (ReleaseOpLength > 0) { Info.SwiftReleaseOp = std::string(reinterpret_cast(Data), ReleaseOpLength - 1); @@ -585,13 +560,11 @@ class TypedefTableInfo TypedefInfo> { public: static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) { - auto CtxID = - endian::readNext(Data); + auto CtxID = endian::readNext(Data); auto ContextKind = - endian::readNext(Data); + endian::readNext(Data); auto nameID = - endian::readNext( - Data); + endian::readNext(Data); return {CtxID, ContextKind, nameID}; } diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp index 0786c81516b2d..b9dcfb8951b3e 100644 --- a/clang/lib/ARCMigrate/ObjCMT.cpp +++ b/clang/lib/ARCMigrate/ObjCMT.cpp @@ -1144,7 +1144,7 @@ static bool 
IsValidIdentifier(ASTContext &Ctx, return false; std::string NameString = Name; NameString[0] = toLowercase(NameString[0]); - IdentifierInfo *II = &Ctx.Idents.get(NameString); + const IdentifierInfo *II = &Ctx.Idents.get(NameString); return II->getTokenID() == tok::identifier; } @@ -1166,7 +1166,7 @@ bool ObjCMigrateASTConsumer::migrateProperty(ASTContext &Ctx, if (OIT_Family != OIT_None) return false; - IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0); + const IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0); Selector SetterSelector = SelectorTable::constructSetterSelector(PP.getIdentifierTable(), PP.getSelectorTable(), @@ -1311,7 +1311,8 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx, std::string StringLoweredClassName = LoweredClassName.lower(); LoweredClassName = StringLoweredClassName; - IdentifierInfo *MethodIdName = OM->getSelector().getIdentifierInfoForSlot(0); + const IdentifierInfo *MethodIdName = + OM->getSelector().getIdentifierInfoForSlot(0); // Handle method with no name at its first selector slot; e.g. + (id):(int)x. if (!MethodIdName) return; diff --git a/clang/lib/ARCMigrate/TransAPIUses.cpp b/clang/lib/ARCMigrate/TransAPIUses.cpp index 638850dcf9ecc..8f5d4f4bde06c 100644 --- a/clang/lib/ARCMigrate/TransAPIUses.cpp +++ b/clang/lib/ARCMigrate/TransAPIUses.cpp @@ -41,7 +41,7 @@ class APIChecker : public RecursiveASTVisitor { getReturnValueSel = sels.getUnarySelector(&ids.get("getReturnValue")); setReturnValueSel = sels.getUnarySelector(&ids.get("setReturnValue")); - IdentifierInfo *selIds[2]; + const IdentifierInfo *selIds[2]; selIds[0] = &ids.get("getArgument"); selIds[1] = &ids.get("atIndex"); getArgumentSel = sels.getSelector(2, selIds); diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp index d8042321319a6..8c77b563657d9 100644 --- a/clang/lib/AST/APValue.cpp +++ b/clang/lib/AST/APValue.cpp @@ -908,7 +908,8 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, for (const auto *FI : RD->fields()) { if (!First) Out << ", "; - if (FI->isUnnamedBitfield()) continue; + if (FI->isUnnamedBitField()) + continue; getStructField(FI->getFieldIndex()). printPretty(Out, Policy, FI->getType(), Ctx); First = false; diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index a7a885839d6c3..5081d258724df 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -799,7 +799,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl( TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create( *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(), - TTP->getPosition(), TTP->isParameterPack(), nullptr, + TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false, TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(), CanonParams, SourceLocation(), /*RequiresClause=*/nullptr)); @@ -2696,7 +2696,7 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, if (Field->isBitField()) { // If we have explicit padding bits, they don't contribute bits // to the actual object representation, so return 0. 
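Most of the churn from here on is the rename isUnnamedBitfield() -> isUnnamedBitField(). The comment above is the semantic core: an unnamed bit-field is padding, not a member, so it is skipped by aggregate initialization and by the pretty printer, and it contributes nothing to the value representation. A small standard C++ illustration:

    struct S {
      unsigned a : 4;
      unsigned   : 4;  // unnamed bit-field: padding only, not a member
      unsigned b : 4;
    };

    // Aggregate initialization skips the unnamed bit-field entirely.
    constexpr S s{1, 2};  // a == 1, b == 2
    static_assert(s.a == 1 && s.b == 2);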
- if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) return 0; int64_t BitfieldSize = Field->getBitWidthValue(Context); @@ -6941,16 +6941,13 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { // typedef typename T::type T1; // typedef typename T1::type T2; if (const auto *DNT = T->getAs()) - return NestedNameSpecifier::Create( - *this, DNT->getQualifier(), - const_cast(DNT->getIdentifier())); + return NestedNameSpecifier::Create(*this, DNT->getQualifier(), + DNT->getIdentifier()); if (const auto *DTST = T->getAs()) - return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, - const_cast(T)); + return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, T); // TODO: Set 'Template' parameter to true for other template types. - return NestedNameSpecifier::Create(*this, nullptr, false, - const_cast(T)); + return NestedNameSpecifier::Create(*this, nullptr, false, T); } case NestedNameSpecifier::Global: @@ -12289,8 +12286,13 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, } void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { - if (Number > 1) - MangleNumbers[ND] = Number; + if (Number <= 1) + return; + + MangleNumbers[ND] = Number; + + if (Listener) + Listener->AddedManglingNumber(ND, Number); } unsigned ASTContext::getManglingNumber(const NamedDecl *ND, @@ -12309,8 +12311,13 @@ unsigned ASTContext::getManglingNumber(const NamedDecl *ND, } void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { - if (Number > 1) - StaticLocalNumbers[VD] = Number; + if (Number <= 1) + return; + + StaticLocalNumbers[VD] = Number; + + if (Listener) + Listener->AddedStaticLocalNumbers(VD, Number); } unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp index b25b9bc9b1140..65ebebb44cf82 100644 --- a/clang/lib/AST/ASTImporter.cpp +++ b/clang/lib/AST/ASTImporter.cpp @@ -3953,6 +3953,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { // decl and its redeclarations may be required. } + StringLiteral *Msg = D->getDeletedMessage(); + if (Msg) { + auto Imported = import(Msg); + if (!Imported) + return Imported.takeError(); + Msg = *Imported; + } + ToFunction->setQualifierInfo(ToQualifierLoc); ToFunction->setAccess(D->getAccess()); ToFunction->setLexicalDeclContext(LexicalDC); @@ -3967,6 +3975,11 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { ToFunction->setRangeEnd(ToEndLoc); ToFunction->setDefaultLoc(ToDefaultLoc); + if (Msg) + ToFunction->setDefaultedOrDeletedInfo( + FunctionDecl::DefaultedOrDeletedFunctionInfo::Create( + Importer.getToContext(), {}, Msg)); + // Set the parameters. 
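The importer hunk above carries over the optional message attached to a deleted function. At the source level this is the C++2c "= delete with a reason" form (P2573), and the quoted string is exactly the StringLiteral that getDeletedMessage() returns:

    struct Socket {
      Socket() = default;
      Socket(Socket &&) = default;
      // The message is stored in FunctionDecl's DefaultedOrDeletedFunctionInfo.
      Socket(const Socket &) = delete("Socket is move-only; use std::move");
    };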
for (auto *Param : Parameters) { Param->setOwningFunction(ToFunction); @@ -5958,7 +5971,8 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { ToD, D, Importer.getToContext(), Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr, D->getDepth(), D->getPosition(), D->isParameterPack(), - (*NameOrErr).getAsIdentifierInfo(), *TemplateParamsOrErr)) + (*NameOrErr).getAsIdentifierInfo(), D->wasDeclaredWithTypename(), + *TemplateParamsOrErr)) return ToD; if (D->hasDefaultArgument()) { @@ -8389,8 +8403,8 @@ ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { return std::move(Err); PseudoDestructorTypeStorage Storage; - if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) { - IdentifierInfo *ToII = Importer.Import(FromII); + if (const IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) { + const IdentifierInfo *ToII = Importer.Import(FromII); ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc()); if (!ToDestroyedTypeLocOrErr) return ToDestroyedTypeLocOrErr.takeError(); @@ -10200,7 +10214,7 @@ Expected ASTImporter::Import(Selector FromSel) { if (FromSel.isNull()) return Selector{}; - SmallVector Idents; + SmallVector Idents; Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0))); for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I) Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I))); diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp index 296f3b2ce4abd..189e3156292a0 100644 --- a/clang/lib/AST/Decl.cpp +++ b/clang/lib/AST/Decl.cpp @@ -2917,10 +2917,10 @@ VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD, //===----------------------------------------------------------------------===// ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, - StorageClass S, Expr *DefArg) { + SourceLocation StartLoc, SourceLocation IdLoc, + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, StorageClass S, + Expr *DefArg) { return new (C, DC) ParmVarDecl(ParmVar, C, DC, StartLoc, IdLoc, Id, T, TInfo, S, DefArg); } @@ -3062,7 +3062,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, FunctionDeclBits.IsTrivialForCall = false; FunctionDeclBits.IsDefaulted = false; FunctionDeclBits.IsExplicitlyDefaulted = false; - FunctionDeclBits.HasDefaultedFunctionInfo = false; + FunctionDeclBits.HasDefaultedOrDeletedInfo = false; FunctionDeclBits.IsIneligibleOrNotSelected = false; FunctionDeclBits.HasImplicitReturnZero = false; FunctionDeclBits.IsLateTemplateParsed = false; @@ -3096,30 +3096,65 @@ bool FunctionDecl::isVariadic() const { return false; } -FunctionDecl::DefaultedFunctionInfo * -FunctionDecl::DefaultedFunctionInfo::Create(ASTContext &Context, - ArrayRef Lookups) { - DefaultedFunctionInfo *Info = new (Context.Allocate( - totalSizeToAlloc(Lookups.size()), - std::max(alignof(DefaultedFunctionInfo), alignof(DeclAccessPair)))) - DefaultedFunctionInfo; +FunctionDecl::DefaultedOrDeletedFunctionInfo * +FunctionDecl::DefaultedOrDeletedFunctionInfo::Create( + ASTContext &Context, ArrayRef Lookups, + StringLiteral *DeletedMessage) { + static constexpr size_t Alignment = + std::max({alignof(DefaultedOrDeletedFunctionInfo), + alignof(DeclAccessPair), alignof(StringLiteral *)}); + size_t Size = totalSizeToAlloc( + Lookups.size(), DeletedMessage != nullptr); + + DefaultedOrDeletedFunctionInfo *Info = + new (Context.Allocate(Size, Alignment)) 
DefaultedOrDeletedFunctionInfo; Info->NumLookups = Lookups.size(); + Info->HasDeletedMessage = DeletedMessage != nullptr; + std::uninitialized_copy(Lookups.begin(), Lookups.end(), Info->getTrailingObjects()); + if (DeletedMessage) + *Info->getTrailingObjects() = DeletedMessage; return Info; } -void FunctionDecl::setDefaultedFunctionInfo(DefaultedFunctionInfo *Info) { - assert(!FunctionDeclBits.HasDefaultedFunctionInfo && "already have this"); +void FunctionDecl::setDefaultedOrDeletedInfo( + DefaultedOrDeletedFunctionInfo *Info) { + assert(!FunctionDeclBits.HasDefaultedOrDeletedInfo && "already have this"); assert(!Body && "can't replace function body with defaulted function info"); - FunctionDeclBits.HasDefaultedFunctionInfo = true; - DefaultedInfo = Info; + FunctionDeclBits.HasDefaultedOrDeletedInfo = true; + DefaultedOrDeletedInfo = Info; } -FunctionDecl::DefaultedFunctionInfo * -FunctionDecl::getDefaultedFunctionInfo() const { - return FunctionDeclBits.HasDefaultedFunctionInfo ? DefaultedInfo : nullptr; +void FunctionDecl::setDeletedAsWritten(bool D, StringLiteral *Message) { + FunctionDeclBits.IsDeleted = D; + + if (Message) { + assert(isDeletedAsWritten() && "Function must be deleted"); + if (FunctionDeclBits.HasDefaultedOrDeletedInfo) + DefaultedOrDeletedInfo->setDeletedMessage(Message); + else + setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo::Create( + getASTContext(), /*Lookups=*/{}, Message)); + } +} + +void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage( + StringLiteral *Message) { + // We should never get here with the DefaultedOrDeletedInfo populated, but + // no space allocated for the deleted message, since that would require + // recreating this, but setDefaultedOrDeletedInfo() disallows overwriting + // an already existing DefaultedOrDeletedFunctionInfo. + assert(HasDeletedMessage && + "No space to store a delete message in this DefaultedOrDeletedInfo"); + *getTrailingObjects() = Message; +} + +FunctionDecl::DefaultedOrDeletedFunctionInfo * +FunctionDecl::getDefalutedOrDeletedInfo() const { + return FunctionDeclBits.HasDefaultedOrDeletedInfo ? 
DefaultedOrDeletedInfo + : nullptr; } bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const { @@ -3206,7 +3241,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const { if (!hasBody(Definition)) return nullptr; - assert(!Definition->FunctionDeclBits.HasDefaultedFunctionInfo && + assert(!Definition->FunctionDeclBits.HasDefaultedOrDeletedInfo && "definition should not have a body"); if (Definition->Body) return Definition->Body.get(getASTContext().getExternalSource()); @@ -3215,7 +3250,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const { } void FunctionDecl::setBody(Stmt *B) { - FunctionDeclBits.HasDefaultedFunctionInfo = false; + FunctionDeclBits.HasDefaultedOrDeletedInfo = false; Body = LazyDeclStmtPtr(B); if (B) EndRangeLoc = B->getEndLoc(); @@ -4503,7 +4538,7 @@ unsigned FunctionDecl::getODRHash() { } class ODRHash Hash; - Hash.AddFunctionDecl(this, /*SkipBody=*/shouldSkipCheckingODR()); + Hash.AddFunctionDecl(this); setHasODRHash(true); ODRHash = Hash.CalculateHash(); return ODRHash; @@ -4515,7 +4550,7 @@ unsigned FunctionDecl::getODRHash() { FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle) { return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo, @@ -4566,7 +4601,7 @@ unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const { } bool FieldDecl::isZeroLengthBitField(const ASTContext &Ctx) const { - return isUnnamedBitfield() && !getBitWidth()->isValueDependent() && + return isUnnamedBitField() && !getBitWidth()->isValueDependent() && getBitWidthValue(Ctx) == 0; } @@ -5243,6 +5278,13 @@ TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) { return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C); } +void TranslationUnitDecl::setAnonymousNamespace(NamespaceDecl *D) { + AnonymousNamespace = D; + + if (ASTMutationListener *Listener = Ctx.getASTMutationListener()) + Listener->AddedAnonymousNamespace(this, D); +} + void PragmaCommentDecl::anchor() {} PragmaCommentDecl *PragmaCommentDecl::Create(const ASTContext &C, @@ -5442,7 +5484,7 @@ IndirectFieldDecl::IndirectFieldDecl(ASTContext &C, DeclContext *DC, IndirectFieldDecl * IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, llvm::MutableArrayRef CH) { return new (C, DC) IndirectFieldDecl(C, DC, L, Id, T, CH); } @@ -5465,7 +5507,8 @@ void TypeDecl::anchor() {} TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, TypeSourceInfo *TInfo) { + const IdentifierInfo *Id, + TypeSourceInfo *TInfo) { return new (C, DC) TypedefDecl(C, DC, StartLoc, IdLoc, Id, TInfo); } @@ -5515,7 +5558,8 @@ TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) { TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, + SourceLocation IdLoc, + const IdentifierInfo *Id, TypeSourceInfo *TInfo) { return new (C, DC) TypeAliasDecl(C, DC, StartLoc, IdLoc, Id, TInfo); } diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp index 66a727d9dd0c3..434926324c96c 100644 --- a/clang/lib/AST/DeclBase.cpp +++ b/clang/lib/AST/DeclBase.cpp @@ -1106,11 +1106,6 @@ bool 
Decl::isFromExplicitGlobalModule() const { return getOwningModule() && getOwningModule()->isExplicitGlobalModule(); } -bool Decl::shouldSkipCheckingODR() const { - return getASTContext().getLangOpts().SkipODRCheckInGMF && - isFromExplicitGlobalModule(); -} - static Decl::Kind getKind(const Decl *D) { return D->getKind(); } static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); } diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp index 645ec2f7563bc..00cc857f5e737 100644 --- a/clang/lib/AST/DeclCXX.cpp +++ b/clang/lib/AST/DeclCXX.cpp @@ -668,7 +668,7 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType( for (auto *FD : X->fields()) { // FIXME: Should we really care about the type of the first non-static // data member of a non-union if there are preceding unnamed bit-fields? - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; if (!IsFirstField && !FD->isZeroSize(Ctx)) @@ -947,7 +947,7 @@ void CXXRecordDecl::addedMember(Decl *D) { // A declaration for a bit-field that omits the identifier declares an // unnamed bit-field. Unnamed bit-fields are not members and cannot be // initialized. - if (Field->isUnnamedBitfield()) { + if (Field->isUnnamedBitField()) { // C++ [meta.unary.prop]p4: [LWG2358] // T is a class type [...] with [...] no unnamed bit-fields of non-zero // length @@ -3469,7 +3469,8 @@ static bool isValidStructGUID(ASTContext &Ctx, QualType T) { return false; auto MatcherIt = Fields.begin(); for (const FieldDecl *FD : RD->fields()) { - if (FD->isUnnamedBitfield()) continue; + if (FD->isUnnamedBitField()) + continue; if (FD->isBitField() || MatcherIt == Fields.end() || !(*MatcherIt)(FD->getType())) return false; diff --git a/clang/lib/AST/DeclObjC.cpp b/clang/lib/AST/DeclObjC.cpp index 962f503306a0f..32c14938cd588 100644 --- a/clang/lib/AST/DeclObjC.cpp +++ b/clang/lib/AST/DeclObjC.cpp @@ -66,7 +66,8 @@ void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts, //===----------------------------------------------------------------------===// ObjCContainerDecl::ObjCContainerDecl(Kind DK, DeclContext *DC, - IdentifierInfo *Id, SourceLocation nameLoc, + const IdentifierInfo *Id, + SourceLocation nameLoc, SourceLocation atStartLoc) : NamedDecl(DK, DC, nameLoc, Id), DeclContext(DK) { setAtStartLoc(atStartLoc); @@ -378,10 +379,8 @@ SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const { /// FindPropertyVisibleInPrimaryClass - Finds declaration of the property /// with name 'PropertyId' in the primary class; including those in protocols /// (direct or indirect) used by the primary class. -ObjCPropertyDecl * -ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( - IdentifierInfo *PropertyId, - ObjCPropertyQueryKind QueryKind) const { +ObjCPropertyDecl *ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( + const IdentifierInfo *PropertyId, ObjCPropertyQueryKind QueryKind) const { // FIXME: Should make sure no callers ever do this. 
if (!hasDefinition()) return nullptr; @@ -1539,14 +1538,10 @@ void ObjCTypeParamList::gatherDefaultTypeArgs( // ObjCInterfaceDecl //===----------------------------------------------------------------------===// -ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C, - DeclContext *DC, - SourceLocation atLoc, - IdentifierInfo *Id, - ObjCTypeParamList *typeParamList, - ObjCInterfaceDecl *PrevDecl, - SourceLocation ClassLoc, - bool isInternal){ +ObjCInterfaceDecl *ObjCInterfaceDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation atLoc, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + ObjCInterfaceDecl *PrevDecl, SourceLocation ClassLoc, bool isInternal) { auto *Result = new (C, DC) ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl, isInternal); @@ -1564,12 +1559,10 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C, return Result; } -ObjCInterfaceDecl::ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC, - SourceLocation AtLoc, IdentifierInfo *Id, - ObjCTypeParamList *typeParamList, - SourceLocation CLoc, - ObjCInterfaceDecl *PrevDecl, - bool IsInternal) +ObjCInterfaceDecl::ObjCInterfaceDecl( + const ASTContext &C, DeclContext *DC, SourceLocation AtLoc, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl, bool IsInternal) : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, AtLoc), redeclarable_base(C) { setPreviousDecl(PrevDecl); @@ -1751,8 +1744,8 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() { /// categories for this class and returns it. Name of the category is passed /// in 'CategoryId'. If category not found, return 0; /// -ObjCCategoryDecl * -ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const { +ObjCCategoryDecl *ObjCInterfaceDecl::FindCategoryDeclaration( + const IdentifierInfo *CategoryId) const { // FIXME: Should make sure no callers ever do this. 
if (!hasDefinition()) return nullptr; @@ -1838,10 +1831,10 @@ void ObjCIvarDecl::anchor() {} ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, - AccessControl ac, Expr *BW, - bool synthesized) { + SourceLocation IdLoc, + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, AccessControl ac, + Expr *BW, bool synthesized) { if (DC) { // Ivar's can only appear in interfaces, implementations (via synthesized // properties), and class extensions (via direct declaration, or synthesized @@ -2120,28 +2113,23 @@ void ObjCProtocolDecl::setHasODRHash(bool HasHash) { void ObjCCategoryDecl::anchor() {} -ObjCCategoryDecl::ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc, - SourceLocation ClassNameLoc, - SourceLocation CategoryNameLoc, - IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, - ObjCTypeParamList *typeParamList, - SourceLocation IvarLBraceLoc, - SourceLocation IvarRBraceLoc) +ObjCCategoryDecl::ObjCCategoryDecl( + DeclContext *DC, SourceLocation AtLoc, SourceLocation ClassNameLoc, + SourceLocation CategoryNameLoc, const IdentifierInfo *Id, + ObjCInterfaceDecl *IDecl, ObjCTypeParamList *typeParamList, + SourceLocation IvarLBraceLoc, SourceLocation IvarRBraceLoc) : ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc), ClassInterface(IDecl), CategoryNameLoc(CategoryNameLoc), IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) { setTypeParamList(typeParamList); } -ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation AtLoc, - SourceLocation ClassNameLoc, - SourceLocation CategoryNameLoc, - IdentifierInfo *Id, - ObjCInterfaceDecl *IDecl, - ObjCTypeParamList *typeParamList, - SourceLocation IvarLBraceLoc, - SourceLocation IvarRBraceLoc) { +ObjCCategoryDecl *ObjCCategoryDecl::Create( + ASTContext &C, DeclContext *DC, SourceLocation AtLoc, + SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc, + const IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, + ObjCTypeParamList *typeParamList, SourceLocation IvarLBraceLoc, + SourceLocation IvarRBraceLoc) { auto *CatDecl = new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id, IDecl, typeParamList, IvarLBraceLoc, @@ -2190,13 +2178,10 @@ void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) { void ObjCCategoryImplDecl::anchor() {} -ObjCCategoryImplDecl * -ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC, - IdentifierInfo *Id, - ObjCInterfaceDecl *ClassInterface, - SourceLocation nameLoc, - SourceLocation atStartLoc, - SourceLocation CategoryNameLoc) { +ObjCCategoryImplDecl *ObjCCategoryImplDecl::Create( + ASTContext &C, DeclContext *DC, const IdentifierInfo *Id, + ObjCInterfaceDecl *ClassInterface, SourceLocation nameLoc, + SourceLocation atStartLoc, SourceLocation CategoryNameLoc) { if (ClassInterface && ClassInterface->hasDefinition()) ClassInterface = ClassInterface->getDefinition(); return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc, @@ -2365,14 +2350,11 @@ ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { void ObjCPropertyDecl::anchor() {} -ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation L, - IdentifierInfo *Id, - SourceLocation AtLoc, - SourceLocation LParenLoc, - QualType T, - TypeSourceInfo *TSI, - PropertyControl propControl) { +ObjCPropertyDecl * +ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, + const 
IdentifierInfo *Id, SourceLocation AtLoc, + SourceLocation LParenLoc, QualType T, + TypeSourceInfo *TSI, PropertyControl propControl) { return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T, TSI, propControl); } diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp index fedf03830168f..b4f9821efe2f9 100644 --- a/clang/lib/AST/DeclPrinter.cpp +++ b/clang/lib/AST/DeclPrinter.cpp @@ -119,7 +119,7 @@ namespace { void printTemplateArguments(llvm::ArrayRef Args, const TemplateParameterList *Params); enum class AttrPosAsWritten { Default = 0, Left, Right }; - void + bool prettyPrintAttributes(const Decl *D, AttrPosAsWritten Pos = AttrPosAsWritten::Default); void prettyPrintPragmas(Decl *D); @@ -252,16 +252,19 @@ static DeclPrinter::AttrPosAsWritten getPosAsWritten(const Attr *A, return DeclPrinter::AttrPosAsWritten::Right; } -void DeclPrinter::prettyPrintAttributes(const Decl *D, +// returns true if an attribute was printed. +bool DeclPrinter::prettyPrintAttributes(const Decl *D, AttrPosAsWritten Pos /*=Default*/) { - if (Policy.PolishForDeclaration) - return; + bool hasPrinted = false; if (D->hasAttrs()) { const AttrVec &Attrs = D->getAttrs(); for (auto *A : Attrs) { if (A->isInherited() || A->isImplicit()) continue; + // Print out the keyword attributes, they aren't regular attributes. + if (Policy.PolishForDeclaration && !A->isKeywordAttribute()) + continue; switch (A->getKind()) { #define ATTR(X) #define PRAGMA_SPELLING_ATTR(X) case attr::X: @@ -275,6 +278,7 @@ void DeclPrinter::prettyPrintAttributes(const Decl *D, if (Pos != AttrPosAsWritten::Left) Out << ' '; A->printPretty(Out, Policy); + hasPrinted = true; if (Pos == AttrPosAsWritten::Left) Out << ' '; } @@ -282,6 +286,7 @@ void DeclPrinter::prettyPrintAttributes(const Decl *D, } } } + return hasPrinted; } void DeclPrinter::prettyPrintPragmas(Decl *D) { @@ -828,9 +833,14 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (D->isPureVirtual()) Out << " = 0"; - else if (D->isDeletedAsWritten()) + else if (D->isDeletedAsWritten()) { Out << " = delete"; - else if (D->isExplicitlyDefaulted()) + if (const StringLiteral *M = D->getDeletedMessage()) { + Out << "("; + M->outputString(Out); + Out << ")"; + } + } else if (D->isExplicitlyDefaulted()) Out << " = default"; else if (D->doesThisDeclarationHaveABody() && !Policy.SuppressDefinition) { if (!Policy.TerseOutput) { @@ -1066,12 +1076,15 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { // FIXME: add printing of pragma attributes if required. if (!Policy.SuppressSpecifiers && D->isModulePrivate()) Out << "__module_private__ "; - Out << D->getKindName(); - prettyPrintAttributes(D); + Out << D->getKindName() << ' '; - if (D->getIdentifier()) { + // FIXME: Move before printing the decl kind to match the behavior of the + // attribute printing for variables and function where they are printed first. 
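A few hunks below, TemplateTemplateParmDecl grows a Typename flag and the printer emits "typename" instead of "class" when that is what was written. Since C++17 both spellings of a template template parameter are valid, so the AST has to record which one appeared:

    // Equivalent declarations; C++17 allows 'typename' here, and the AST now
    // remembers which keyword was written so it can be printed back faithfully.
    template <template <typename> class C>    struct WrapA {};
    template <template <typename> typename C> struct WrapB {};

    template <typename T> struct Box {};
    WrapA<Box> a;  // OK
    WrapB<Box> b;  // OK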
+ if (prettyPrintAttributes(D, AttrPosAsWritten::Left)) Out << ' '; + + if (D->getIdentifier()) { if (auto *NNS = D->getQualifier()) NNS->print(Out, Policy); Out << *D; @@ -1088,16 +1101,13 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { } } - if (D->hasDefinition() && !Policy.SuppressFinalSpecifier) { - if (D->hasAttr()) { - Out << " final"; - } - } + prettyPrintAttributes(D, AttrPosAsWritten::Right); if (D->isCompleteDefinition() && !Policy.SuppressDefinition) { + Out << ' '; // Print the base classes if (D->getNumBases()) { - Out << " : "; + Out << ": "; for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(), BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) { if (Base != D->bases_begin()) @@ -1116,14 +1126,15 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { if (Base->isPackExpansion()) Out << "..."; } + Out << ' '; } // Print the class definition // FIXME: Doesn't print access specifiers, e.g., "public:" if (Policy.TerseOutput) { - Out << " {}"; + Out << "{}"; } else { - Out << " {\n"; + Out << "{\n"; VisitDeclContext(D); Indent() << "}"; } @@ -1224,7 +1235,10 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) { if (const TemplateTemplateParmDecl *TTP = dyn_cast(D)) { - Out << "class"; + if (TTP->wasDeclaredWithTypename()) + Out << "typename"; + else + Out << "class"; if (TTP->isParameterPack()) Out << " ..."; diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp index 3c217d6a6a5ae..5aa2484197372 100644 --- a/clang/lib/AST/DeclTemplate.cpp +++ b/clang/lib/AST/DeclTemplate.cpp @@ -715,7 +715,7 @@ void TemplateTypeParmDecl::setTypeConstraint( NonTypeTemplateParmDecl::NonTypeTemplateParmDecl( DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, unsigned D, - unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, + unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef ExpandedTypes, ArrayRef ExpandedTInfos) : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc), TemplateParmPosition(D, P), ParameterPack(true), @@ -730,12 +730,10 @@ NonTypeTemplateParmDecl::NonTypeTemplateParmDecl( } } -NonTypeTemplateParmDecl * -NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, SourceLocation IdLoc, - unsigned D, unsigned P, IdentifierInfo *Id, - QualType T, bool ParameterPack, - TypeSourceInfo *TInfo) { +NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, + QualType T, bool ParameterPack, TypeSourceInfo *TInfo) { AutoType *AT = C.getLangOpts().CPlusPlus20 ? 
T->getContainedAutoType() : nullptr; return new (C, DC, @@ -748,7 +746,7 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create( const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef ExpandedTypes, ArrayRef ExpandedTInfos) { AutoType *AT = TInfo->getType()->getContainedAutoType(); @@ -807,10 +805,10 @@ void TemplateTemplateParmDecl::anchor() {} TemplateTemplateParmDecl::TemplateTemplateParmDecl( DeclContext *DC, SourceLocation L, unsigned D, unsigned P, - IdentifierInfo *Id, TemplateParameterList *Params, + IdentifierInfo *Id, bool Typename, TemplateParameterList *Params, ArrayRef Expansions) : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params), - TemplateParmPosition(D, P), ParameterPack(true), + TemplateParmPosition(D, P), Typename(Typename), ParameterPack(true), ExpandedParameterPack(true), NumExpandedParams(Expansions.size()) { if (!Expansions.empty()) std::uninitialized_copy(Expansions.begin(), Expansions.end(), @@ -821,26 +819,26 @@ TemplateTemplateParmDecl * TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, unsigned P, bool ParameterPack, IdentifierInfo *Id, - TemplateParameterList *Params) { + bool Typename, TemplateParameterList *Params) { return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id, - Params); + Typename, Params); } TemplateTemplateParmDecl * TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, unsigned P, - IdentifierInfo *Id, + IdentifierInfo *Id, bool Typename, TemplateParameterList *Params, ArrayRef Expansions) { return new (C, DC, additionalSizeToAlloc(Expansions.size())) - TemplateTemplateParmDecl(DC, L, D, P, Id, Params, Expansions); + TemplateTemplateParmDecl(DC, L, D, P, Id, Typename, Params, Expansions); } TemplateTemplateParmDecl * TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) { return new (C, ID) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, - false, nullptr, nullptr); + false, nullptr, false, nullptr); } TemplateTemplateParmDecl * @@ -849,7 +847,7 @@ TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, auto *TTP = new (C, ID, additionalSizeToAlloc(NumExpansions)) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr, - nullptr, std::nullopt); + false, nullptr, std::nullopt); TTP->NumExpandedParams = NumExpansions; return TTP; } @@ -1471,7 +1469,7 @@ createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) { // template class IntSeq auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create( C, DC, SourceLocation(), /*Depth=*/0, /*Position=*/0, - /*ParameterPack=*/false, /*Id=*/nullptr, TPL); + /*ParameterPack=*/false, /*Id=*/nullptr, /*Typename=*/false, TPL); TemplateTemplateParm->setImplicit(true); // typename T diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp index fc9b3ab92553d..ead0b9b3727e1 100644 --- a/clang/lib/AST/Expr.cpp +++ b/clang/lib/AST/Expr.cpp @@ -2099,7 +2099,7 @@ const FieldDecl *CastExpr::getTargetFieldForToUnionCast(const RecordDecl *RD, for (Field = RD->field_begin(), FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) { if (Ctx.hasSameUnqualifiedType(Field->getType(), OpType) && - !Field->isUnnamedBitfield()) { + !Field->isUnnamedBitField()) { 
return *Field; } } @@ -3445,7 +3445,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, continue; // Don't emit anonymous bitfields, they just affect layout. - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; if (ElementNo < ILE->getNumInits()) { diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 07f0434b9f526..2632b60c6f0a6 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -2492,7 +2492,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, } } for (const auto *I : RD->fields()) { - if (I->isUnnamedBitfield()) + if (I->isUnnamedBitField()) continue; if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(), @@ -3529,7 +3529,7 @@ static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) { return false; for (auto *Field : RD->fields()) - if (!Field->isUnnamedBitfield() && + if (!Field->isUnnamedBitField() && isReadByLvalueToRvalueConversion(Field->getType())) return true; @@ -4898,7 +4898,7 @@ static bool handleDefaultInitValue(QualType T, APValue &Result) { handleDefaultInitValue(I->getType(), Result.getStructBase(Index)); for (const auto *I : RD->fields()) { - if (I->isUnnamedBitfield()) + if (I->isUnnamedBitField()) continue; Success &= handleDefaultInitValue( I->getType(), Result.getStructField(I->getFieldIndex())); @@ -6436,7 +6436,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // Default-initialize any fields with no explicit initializer. for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) { assert(FieldIt != RD->field_end() && "missing field?"); - if (!FieldIt->isUnnamedBitfield()) + if (!FieldIt->isUnnamedBitField()) Success &= handleDefaultInitValue( FieldIt->getType(), Result.getStructField(FieldIt->getFieldIndex())); @@ -6546,7 +6546,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // Default-initialize any remaining fields. if (!RD->isUnion()) { for (; FieldIt != RD->field_end(); ++FieldIt) { - if (!FieldIt->isUnnamedBitfield()) + if (!FieldIt->isUnnamedBitField()) Success &= handleDefaultInitValue( FieldIt->getType(), Result.getStructField(FieldIt->getFieldIndex())); @@ -6708,7 +6708,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange, // fields first and then walk them backwards. SmallVector Fields(RD->fields()); for (const FieldDecl *FD : llvm::reverse(Fields)) { - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; LValue Subobject = This; @@ -10238,7 +10238,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E, for (const auto *I : RD->fields()) { // -- if T is a reference type, no initialization is performed. 
- if (I->isUnnamedBitfield() || I->getType()->isReferenceType()) + if (I->isUnnamedBitField() || I->getType()->isReferenceType()) continue; LValue Subobject = This; @@ -10261,7 +10261,7 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) { // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the // object's first non-static named data member is zero-initialized RecordDecl::field_iterator I = RD->field_begin(); - while (I != RD->field_end() && (*I)->isUnnamedBitfield()) + while (I != RD->field_end() && (*I)->isUnnamedBitField()) ++I; if (I == RD->field_end()) { Result = APValue((const FieldDecl*)nullptr); @@ -10408,7 +10408,7 @@ bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr( for (const auto *Field : RD->fields()) { // Anonymous bit-fields are not considered members of the class for // purposes of aggregate initialization. - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; LValue Subobject = This; diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp index 84bacd457c85b..f317f506d24f4 100644 --- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp +++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp @@ -262,7 +262,7 @@ bool ByteCodeExprGen::VisitCastExpr(const CastExpr *CE) { return this->discard(SubExpr); std::optional FromT = classify(SubExpr->getType()); - std::optional ToT = classifyPrim(CE->getType()); + std::optional ToT = classify(CE->getType()); if (!FromT || !ToT) return false; @@ -398,6 +398,35 @@ bool ByteCodeExprGen::VisitCastExpr(const CastExpr *CE) { return true; } + case CK_VectorSplat: { + assert(!classify(CE->getType())); + assert(classify(SubExpr->getType())); + assert(CE->getType()->isVectorType()); + + if (DiscardResult) + return this->discard(SubExpr); + + assert(Initializing); // FIXME: Not always correct. + const auto *VT = CE->getType()->getAs(); + PrimType ElemT = classifyPrim(SubExpr); + unsigned ElemOffset = allocateLocalPrimitive( + SubExpr, ElemT, /*IsConst=*/true, /*IsExtended=*/false); + + if (!this->visit(SubExpr)) + return false; + if (!this->emitSetLocal(ElemT, ElemOffset, CE)) + return false; + + for (unsigned I = 0; I != VT->getNumElements(); ++I) { + if (!this->emitGetLocal(ElemT, ElemOffset, CE)) + return false; + if (!this->emitInitElem(ElemT, I, CE)) + return false; + } + + return true; + } + case CK_ToVoid: return discard(SubExpr); @@ -942,6 +971,11 @@ bool ByteCodeExprGen::visitInitList(ArrayRef Inits, unsigned InitIndex = 0; for (const Expr *Init : Inits) { + // Skip unnamed bitfields. + while (InitIndex < R->getNumFields() && + R->getField(InitIndex)->Decl->isUnnamedBitField()) + ++InitIndex; + if (!this->emitDupPtr(E)) return false; @@ -1251,6 +1285,15 @@ bool ByteCodeExprGen::VisitUnaryExprOrTypeTraitExpr( return this->emitConst(Size.getQuantity(), E); } + if (Kind == UETT_VectorElements) { + if (const auto *VT = E->getTypeOfArgument()->getAs()) + return this->emitConst(VT->getNumElements(), E); + + // FIXME: Apparently we need to catch the fact that a sizeless vector type + // has been passed and diagnose that (at run time). + assert(E->getTypeOfArgument()->isSizelessVectorType()); + } + return false; } @@ -1258,10 +1301,30 @@ template bool ByteCodeExprGen::VisitMemberExpr(const MemberExpr *E) { // 'Base.Member' const Expr *Base = E->getBase(); + const ValueDecl *Member = E->getMemberDecl(); if (DiscardResult) return this->discard(Base); + // MemberExprs are almost always lvalues, in which case we don't need to + // do the load. 
But sometimes they aren't. + const auto maybeLoadValue = [&]() -> bool { + if (E->isGLValue()) + return true; + if (std::optional T = classify(E)) + return this->emitLoadPop(*T, E); + return false; + }; + + if (const auto *VD = dyn_cast(Member)) { + // I am almost confident in saying that a var decl must be static + // and therefore registered as a global variable. But this will probably + // turn out to be wrong some time in the future, as always. + if (auto GlobalIndex = P.getGlobal(VD)) + return this->emitGetPtrGlobal(*GlobalIndex, E) && maybeLoadValue(); + return false; + } + if (Initializing) { if (!this->delegate(Base)) return false; @@ -1271,16 +1334,14 @@ bool ByteCodeExprGen::VisitMemberExpr(const MemberExpr *E) { } // Base above gives us a pointer on the stack. - // TODO: Implement non-FieldDecl members. - const ValueDecl *Member = E->getMemberDecl(); if (const auto *FD = dyn_cast(Member)) { const RecordDecl *RD = FD->getParent(); const Record *R = getRecord(RD); const Record::Field *F = R->getField(FD); // Leave a pointer to the field on the stack. if (F->Decl->getType()->isReferenceType()) - return this->emitGetFieldPop(PT_Ptr, F->Offset, E); - return this->emitGetPtrField(F->Offset, E); + return this->emitGetFieldPop(PT_Ptr, F->Offset, E) && maybeLoadValue(); + return this->emitGetPtrField(F->Offset, E) && maybeLoadValue(); } return false; @@ -1615,7 +1676,7 @@ bool ByteCodeExprGen::VisitCompoundAssignOperator( return false; if (!this->emitLoad(*LT, E)) return false; - if (*LT != *LHSComputationT) { + if (LT != LHSComputationT) { if (!this->emitCast(*LT, *LHSComputationT, E)) return false; } @@ -1671,7 +1732,7 @@ bool ByteCodeExprGen::VisitCompoundAssignOperator( } // And now cast from LHSComputationT to ResultT. - if (*ResultT != *LHSComputationT) { + if (ResultT != LHSComputationT) { if (!this->emitCast(*LHSComputationT, *ResultT, E)) return false; } @@ -1782,7 +1843,7 @@ bool ByteCodeExprGen::VisitCompoundLiteralExpr( const Expr *Init = E->getInitializer(); if (Initializing) { // We already have a value, just initialize that. - return this->visitInitializer(Init); + return this->visitInitializer(Init) && this->emitFinishInit(E); } std::optional T = classify(E->getType()); @@ -1801,7 +1862,7 @@ bool ByteCodeExprGen::VisitCompoundLiteralExpr( return this->emitInitGlobal(*T, *GlobalIndex, E); } - return this->visitInitializer(Init); + return this->visitInitializer(Init) && this->emitFinishInit(E); } return false; @@ -1830,7 +1891,7 @@ bool ByteCodeExprGen::VisitCompoundLiteralExpr( } return this->emitInit(*T, E); } else { - if (!this->visitInitializer(Init)) + if (!this->visitInitializer(Init) || !this->emitFinishInit(E)) return false; } @@ -2778,26 +2839,34 @@ bool ByteCodeExprGen::visitVarDecl(const VarDecl *VD) { std::optional VarT = classify(VD->getType()); if (Context::shouldBeGloballyIndexed(VD)) { - // We've already seen and initialized this global. - if (P.getGlobal(VD)) - return true; - - std::optional GlobalIndex = P.createGlobal(VD, Init); - - if (!GlobalIndex) - return false; - - if (Init) { + auto initGlobal = [&](unsigned GlobalIndex) -> bool { + assert(Init); DeclScope LocalScope(this, VD); if (VarT) { if (!this->visit(Init)) return false; - return this->emitInitGlobal(*VarT, *GlobalIndex, VD); + return this->emitInitGlobal(*VarT, GlobalIndex, VD); } - return this->visitGlobalInitializer(Init, *GlobalIndex); + return this->visitGlobalInitializer(Init, GlobalIndex); + }; + + // We've already seen and initialized this global. 
+ if (std::optional GlobalIndex = P.getGlobal(VD)) { + if (P.getPtrGlobal(*GlobalIndex).isInitialized()) + return true; + + // The previous attempt at initialization might've been unsuccessful, + // so let's try this one. + return Init && initGlobal(*GlobalIndex); } - return true; + + std::optional GlobalIndex = P.createGlobal(VD, Init); + + if (!GlobalIndex) + return false; + + return !Init || initGlobal(*GlobalIndex); } else { VariableScope LocalScope(this); if (VarT) { @@ -3142,15 +3211,20 @@ bool ByteCodeExprGen::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitAddf(getRoundingMode(E), E)) return false; - return this->emitStoreFloat(E); + if (!this->emitStoreFloat(E)) + return false; + } else { + assert(isIntegralType(*T)); + if (!this->emitLoad(*T, E)) + return false; + if (!this->emitConst(1, E)) + return false; + if (!this->emitAdd(*T, E)) + return false; + if (!this->emitStore(*T, E)) + return false; } - if (!this->emitLoad(*T, E)) - return false; - if (!this->emitConst(1, E)) - return false; - if (!this->emitAdd(*T, E)) - return false; - return this->emitStore(*T, E); + return E->isGLValue() || this->emitLoadPop(*T, E); } case UO_PreDec: { // --x if (!this->visit(SubExpr)) @@ -3181,15 +3255,20 @@ bool ByteCodeExprGen::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitSubf(getRoundingMode(E), E)) return false; - return this->emitStoreFloat(E); + if (!this->emitStoreFloat(E)) + return false; + } else { + assert(isIntegralType(*T)); + if (!this->emitLoad(*T, E)) + return false; + if (!this->emitConst(1, E)) + return false; + if (!this->emitSub(*T, E)) + return false; + if (!this->emitStore(*T, E)) + return false; } - if (!this->emitLoad(*T, E)) - return false; - if (!this->emitConst(1, E)) - return false; - if (!this->emitSub(*T, E)) - return false; - return this->emitStore(*T, E); + return E->isGLValue() || this->emitLoadPop(*T, E); } case UO_LNot: // !x if (DiscardResult) diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h index db0d73ce23f7c..7e9dc8631fc0d 100644 --- a/clang/lib/AST/Interp/ByteCodeExprGen.h +++ b/clang/lib/AST/Interp/ByteCodeExprGen.h @@ -148,13 +148,20 @@ class ByteCodeExprGen : public ConstStmtVisitor, bool>, return Ctx.classify(Ty); } - /// Classifies a known primitive type + /// Classifies a known primitive type. PrimType classifyPrim(QualType Ty) const { if (auto T = classify(Ty)) { return *T; } llvm_unreachable("not a primitive type"); } + /// Classifies a known primitive expression. + PrimType classifyPrim(const Expr *E) const { + if (auto T = classify(E)) + return *T; + llvm_unreachable("not a primitive type"); + } + /// Evaluates an expression and places the result on the stack. If the /// expression is of composite type, a local variable will be created /// and a pointer to said variable will be placed on the stack. 
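The visitVarDecl change above stops treating "a global index exists" as "the global is initialized": if an earlier evaluation registered the slot but its initializer failed, the next visit retries instead of silently succeeding. A standalone sketch of that create-or-retry shape, with a hypothetical registry standing in for the interpreter's Program:

    #include <functional>
    #include <optional>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct GlobalSlot { bool Initialized = false; };

    class Registry {
      std::vector<GlobalSlot> Slots;
      std::unordered_map<std::string, unsigned> Index;

    public:
      std::optional<unsigned> lookup(const std::string &Name) const {
        auto It = Index.find(Name);
        if (It == Index.end())
          return std::nullopt;
        return It->second;
      }

      unsigned create(const std::string &Name) {
        Slots.push_back({});
        return Index[Name] = unsigned(Slots.size() - 1);
      }

      // Returns true if the slot is initialized once we are done.
      bool visit(const std::string &Name, const std::function<bool()> &RunInit) {
        auto initSlot = [&](unsigned I) {
          Slots[I].Initialized = RunInit();  // may fail; slot stays registered
          return Slots[I].Initialized;
        };
        if (std::optional<unsigned> I = lookup(Name)) {
          if (Slots[*I].Initialized)
            return true;                 // already done, nothing to redo
          return initSlot(*I);           // earlier attempt failed: retry
        }
        return initSlot(create(Name));   // first sighting: create, then init
      }
    };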
diff --git a/clang/lib/AST/Interp/Disasm.cpp b/clang/lib/AST/Interp/Disasm.cpp index 022b394e58e64..d127f33223e80 100644 --- a/clang/lib/AST/Interp/Disasm.cpp +++ b/clang/lib/AST/Interp/Disasm.cpp @@ -140,7 +140,7 @@ LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const { const Descriptor *Desc = G->block()->getDescriptor(); Pointer GP = getPtrGlobal(GI); - OS << GI << ": " << (void *)G->block() << " "; + OS << GI << ": " << (const void *)G->block() << " "; { ColorScope SC(OS, true, GP.isInitialized() @@ -264,3 +264,19 @@ LLVM_DUMP_METHOD void Record::dump(llvm::raw_ostream &OS, unsigned Indentation, ++I; } } + +LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const { + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_BLUE, true}); + OS << "Block " << (const void *)this << "\n"; + } + unsigned NPointers = 0; + for (const Pointer *P = Pointers; P; P = P->Next) { + ++NPointers; + } + OS << " Pointers: " << NPointers << "\n"; + OS << " Dead: " << IsDead << "\n"; + OS << " Static: " << IsStatic << "\n"; + OS << " Extern: " << IsExtern << "\n"; + OS << " Initialized: " << IsInitialized << "\n"; +} diff --git a/clang/lib/AST/Interp/EvaluationResult.cpp b/clang/lib/AST/Interp/EvaluationResult.cpp index d567b551f7f6f..e92d686c724cc 100644 --- a/clang/lib/AST/Interp/EvaluationResult.cpp +++ b/clang/lib/AST/Interp/EvaluationResult.cpp @@ -105,7 +105,7 @@ static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc, Result &= CheckFieldsInitialized(S, Loc, FieldPtr, FieldPtr.getRecord()); } else if (FieldType->isIncompleteArrayType()) { // Nothing to do here. - } else if (F.Decl->isUnnamedBitfield()) { + } else if (F.Decl->isUnnamedBitField()) { // Nothing do do here. } else if (FieldType->isArrayType()) { const auto *CAT = diff --git a/clang/lib/AST/Interp/FunctionPointer.h b/clang/lib/AST/Interp/FunctionPointer.h index e7fad8161fd9c..fc3d7a4214a72 100644 --- a/clang/lib/AST/Interp/FunctionPointer.h +++ b/clang/lib/AST/Interp/FunctionPointer.h @@ -20,31 +20,46 @@ namespace interp { class FunctionPointer final { private: const Function *Func; + bool Valid; public: - // FIXME: We might want to track the fact that the Function pointer - // has been created from an integer and is most likely garbage anyway. 
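The FIXME above is resolved by the Valid flag added in this hunk: a FunctionPointer materialized from an integer is kept but tagged, so later calls and comparisons through it can be rejected rather than dereferencing a garbage Function pointer. For reference, the kind of source that reaches this state:

    using Fn = void (*)();

    void callee() {}
    constexpr Fn good = &callee;  // fine: a real function known at compile time

    // A function pointer conjured from an integer is not a constant expression;
    // the interpreter must carry it as an "invalid" value and diagnose any use:
    //   constexpr Fn bad = (Fn)0x1234;   // rejected during constant evaluation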
- FunctionPointer(int IntVal = 0, const Descriptor *Desc = nullptr) - : Func(reinterpret_cast(IntVal)) {} + FunctionPointer(const Function *Func) : Func(Func), Valid(true) { + assert(Func); + } - FunctionPointer(const Function *Func) : Func(Func) { assert(Func); } + FunctionPointer(uintptr_t IntVal = 0, const Descriptor *Desc = nullptr) + : Func(reinterpret_cast(IntVal)), Valid(false) {} const Function *getFunction() const { return Func; } bool isZero() const { return !Func; } + bool isValid() const { return Valid; } + bool isWeak() const { + if (!Func || !Valid) + return false; + + return Func->getDecl()->isWeak(); + } APValue toAPValue() const { if (!Func) return APValue(static_cast(nullptr), CharUnits::Zero(), {}, /*OnePastTheEnd=*/false, /*IsNull=*/true); + if (!Valid) + return APValue(static_cast(nullptr), + CharUnits::fromQuantity(getIntegerRepresentation()), {}, + /*OnePastTheEnd=*/false, /*IsNull=*/false); + return APValue(Func->getDecl(), CharUnits::Zero(), {}, /*OnePastTheEnd=*/false, /*IsNull=*/false); } void print(llvm::raw_ostream &OS) const { OS << "FnPtr("; - if (Func) + if (Func && Valid) OS << Func->getName(); + else if (Func) + OS << reinterpret_cast(Func); else OS << "nullptr"; OS << ")"; diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp index e5e2c932f500b..2607e07432516 100644 --- a/clang/lib/AST/Interp/Interp.cpp +++ b/clang/lib/AST/Interp/Interp.cpp @@ -56,22 +56,65 @@ static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) { return true; } +static void diagnoseMissingInitializer(InterpState &S, CodePtr OpPC, + const ValueDecl *VD) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange(); +} + +static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC, + const ValueDecl *VD); +static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC, + const ValueDecl *D) { + const SourceInfo &E = S.Current->getSource(OpPC); + + if (isa(D)) { + if (S.getLangOpts().CPlusPlus11) { + S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D; + S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange(); + } else { + S.FFDiag(E); + } + } else if (const auto *VD = dyn_cast(D)) { + if (!VD->getType().isConstQualified()) { + diagnoseNonConstVariable(S, OpPC, VD); + return false; + } + + // const, but no initializer. + if (!VD->getAnyInitializer()) { + diagnoseMissingInitializer(S, OpPC, VD); + return false; + } + } + return false; +} + static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC, const ValueDecl *VD) { if (!S.getLangOpts().CPlusPlus) return; const SourceInfo &Loc = S.Current->getSource(OpPC); + if (const auto *VarD = dyn_cast(VD); + VarD && VarD->getType().isConstQualified() && + !VarD->getAnyInitializer()) { + diagnoseMissingInitializer(S, OpPC, VD); + return; + } - if (VD->getType()->isIntegralOrEnumerationType()) + if (VD->getType()->isIntegralOrEnumerationType()) { S.FFDiag(Loc, diag::note_constexpr_ltor_non_const_int, 1) << VD; - else - S.FFDiag(Loc, - S.getLangOpts().CPlusPlus11 - ? diag::note_constexpr_ltor_non_constexpr - : diag::note_constexpr_ltor_non_integral, - 1) - << VD << VD->getType(); + S.Note(VD->getLocation(), diag::note_declared_at); + return; + } + + S.FFDiag(Loc, + S.getLangOpts().CPlusPlus11 ? 
diag::note_constexpr_ltor_non_constexpr + : diag::note_constexpr_ltor_non_integral, + 1) + << VD << VD->getType(); S.Note(VD->getLocation(), diag::note_declared_at); } @@ -202,6 +245,9 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { if (!Ptr.isExtern()) return true; + if (Ptr.isInitialized()) + return true; + if (!S.checkingPotentialConstantExpression() && S.getLangOpts().CPlusPlus) { const auto *VD = Ptr.getDeclDesc()->asValueDecl(); diagnoseNonConstVariable(S, OpPC, VD); @@ -369,9 +415,15 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if (const auto *VD = Ptr.getDeclDesc()->asVarDecl(); VD && VD->hasGlobalStorage()) { const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD; - S.Note(VD->getLocation(), diag::note_declared_at); + if (VD->getAnyInitializer()) { + S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at); + } else { + diagnoseMissingInitializer(S, OpPC, VD); + } + return false; } + if (!S.checkingPotentialConstantExpression()) { S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_uninit) << AK << /*uninitialized=*/true << S.Current->getRange(OpPC); @@ -598,33 +650,6 @@ bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result, return true; } -static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC, - const ValueDecl *D) { - const SourceInfo &E = S.Current->getSource(OpPC); - - if (isa(D)) { - if (S.getLangOpts().CPlusPlus11) { - S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D; - S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange(); - } else { - S.FFDiag(E); - } - } else if (const auto *VD = dyn_cast(D)) { - if (!VD->getType().isConstQualified()) { - diagnoseNonConstVariable(S, OpPC, VD); - return false; - } - - // const, but no initializer. - if (!VD->getAnyInitializer()) { - S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD; - S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange(); - return false; - } - } - return false; -} - /// We aleady know the given DeclRefExpr is invalid for some reason, /// now figure out why and print appropriate diagnostics. bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) { diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h index c7012aa4ec680..dd0bacd73acb1 100644 --- a/clang/lib/AST/Interp/Interp.h +++ b/clang/lib/AST/Interp/Interp.h @@ -758,7 +758,7 @@ inline bool CmpHelperEQ(InterpState &S, CodePtr OpPC, // We cannot compare against weak declarations at compile time. for (const auto &FP : {LHS, RHS}) { - if (!FP.isZero() && FP.getFunction()->getDecl()->isWeak()) { + if (FP.isWeak()) { const SourceInfo &Loc = S.Current->getSource(OpPC); S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison) << FP.toDiagnosticString(S.getCtx()); @@ -2236,6 +2236,10 @@ inline bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize, << const_cast(E) << E->getSourceRange(); return false; } + + if (!FuncPtr.isValid()) + return false; + assert(F); // Check argument nullability state. 
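// Illustrative sketch, not part of the patch: FP.isWeak() above bundles the
// null and validity checks with the weakness test. The underlying rule is that
// address comparisons involving weak declarations are not constant
// expressions, because the symbol may still resolve to null at link time.
__attribute__((weak)) void maybe_present();        // hypothetical weak function
// constexpr bool b = &maybe_present != nullptr;   // rejected as non-constant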
diff --git a/clang/lib/AST/Interp/InterpBlock.h b/clang/lib/AST/Interp/InterpBlock.h index 9db82567d2d5d..6d5856fbd4ea1 100644 --- a/clang/lib/AST/Interp/InterpBlock.h +++ b/clang/lib/AST/Interp/InterpBlock.h @@ -118,6 +118,9 @@ class Block final { IsInitialized = false; } + void dump() const { dump(llvm::errs()); } + void dump(llvm::raw_ostream &OS) const; + protected: friend class Pointer; friend class DeadBlock; diff --git a/clang/lib/AST/Interp/InterpBuiltin.cpp b/clang/lib/AST/Interp/InterpBuiltin.cpp index 984ba4f7f2689..f562f9e1cb19f 100644 --- a/clang/lib/AST/Interp/InterpBuiltin.cpp +++ b/clang/lib/AST/Interp/InterpBuiltin.cpp @@ -977,6 +977,117 @@ static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, return true; } +/// __builtin_is_aligned() +/// __builtin_align_up() +/// __builtin_align_down() +/// The first parameter is either an integer or a pointer. +/// The second parameter is the requested alignment as an integer. +static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + unsigned BuiltinOp = Func->getBuiltinID(); + unsigned CallSize = callArgSize(S, Call); + + PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1)); + const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT); + + if (Alignment < 0 || !Alignment.isPowerOf2()) { + S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment; + return false; + } + unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType()); + APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1)); + if (APSInt::compareValues(Alignment, MaxValue) > 0) { + S.FFDiag(Call, diag::note_constexpr_alignment_too_big) + << MaxValue << Call->getArg(0)->getType() << Alignment; + return false; + } + + // The first parameter is either an integer or a pointer (but not a function + // pointer). + PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0)); + + if (isIntegralType(FirstArgT)) { + const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize); + APSInt Align = Alignment.extOrTrunc(Src.getBitWidth()); + if (BuiltinOp == Builtin::BI__builtin_align_up) { + APSInt AlignedVal = + APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned()); + pushInteger(S, AlignedVal, Call->getType()); + } else if (BuiltinOp == Builtin::BI__builtin_align_down) { + APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned()); + pushInteger(S, AlignedVal, Call->getType()); + } else { + assert(*S.Ctx.classify(Call->getType()) == PT_Bool); + S.Stk.push((Src & (Align - 1)) == 0); + } + return true; + } + + assert(FirstArgT == PT_Ptr); + const Pointer &Ptr = S.Stk.peek(CallSize); + + unsigned PtrOffset = Ptr.getByteOffset(); + PtrOffset = Ptr.getIndex(); + CharUnits BaseAlignment = + S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl()); + CharUnits PtrAlign = + BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset)); + + if (BuiltinOp == Builtin::BI__builtin_is_aligned) { + if (PtrAlign.getQuantity() >= Alignment) { + S.Stk.push(true); + return true; + } + // If the alignment is not known to be sufficient, some cases could still + // be aligned at run time. However, if the requested alignment is less or + // equal to the base alignment and the offset is not aligned, we know that + // the run-time value can never be aligned. 
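// Worked examples, not part of the patch, for the two operand kinds this
// function handles. The integral paths fold with plain bit arithmetic:
static_assert(__builtin_align_up(13, 8) == 16, "");   // (13 + 7) & ~7
static_assert(__builtin_align_down(13, 8) == 8, "");  // 13 & ~7
static_assert(__builtin_is_aligned(16, 8), "");
// For pointers, the checks below reason from the declared base alignment:
// given _Alignas(32) char buf[64], __builtin_is_aligned(&buf[4], 8) folds to
// false (the offset is misaligned and 8 <= 32), while a 64-byte query must be
// left to run time because the storage might happen to land on a 64-byte
// boundary.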
+ if (BaseAlignment.getQuantity() >= Alignment && + PtrAlign.getQuantity() < Alignment) { + S.Stk.push(false); + return true; + } + + S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute) + << Alignment; + return false; + } + + assert(BuiltinOp == Builtin::BI__builtin_align_down || + BuiltinOp == Builtin::BI__builtin_align_up); + + // For align_up/align_down, we can return the same value if the alignment + // is known to be greater or equal to the requested value. + if (PtrAlign.getQuantity() >= Alignment) { + S.Stk.push(Ptr); + return true; + } + + // The alignment could be greater than the minimum at run-time, so we cannot + // infer much about the resulting pointer value. One case is possible: + // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we + // can infer the correct index if the requested alignment is smaller than + // the base alignment so we can perform the computation on the offset. + if (BaseAlignment.getQuantity() >= Alignment) { + assert(Alignment.getBitWidth() <= 64 && + "Cannot handle > 64-bit address-space"); + uint64_t Alignment64 = Alignment.getZExtValue(); + CharUnits NewOffset = + CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down + ? llvm::alignDown(PtrOffset, Alignment64) + : llvm::alignTo(PtrOffset, Alignment64)); + + S.Stk.push(Ptr.atIndex(NewOffset.getQuantity())); + return true; + } + + // Otherwise, we cannot constant-evaluate the result. + S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment; + return false; +} + bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, const CallExpr *Call) { const InterpFrame *Frame = S.Current; @@ -1291,6 +1402,13 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, return false; break; + case Builtin::BI__builtin_is_aligned: + case Builtin::BI__builtin_align_up: + case Builtin::BI__builtin_align_down: + if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call)) + return false; + break; + default: S.FFDiag(S.Current->getLocation(OpPC), diag::note_invalid_subexpr_in_const_expr) diff --git a/clang/lib/AST/Interp/InterpFrame.cpp b/clang/lib/AST/Interp/InterpFrame.cpp index 12e2e6ff9155b..ba957546473e9 100644 --- a/clang/lib/AST/Interp/InterpFrame.cpp +++ b/clang/lib/AST/Interp/InterpFrame.cpp @@ -152,6 +152,13 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx, } void InterpFrame::describe(llvm::raw_ostream &OS) const { + // We create frames for builtin functions as well, but we can't reliably + // diagnose them. The 'in call to' diagnostics for them add no value to the + // user _and_ it doesn't generally work since the argument types don't always + // match the function prototype. Just ignore them. + if (const auto *F = getFunction(); F && F->isBuiltin()) + return; + const FunctionDecl *F = getCallee(); if (const auto *M = dyn_cast(F); M && M->isInstance() && !isa(F)) { diff --git a/clang/lib/AST/Interp/InterpState.h b/clang/lib/AST/Interp/InterpState.h index 8f84bf6ed2eaf..c17cfad11b1e2 100644 --- a/clang/lib/AST/Interp/InterpState.h +++ b/clang/lib/AST/Interp/InterpState.h @@ -89,7 +89,11 @@ class InterpState final : public State, public SourceMapper { /// Delegates source mapping to the mapper. SourceInfo getSource(const Function *F, CodePtr PC) const override { - return M ? 
M->getSource(F, PC) : F->getSource(PC); + if (M) + return M->getSource(F, PC); + + assert(F && "Function cannot be null"); + return F->getSource(PC); } Context &getContext() const { return Ctx; } diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h index fcd00aac62f93..b4475577b7462 100644 --- a/clang/lib/AST/Interp/Pointer.h +++ b/clang/lib/AST/Interp/Pointer.h @@ -241,13 +241,10 @@ class Pointer { /// Checks if the pointer is null. bool isZero() const { - if (Offset != 0) - return false; - if (isBlockPointer()) return asBlockPointer().Pointee == nullptr; assert(isIntegralPointer()); - return asIntPointer().Value == 0; + return asIntPointer().Value == 0 && Offset == 0; } /// Checks if the pointer is live. bool isLive() const { diff --git a/clang/lib/AST/Interp/Program.cpp b/clang/lib/AST/Interp/Program.cpp index 82367164743fc..2c8c6781b3483 100644 --- a/clang/lib/AST/Interp/Program.cpp +++ b/clang/lib/AST/Interp/Program.cpp @@ -177,7 +177,7 @@ std::optional Program::createGlobal(const ValueDecl *VD, bool IsStatic, IsExtern; if (const auto *Var = dyn_cast(VD)) { IsStatic = Context::shouldBeGloballyIndexed(VD); - IsExtern = !Var->getAnyInitializer(); + IsExtern = Var->hasExternalStorage(); } else if (isa(VD)) { IsStatic = true; IsExtern = false; @@ -312,6 +312,11 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) { // Reserve space for fields. Record::FieldList Fields; for (const FieldDecl *FD : RD->fields()) { + // Note that we DO create fields and descriptors + // for unnamed bitfields here, even though we later ignore + // them everywhere. That's because so the FieldDecl's + // getFieldIndex() matches. + // Reserve space for the field's descriptor and the offset. BaseSize += align(sizeof(InlineDescriptor)); diff --git a/clang/lib/AST/Interp/State.cpp b/clang/lib/AST/Interp/State.cpp index 47fbf5145cd4e..0d9dadec4b958 100644 --- a/clang/lib/AST/Interp/State.cpp +++ b/clang/lib/AST/Interp/State.cpp @@ -155,7 +155,8 @@ void State::addCallStack(unsigned Limit) { SmallString<128> Buffer; llvm::raw_svector_ostream Out(Buffer); F->describe(Out); - addDiag(CallRange.getBegin(), diag::note_constexpr_call_here) - << Out.str() << CallRange; + if (!Buffer.empty()) + addDiag(CallRange.getBegin(), diag::note_constexpr_call_here) + << Out.str() << CallRange; } } diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp index fcfd62b723655..da50f8b39d793 100644 --- a/clang/lib/AST/ItaniumMangle.cpp +++ b/clang/lib/AST/ItaniumMangle.cpp @@ -6197,7 +6197,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) { } I = 0; for (const FieldDecl *FD : RD->fields()) { - if (!FD->isUnnamedBitfield() && + if (!FD->isUnnamedBitField() && !isZeroInitialized(FD->getType(), V.getStructField(I))) return false; ++I; @@ -6210,7 +6210,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) { assert(RD && "unexpected type for union value"); // Zero-initialization zeroes the first non-unnamed-bitfield field, if any. 
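// Aside, not part of the patch: several hunks in this change only track the
// isUnnamedBitfield() -> isUnnamedBitField() rename. Unnamed bit-fields keep
// appearing in these loops because they occupy a slot in RecordDecl::fields()
// (and hence a FieldDecl::getFieldIndex() value) even though they can never
// be named, initialized, or zero-checked:
struct Example {      // type name invented for illustration
  int a;              // getFieldIndex() == 0
  int : 3;            // unnamed bit-field: index 1, padding only, always skipped
  int b;              // getFieldIndex() == 2
};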
for (const FieldDecl *FD : RD->fields()) { - if (!FD->isUnnamedBitfield()) + if (!FD->isUnnamedBitField()) return V.getUnionField() && declaresSameEntity(FD, V.getUnionField()) && isZeroInitialized(FD->getType(), V.getUnionValue()); } @@ -6352,7 +6352,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, llvm::SmallVector Fields(RD->fields()); while ( !Fields.empty() && - (Fields.back()->isUnnamedBitfield() || + (Fields.back()->isUnnamedBitField() || isZeroInitialized(Fields.back()->getType(), V.getStructField(Fields.back()->getFieldIndex())))) { Fields.pop_back(); @@ -6372,7 +6372,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, for (unsigned I = 0, N = Bases.size(); I != N; ++I) mangleValueInTemplateArg(Bases[I].getType(), V.getStructBase(I), false); for (unsigned I = 0, N = Fields.size(); I != N; ++I) { - if (Fields[I]->isUnnamedBitfield()) + if (Fields[I]->isUnnamedBitField()) continue; mangleValueInTemplateArg(Fields[I]->getType(), V.getStructField(Fields[I]->getFieldIndex()), diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp index c4f014d9c7517..fbcc4e3123c6d 100644 --- a/clang/lib/AST/JSONNodeDumper.cpp +++ b/clang/lib/AST/JSONNodeDumper.cpp @@ -975,6 +975,9 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) { if (FD->isDefaulted()) JOS.attribute("explicitlyDefaulted", FD->isDeleted() ? "deleted" : "default"); + + if (StringLiteral *Msg = FD->getDeletedMessage()) + JOS.attribute("deletedMessage", Msg->getString()); } void JSONNodeDumper::VisitEnumDecl(const EnumDecl *ED) { @@ -1579,6 +1582,14 @@ void JSONNodeDumper::VisitMaterializeTemporaryExpr( attributeOnlyIfTrue("boundToLValueRef", MTE->isBoundToLvalueReference()); } +void JSONNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) { + attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit()); +} + +void JSONNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) { + attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit()); +} + void JSONNodeDumper::VisitCXXDependentScopeMemberExpr( const CXXDependentScopeMemberExpr *DSME) { JOS.attribute("isArrow", DSME->isArrow()); diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp index 803144ea29539..9adc41d65a4d9 100644 --- a/clang/lib/AST/MicrosoftMangle.cpp +++ b/clang/lib/AST/MicrosoftMangle.cpp @@ -1933,7 +1933,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, for (const CXXBaseSpecifier &B : RD->bases()) mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++), TAK); for (const FieldDecl *FD : RD->fields()) - if (!FD->isUnnamedBitfield()) + if (!FD->isUnnamedBitField()) mangleTemplateArgValue(FD->getType(), V.getStructField(FD->getFieldIndex()), TAK, /*WithScalarType*/ true); diff --git a/clang/lib/AST/NSAPI.cpp b/clang/lib/AST/NSAPI.cpp index 30252bce29ce0..2140ba6820c4c 100644 --- a/clang/lib/AST/NSAPI.cpp +++ b/clang/lib/AST/NSAPI.cpp @@ -56,10 +56,8 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const { &Ctx.Idents.get("initWithUTF8String")); break; case NSStr_stringWithCStringEncoding: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("stringWithCString"), - &Ctx.Idents.get("encoding") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("stringWithCString"), + &Ctx.Idents.get("encoding")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -93,10 +91,8 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = 
Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects")); break; case NSArr_arrayWithObjectsCount: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("arrayWithObjects"), - &Ctx.Idents.get("count") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("arrayWithObjects"), + &Ctx.Idents.get("count")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -110,10 +106,9 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex")); break; case NSMutableArr_replaceObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("replaceObjectAtIndex"), - &Ctx.Idents.get("withObject") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -121,18 +116,14 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); break; case NSMutableArr_insertObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("insertObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableArr_setObjectAtIndexedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -167,27 +158,21 @@ Selector NSAPI::getNSDictionarySelector( &Ctx.Idents.get("dictionaryWithDictionary")); break; case NSDict_dictionaryWithObjectForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObject"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObject"), &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSDict_dictionaryWithObjectsForKeys: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObjects"), - &Ctx.Idents.get("forKeys") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSDict_dictionaryWithObjectsForKeysCount: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObjects"), - &Ctx.Idents.get("forKeys"), - &Ctx.Idents.get("count") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys"), + &Ctx.Idents.get("count")}; Sel = Ctx.Selectors.getSelector(3, KeyIdents); break; } @@ -204,10 +189,8 @@ Selector NSAPI::getNSDictionarySelector( &Ctx.Idents.get("initWithObjectsAndKeys")); break; case NSDict_initWithObjectsForKeys: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("initWithObjects"), - &Ctx.Idents.get("forKeys") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("initWithObjects"), + &Ctx.Idents.get("forKeys")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -215,26 +198,20 @@ Selector NSAPI::getNSDictionarySelector( Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey")); break; case NSMutableDict_setObjectForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = 
{&Ctx.Idents.get("setObject"), + &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableDict_setObjectForKeyedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("forKeyedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("forKeyedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableDict_setValueForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setValue"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setValue"), + &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -264,34 +241,27 @@ Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); break; case NSOrderedSet_insertObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("insertObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_setObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_setObjectAtIndexedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_replaceObjectAtIndexWithObject: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("replaceObjectAtIndex"), - &Ctx.Idents.get("withObject") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -611,7 +581,7 @@ bool NSAPI::isObjCEnumerator(const Expr *E, Selector NSAPI::getOrInitSelector(ArrayRef Ids, Selector &Sel) const { if (Sel.isNull()) { - SmallVector Idents; + SmallVector Idents; for (ArrayRef::const_iterator I = Ids.begin(), E = Ids.end(); I != E; ++I) Idents.push_back(&Ctx.Idents.get(*I)); @@ -622,7 +592,7 @@ Selector NSAPI::getOrInitSelector(ArrayRef Ids, Selector NSAPI::getOrInitNullarySelector(StringRef Id, Selector &Sel) const { if (Sel.isNull()) { - IdentifierInfo *Ident = &Ctx.Idents.get(Id); + const IdentifierInfo *Ident = &Ctx.Idents.get(Id); Sel = Ctx.Selectors.getSelector(0, &Ident); } return Sel; diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp index 36f2c47b30005..785c46e86a77c 100644 --- a/clang/lib/AST/NestedNameSpecifier.cpp +++ b/clang/lib/AST/NestedNameSpecifier.cpp @@ -55,16 +55,16 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context, return NNS; } -NestedNameSpecifier * -NestedNameSpecifier::Create(const ASTContext &Context, - NestedNameSpecifier *Prefix, IdentifierInfo *II) { +NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, + const IdentifierInfo *II) { assert(II && "Identifier cannot be NULL"); assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent"); NestedNameSpecifier Mockup; 
Mockup.Prefix.setPointer(Prefix); Mockup.Prefix.setInt(StoredIdentifier); - Mockup.Specifier = II; + Mockup.Specifier = const_cast(II); return FindOrInsert(Context, Mockup); } @@ -87,7 +87,7 @@ NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier * NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier *Prefix, - NamespaceAliasDecl *Alias) { + const NamespaceAliasDecl *Alias) { assert(Alias && "Namespace alias cannot be NULL"); assert((!Prefix || (Prefix->getAsType() == nullptr && @@ -96,7 +96,7 @@ NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier Mockup; Mockup.Prefix.setPointer(Prefix); Mockup.Prefix.setInt(StoredDecl); - Mockup.Specifier = Alias; + Mockup.Specifier = const_cast(Alias); return FindOrInsert(Context, Mockup); } @@ -112,13 +112,13 @@ NestedNameSpecifier::Create(const ASTContext &Context, return FindOrInsert(Context, Mockup); } -NestedNameSpecifier * -NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) { +NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context, + const IdentifierInfo *II) { assert(II && "Identifier cannot be NULL"); NestedNameSpecifier Mockup; Mockup.Prefix.setPointer(nullptr); Mockup.Prefix.setInt(StoredIdentifier); - Mockup.Specifier = II; + Mockup.Specifier = const_cast(II); return FindOrInsert(Context, Mockup); } diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp index e159a1b00be55..6f04739cf6693 100644 --- a/clang/lib/AST/ODRHash.cpp +++ b/clang/lib/AST/ODRHash.cpp @@ -696,6 +696,12 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function, AddBoolean(Function->isDeletedAsWritten()); AddBoolean(Function->isExplicitlyDefaulted()); + StringLiteral *DeletedMessage = Function->getDeletedMessage(); + AddBoolean(DeletedMessage); + + if (DeletedMessage) + ID.AddString(DeletedMessage->getBytes()); + AddDecl(Function); AddQualType(Function->getReturnType()); diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp index e1db872f25c32..9c259c8f9bd0a 100644 --- a/clang/lib/AST/OpenACCClause.cpp +++ b/clang/lib/AST/OpenACCClause.cpp @@ -13,5 +13,88 @@ #include "clang/AST/OpenACCClause.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Expr.h" using namespace clang; + +OpenACCDefaultClause *OpenACCDefaultClause::Create(const ASTContext &C, + OpenACCDefaultClauseKind K, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(sizeof(OpenACCDefaultClause), alignof(OpenACCDefaultClause)); + + return new (Mem) OpenACCDefaultClause(K, BeginLoc, LParenLoc, EndLoc); +} + +OpenACCIfClause *OpenACCIfClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause)); + return new (Mem) OpenACCIfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc); +} + +OpenACCIfClause::OpenACCIfClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *ConditionExpr, + SourceLocation EndLoc) + : OpenACCClauseWithCondition(OpenACCClauseKind::If, BeginLoc, LParenLoc, + ConditionExpr, EndLoc) { + assert(ConditionExpr && "if clause requires condition expr"); + assert((ConditionExpr->isInstantiationDependent() || + ConditionExpr->getType()->isScalarType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCSelfClause *OpenACCSelfClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + 
Expr *ConditionExpr, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause)); + return new (Mem) + OpenACCSelfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc); +} + +OpenACCSelfClause::OpenACCSelfClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, SourceLocation EndLoc) + : OpenACCClauseWithCondition(OpenACCClauseKind::Self, BeginLoc, LParenLoc, + ConditionExpr, EndLoc) { + assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() || + ConditionExpr->getType()->isScalarType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCClause::child_range OpenACCClause::children() { + switch (getClauseKind()) { + default: + assert(false && "Clause children function not implemented"); + break; +#define VISIT_CLAUSE(CLAUSE_NAME) \ + case OpenACCClauseKind::CLAUSE_NAME: \ + return cast(this)->children(); + +#include "clang/Basic/OpenACCClauses.def" + } + return child_range(child_iterator(), child_iterator()); +} + +//===----------------------------------------------------------------------===// +// OpenACC clauses printing methods +//===----------------------------------------------------------------------===// +void OpenACCClausePrinter::VisitDefaultClause(const OpenACCDefaultClause &C) { + OS << "default(" << C.getDefaultClauseKind() << ")"; +} + +void OpenACCClausePrinter::VisitIfClause(const OpenACCIfClause &C) { + OS << "if(" << C.getConditionExpr() << ")"; +} + +void OpenACCClausePrinter::VisitSelfClause(const OpenACCSelfClause &C) { + OS << "self"; + if (const Expr *CondExpr = C.getConditionExpr()) + OS << "(" << CondExpr << ")"; +} diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp index 21cfd5b1de6e9..9723c0cfa83bb 100644 --- a/clang/lib/AST/ParentMapContext.cpp +++ b/clang/lib/AST/ParentMapContext.cpp @@ -61,7 +61,26 @@ class ParentMapContext::ParentMap { template friend struct ::MatchParents; /// Contains parents of a node. - using ParentVector = llvm::SmallVector; + class ParentVector { + public: + ParentVector() = default; + explicit ParentVector(size_t N, const DynTypedNode &Value) { + Items.reserve(N); + for (; N > 0; --N) + push_back(Value); + } + bool contains(const DynTypedNode &Value) { + return Seen.contains(Value); + } + void push_back(const DynTypedNode &Value) { + if (!Value.getMemoizationData() || Seen.insert(Value).second) + Items.push_back(Value); + } + llvm::ArrayRef view() const { return Items; } + private: + llvm::SmallVector Items; + llvm::SmallDenseSet Seen; + }; /// Maps from a node to its parents. 
This is used for nodes that have /// pointer identity only, which are more common and we can save space by @@ -99,7 +118,7 @@ class ParentMapContext::ParentMap { return llvm::ArrayRef(); } if (const auto *V = I->second.template dyn_cast()) { - return llvm::ArrayRef(*V); + return V->view(); } return getSingleDynTypedNodeFromParentMap(I->second); } @@ -252,7 +271,7 @@ class ParentMapContext::ParentMap { const auto *S = It->second.dyn_cast(); if (!S) { if (auto *Vec = It->second.dyn_cast()) - return llvm::ArrayRef(*Vec); + return Vec->view(); return getSingleDynTypedNodeFromParentMap(It->second); } const auto *P = dyn_cast(S); diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp index 9ad84e21a7dab..b1c3fb6b0bf2b 100644 --- a/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/clang/lib/AST/RecordLayoutBuilder.cpp @@ -2458,6 +2458,11 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) { } static bool isMsLayout(const ASTContext &Context, bool CheckAuxABI = false) { + // Check if it's CUDA device compilation; ensure layout consistency with host. + if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice && + Context.getAuxTargetInfo()) + return Context.getAuxTargetInfo()->getCXXABI().isMicrosoft(); + return (CheckAuxABI) ? Context.getAuxTargetInfo()->getCXXABI().isMicrosoft() : Context.getTargetInfo().getCXXABI().isMicrosoft(); } diff --git a/clang/lib/AST/SelectorLocationsKind.cpp b/clang/lib/AST/SelectorLocationsKind.cpp index 2c34c9c60c2b2..ebe6324f904c7 100644 --- a/clang/lib/AST/SelectorLocationsKind.cpp +++ b/clang/lib/AST/SelectorLocationsKind.cpp @@ -26,7 +26,7 @@ static SourceLocation getStandardSelLoc(unsigned Index, assert(Index == 0); if (EndLoc.isInvalid()) return SourceLocation(); - IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0); + const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0); unsigned Len = II ? II->getLength() : 0; return EndLoc.getLocWithOffset(-Len); } @@ -34,7 +34,7 @@ static SourceLocation getStandardSelLoc(unsigned Index, assert(Index < NumSelArgs); if (ArgLoc.isInvalid()) return SourceLocation(); - IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); + const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); unsigned Len = /* selector id */ (II ? 
II->getLength() : 0) + /* ':' */ 1; if (WithArgSpace) ++Len; diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp index 426b35848cb5c..d8519b2071e6d 100644 --- a/clang/lib/AST/StmtOpenMP.cpp +++ b/clang/lib/AST/StmtOpenMP.cpp @@ -2431,7 +2431,7 @@ OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C, OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef Clauses, Stmt *AssociatedStmt, - const HelperExprs &Exprs) { + const HelperExprs &Exprs, bool CanBeParallelFor) { auto *Dir = createDirective( C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc, @@ -2473,6 +2473,7 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create( Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB); Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond); Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond); + Dir->setCanBeParallelFor(CanBeParallelFor); return Dir; } diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 7bd7f1fb1f4ff..192bdb9294571 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -1453,7 +1453,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) { continue; // Field or identifier node. - IdentifierInfo *Id = ON.getFieldName(); + const IdentifierInfo *Id = ON.getFieldName(); if (!Id) continue; @@ -2382,7 +2382,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { E->getQualifier()->print(OS, Policy); OS << "~"; - if (IdentifierInfo *II = E->getDestroyedTypeIdentifier()) + if (const IdentifierInfo *II = E->getDestroyedTypeIdentifier()) OS << II->getName(); else E->getDestroyedType().print(OS, Policy); diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 557626f28e94e..9320e8c4e4ade 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -61,7 +61,7 @@ namespace { virtual void VisitName(DeclarationName Name, bool TreatAsDecl = false) = 0; /// Visit identifiers that are not in Decl's or Type's. - virtual void VisitIdentifierInfo(IdentifierInfo *II) = 0; + virtual void VisitIdentifierInfo(const IdentifierInfo *II) = 0; /// Visit a nested-name-specifier that occurs within an expression /// or statement. @@ -163,7 +163,7 @@ namespace { ID.AddPointer(Name.getAsOpaquePtr()); } - void VisitIdentifierInfo(IdentifierInfo *II) override { + void VisitIdentifierInfo(const IdentifierInfo *II) override { ID.AddPointer(II); } @@ -211,7 +211,7 @@ namespace { } Hash.AddDeclarationName(Name, TreatAsDecl); } - void VisitIdentifierInfo(IdentifierInfo *II) override { + void VisitIdentifierInfo(const IdentifierInfo *II) override { ID.AddBoolean(II); if (II) { Hash.AddIdentifierInfo(II); @@ -2098,13 +2098,31 @@ StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) { } CXXRecordDecl *Lambda = S->getLambdaClass(); - ID.AddInteger(Lambda->getODRHash()); - for (const auto &Capture : Lambda->captures()) { ID.AddInteger(Capture.getCaptureKind()); if (Capture.capturesVariable()) VisitDecl(Capture.getCapturedVar()); } + + // Profiling the body of the lambda may be dangerous during deserialization. + // So we'd like only to profile the signature here. + ODRHash Hasher; + // FIXME: We can't get the operator call easily by + // `CXXRecordDecl::getLambdaCallOperator()` if we're in deserialization. 
+ // So we have to do something raw here. + for (auto *SubDecl : Lambda->decls()) { + FunctionDecl *Call = nullptr; + if (auto *FTD = dyn_cast(SubDecl)) + Call = FTD->getTemplatedDecl(); + else if (auto *FD = dyn_cast(SubDecl)) + Call = FD; + + if (!Call) + continue; + + Hasher.AddFunctionDecl(Call, /*SkipBody=*/true); + } + ID.AddInteger(Hasher.CalculateHash()); } void @@ -2472,9 +2490,10 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) { namespace { class OpenACCClauseProfiler : public OpenACCClauseVisitor { + StmtProfiler &Profiler; public: - OpenACCClauseProfiler() = default; + OpenACCClauseProfiler(StmtProfiler &P) : Profiler(P) {} void VisitOpenACCClauseList(ArrayRef Clauses) { for (const OpenACCClause *Clause : Clauses) { @@ -2483,7 +2502,27 @@ class OpenACCClauseProfiler Visit(Clause); } } + +#define VISIT_CLAUSE(CLAUSE_NAME) \ + void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause); + +#include "clang/Basic/OpenACCClauses.def" }; + +/// Nothing to do here, there are no sub-statements. +void OpenACCClauseProfiler::VisitDefaultClause( + const OpenACCDefaultClause &Clause) {} + +void OpenACCClauseProfiler::VisitIfClause(const OpenACCIfClause &Clause) { + assert(Clause.hasConditionExpr() && + "if clause requires a valid condition expr"); + Profiler.VisitStmt(Clause.getConditionExpr()); +} + +void OpenACCClauseProfiler::VisitSelfClause(const OpenACCSelfClause &Clause) { + if (Clause.hasConditionExpr()) + Profiler.VisitStmt(Clause.getConditionExpr()); +} } // namespace void StmtProfiler::VisitOpenACCComputeConstruct( @@ -2491,7 +2530,7 @@ void StmtProfiler::VisitOpenACCComputeConstruct( // VisitStmt handles children, so the AssociatedStmt is handled. VisitStmt(S); - OpenACCClauseProfiler P; + OpenACCClauseProfiler P{*this}; P.VisitOpenACCClauseList(S->clauses()); } diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 2a1767db66f50..d8568bb4a41fa 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -390,6 +390,23 @@ void TextNodeDumper::Visit(const OpenACCClause *C) { { ColorScope Color(OS, ShowColors, AttrColor); OS << C->getClauseKind(); + + // Handle clauses with parens for types that have no children, likely + // because there is no sub expression. + switch (C->getClauseKind()) { + case OpenACCClauseKind::Default: + OS << '(' << cast(C)->getDefaultClauseKind() << ')'; + break; + case OpenACCClauseKind::If: + case OpenACCClauseKind::Self: + // The condition expression will be printed as a part of the 'children', + // but print 'clause' here so it is clear what is happening from the dump. + OS << " clause"; + break; + default: + // Nothing to do here. 
+ break; + } } dumpPointer(C); dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc())); @@ -1442,23 +1459,13 @@ void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) { } void TextNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) { - if (Node->hasRewrittenInit()) { + if (Node->hasRewrittenInit()) OS << " has rewritten init"; - AddChild([=] { - ColorScope Color(OS, ShowColors, StmtColor); - Visit(Node->getExpr()); - }); - } } void TextNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) { - if (Node->hasRewrittenInit()) { + if (Node->hasRewrittenInit()) OS << " has rewritten init"; - AddChild([=] { - ColorScope Color(OS, ShowColors, StmtColor); - Visit(Node->getExpr()); - }); - } } void TextNodeDumper::VisitMaterializeTemporaryExpr( @@ -1958,6 +1965,9 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { if (D->isTrivial()) OS << " trivial"; + if (const StringLiteral *M = D->getDeletedMessage()) + AddChild("delete message", [=] { Visit(M); }); + if (D->isIneligibleOrNotSelected()) OS << (isa(D) ? " not_selected" : " ineligible"); diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp index a66aef6323a37..55308d153724a 100644 --- a/clang/lib/AST/TypePrinter.cpp +++ b/clang/lib/AST/TypePrinter.cpp @@ -1222,10 +1222,13 @@ void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) { void TypePrinter::printPackIndexingBefore(const PackIndexingType *T, raw_ostream &OS) { - if (T->hasSelectedType()) + if (T->hasSelectedType()) { OS << T->getSelectedType(); - else - OS << T->getPattern() << "...[" << T->getIndexExpr() << "]"; + } else { + OS << T->getPattern() << "...["; + T->getIndexExpr()->printPretty(OS, nullptr, Policy); + OS << "]"; + } spaceBeforePlaceHolder(OS); } diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp index bb042760d297a..941322be8f870 100644 --- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp +++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp @@ -186,9 +186,10 @@ template <> struct NodeID { static constexpr StringRef value = "decl"; }; constexpr StringRef NodeID::value; constexpr StringRef NodeID::value; -template +template const Stmt *tryEachMatch(ArrayRef Matches, - ExprMutationAnalyzer *Analyzer, F Finder) { + ExprMutationAnalyzer::Analyzer *Analyzer, F Finder) { const StringRef ID = NodeID::value; for (const auto &Nodes : Matches) { if (const Stmt *S = (Analyzer->*Finder)(Nodes.getNodeAs(ID))) @@ -199,33 +200,37 @@ const Stmt *tryEachMatch(ArrayRef Matches, } // namespace -const Stmt *ExprMutationAnalyzer::findMutation(const Expr *Exp) { - return findMutationMemoized(Exp, - {&ExprMutationAnalyzer::findDirectMutation, - &ExprMutationAnalyzer::findMemberMutation, - &ExprMutationAnalyzer::findArrayElementMutation, - &ExprMutationAnalyzer::findCastMutation, - &ExprMutationAnalyzer::findRangeLoopMutation, - &ExprMutationAnalyzer::findReferenceMutation, - &ExprMutationAnalyzer::findFunctionArgMutation}, - Results); +const Stmt *ExprMutationAnalyzer::Analyzer::findMutation(const Expr *Exp) { + return findMutationMemoized( + Exp, + {&ExprMutationAnalyzer::Analyzer::findDirectMutation, + &ExprMutationAnalyzer::Analyzer::findMemberMutation, + &ExprMutationAnalyzer::Analyzer::findArrayElementMutation, + &ExprMutationAnalyzer::Analyzer::findCastMutation, + &ExprMutationAnalyzer::Analyzer::findRangeLoopMutation, + &ExprMutationAnalyzer::Analyzer::findReferenceMutation, + 
&ExprMutationAnalyzer::Analyzer::findFunctionArgMutation}, + Memorized.Results); } -const Stmt *ExprMutationAnalyzer::findMutation(const Decl *Dec) { - return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findMutation); +const Stmt *ExprMutationAnalyzer::Analyzer::findMutation(const Decl *Dec) { + return tryEachDeclRef(Dec, &ExprMutationAnalyzer::Analyzer::findMutation); } -const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Expr *Exp) { - return findMutationMemoized(Exp, {/*TODO*/}, PointeeResults); +const Stmt * +ExprMutationAnalyzer::Analyzer::findPointeeMutation(const Expr *Exp) { + return findMutationMemoized(Exp, {/*TODO*/}, Memorized.PointeeResults); } -const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Decl *Dec) { - return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findPointeeMutation); +const Stmt * +ExprMutationAnalyzer::Analyzer::findPointeeMutation(const Decl *Dec) { + return tryEachDeclRef(Dec, + &ExprMutationAnalyzer::Analyzer::findPointeeMutation); } -const Stmt *ExprMutationAnalyzer::findMutationMemoized( +const Stmt *ExprMutationAnalyzer::Analyzer::findMutationMemoized( const Expr *Exp, llvm::ArrayRef Finders, - ResultMap &MemoizedResults) { + Memoized::ResultMap &MemoizedResults) { const auto Memoized = MemoizedResults.find(Exp); if (Memoized != MemoizedResults.end()) return Memoized->second; @@ -241,8 +246,9 @@ const Stmt *ExprMutationAnalyzer::findMutationMemoized( return MemoizedResults[Exp] = nullptr; } -const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec, - MutationFinder Finder) { +const Stmt * +ExprMutationAnalyzer::Analyzer::tryEachDeclRef(const Decl *Dec, + MutationFinder Finder) { const auto Refs = match( findAll( declRefExpr(to( @@ -261,8 +267,9 @@ const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec, return nullptr; } -bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm, - ASTContext &Context) { +bool ExprMutationAnalyzer::Analyzer::isUnevaluated(const Stmt *Exp, + const Stmt &Stm, + ASTContext &Context) { return selectFirst( NodeID::value, match( @@ -293,33 +300,36 @@ bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm, Stm, Context)) != nullptr; } -bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) { +bool ExprMutationAnalyzer::Analyzer::isUnevaluated(const Expr *Exp) { return isUnevaluated(Exp, Stm, Context); } const Stmt * -ExprMutationAnalyzer::findExprMutation(ArrayRef Matches) { - return tryEachMatch(Matches, this, &ExprMutationAnalyzer::findMutation); +ExprMutationAnalyzer::Analyzer::findExprMutation(ArrayRef Matches) { + return tryEachMatch(Matches, this, + &ExprMutationAnalyzer::Analyzer::findMutation); } const Stmt * -ExprMutationAnalyzer::findDeclMutation(ArrayRef Matches) { - return tryEachMatch(Matches, this, &ExprMutationAnalyzer::findMutation); +ExprMutationAnalyzer::Analyzer::findDeclMutation(ArrayRef Matches) { + return tryEachMatch(Matches, this, + &ExprMutationAnalyzer::Analyzer::findMutation); } -const Stmt *ExprMutationAnalyzer::findExprPointeeMutation( +const Stmt *ExprMutationAnalyzer::Analyzer::findExprPointeeMutation( ArrayRef Matches) { - return tryEachMatch(Matches, this, - &ExprMutationAnalyzer::findPointeeMutation); + return tryEachMatch( + Matches, this, &ExprMutationAnalyzer::Analyzer::findPointeeMutation); } -const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation( +const Stmt *ExprMutationAnalyzer::Analyzer::findDeclPointeeMutation( ArrayRef Matches) { - return tryEachMatch(Matches, this, - &ExprMutationAnalyzer::findPointeeMutation); 
+ return tryEachMatch( + Matches, this, &ExprMutationAnalyzer::Analyzer::findPointeeMutation); } -const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findDirectMutation(const Expr *Exp) { // LHS of any assignment operators. const auto AsAssignmentLhs = binaryOperator(isAssignmentOperator(), hasLHS(canResolveToExpr(Exp))); @@ -426,7 +436,7 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) { const auto AsNonConstRefReturn = returnStmt(hasReturnValue(canResolveToExpr(Exp))); - // It is used as a non-const-reference for initalizing a range-for loop. + // It is used as a non-const-reference for initializing a range-for loop. const auto AsNonConstRefRangeInit = cxxForRangeStmt(hasRangeInit(declRefExpr( allOf(canResolveToExpr(Exp), hasType(nonConstReferenceType()))))); @@ -443,7 +453,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) { return selectFirst("stmt", Matches); } -const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findMemberMutation(const Expr *Exp) { // Check whether any member of 'Exp' is mutated. const auto MemberExprs = match( findAll(expr(anyOf(memberExpr(hasObjectExpression(canResolveToExpr(Exp))), @@ -456,7 +467,8 @@ const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) { return findExprMutation(MemberExprs); } -const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findArrayElementMutation(const Expr *Exp) { // Check whether any element of an array is mutated. const auto SubscriptExprs = match( findAll(arraySubscriptExpr( @@ -469,7 +481,7 @@ const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) { return findExprMutation(SubscriptExprs); } -const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) { +const Stmt *ExprMutationAnalyzer::Analyzer::findCastMutation(const Expr *Exp) { // If the 'Exp' is explicitly casted to a non-const reference type the // 'Exp' is considered to be modified. const auto ExplicitCast = @@ -504,7 +516,8 @@ const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) { return findExprMutation(Calls); } -const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findRangeLoopMutation(const Expr *Exp) { // Keep the ordering for the specific initialization matches to happen first, // because it is cheaper to match all potential modifications of the loop // variable. @@ -567,7 +580,8 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) { return findDeclMutation(LoopVars); } -const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findReferenceMutation(const Expr *Exp) { // Follow non-const reference returned by `operator*()` of move-only classes. // These are typically smart pointers with unique ownership so we treat // mutation of pointee as mutation of the smart pointer itself. 
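// Illustrative example, not part of the patch, of the pattern this matcher is
// after: writing through operator*() of a move-only owner is reported as a
// mutation of the owner expression itself. The helper name is invented.
#include <memory>
inline void touch(std::unique_ptr<int> &P) {
  *P = 42;   // treated as a mutation of 'P'
}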
@@ -599,7 +613,8 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) { return findDeclMutation(Refs); } -const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) { +const Stmt * +ExprMutationAnalyzer::Analyzer::findFunctionArgMutation(const Expr *Exp) { const auto NonConstRefParam = forEachArgumentWithParam( canResolveToExpr(Exp), parmVarDecl(hasType(nonConstReferenceType())).bind("parm")); @@ -637,10 +652,9 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) { if (const auto *RefType = ParmType->getAs()) { if (!RefType->getPointeeType().getQualifiers() && RefType->getPointeeType()->getAs()) { - std::unique_ptr &Analyzer = - FuncParmAnalyzer[Func]; - if (!Analyzer) - Analyzer.reset(new FunctionParmMutationAnalyzer(*Func, Context)); + FunctionParmMutationAnalyzer *Analyzer = + FunctionParmMutationAnalyzer::getFunctionParmMutationAnalyzer( + *Func, Context, Memorized); if (Analyzer->findMutation(Parm)) return Exp; continue; @@ -653,13 +667,15 @@ const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) { } FunctionParmMutationAnalyzer::FunctionParmMutationAnalyzer( - const FunctionDecl &Func, ASTContext &Context) - : BodyAnalyzer(*Func.getBody(), Context) { + const FunctionDecl &Func, ASTContext &Context, + ExprMutationAnalyzer::Memoized &Memorized) + : BodyAnalyzer(*Func.getBody(), Context, Memorized) { if (const auto *Ctor = dyn_cast(&Func)) { // CXXCtorInitializer might also mutate Param but they're not part of // function body, check them eagerly here since they're typically trivial. for (const CXXCtorInitializer *Init : Ctor->inits()) { - ExprMutationAnalyzer InitAnalyzer(*Init->getInit(), Context); + ExprMutationAnalyzer::Analyzer InitAnalyzer(*Init->getInit(), Context, + Memorized); for (const ParmVarDecl *Parm : Ctor->parameters()) { if (Results.contains(Parm)) continue; @@ -675,11 +691,14 @@ FunctionParmMutationAnalyzer::findMutation(const ParmVarDecl *Parm) { const auto Memoized = Results.find(Parm); if (Memoized != Results.end()) return Memoized->second; - + // To handle call A -> call B -> call A. Assume parameters of A is not mutated + // before analyzing parameters of A. Then when analyzing the second "call A", + // FunctionParmMutationAnalyzer can use this memoized value to avoid infinite + // recursion. + Results[Parm] = nullptr; if (const Stmt *S = BodyAnalyzer.findMutation(Parm)) return Results[Parm] = S; - - return Results[Parm] = nullptr; + return Results[Parm]; } } // namespace clang diff --git a/clang/lib/Analysis/FlowSensitive/ASTOps.cpp b/clang/lib/Analysis/FlowSensitive/ASTOps.cpp new file mode 100644 index 0000000000000..1982c6c9f3830 --- /dev/null +++ b/clang/lib/Analysis/FlowSensitive/ASTOps.cpp @@ -0,0 +1,249 @@ +//===-- ASTOps.cc -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Operations on AST nodes that are used in flow-sensitive analysis. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/Analysis/FlowSensitive/ASTOps.h" +#include "clang/AST/ComputeDependence.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/Stmt.h" +#include "clang/AST/Type.h" +#include "clang/Analysis/FlowSensitive/StorageLocation.h" +#include "clang/Basic/LLVM.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" +#include +#include +#include + +#define DEBUG_TYPE "dataflow" + +namespace clang::dataflow { + +const Expr &ignoreCFGOmittedNodes(const Expr &E) { + const Expr *Current = &E; + if (auto *EWC = dyn_cast(Current)) { + Current = EWC->getSubExpr(); + assert(Current != nullptr); + } + Current = Current->IgnoreParens(); + assert(Current != nullptr); + return *Current; +} + +const Stmt &ignoreCFGOmittedNodes(const Stmt &S) { + if (auto *E = dyn_cast(&S)) + return ignoreCFGOmittedNodes(*E); + return S; +} + +// FIXME: Does not precisely handle non-virtual diamond inheritance. A single +// field decl will be modeled for all instances of the inherited field. +static void getFieldsFromClassHierarchy(QualType Type, FieldSet &Fields) { + if (Type->isIncompleteType() || Type->isDependentType() || + !Type->isRecordType()) + return; + + for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) + Fields.insert(Field); + if (auto *CXXRecord = Type->getAsCXXRecordDecl()) + for (const CXXBaseSpecifier &Base : CXXRecord->bases()) + getFieldsFromClassHierarchy(Base.getType(), Fields); +} + +/// Gets the set of all fields in the type. +FieldSet getObjectFields(QualType Type) { + FieldSet Fields; + getFieldsFromClassHierarchy(Type, Fields); + return Fields; +} + +bool containsSameFields(const FieldSet &Fields, + const RecordStorageLocation::FieldToLoc &FieldLocs) { + if (Fields.size() != FieldLocs.size()) + return false; + for ([[maybe_unused]] auto [Field, Loc] : FieldLocs) + if (!Fields.contains(cast_or_null(Field))) + return false; + return true; +} + +/// Returns the fields of a `RecordDecl` that are initialized by an +/// `InitListExpr`, in the order in which they appear in +/// `InitListExpr::inits()`. +/// `Init->getType()` must be a record type. +static std::vector +getFieldsForInitListExpr(const InitListExpr *InitList) { + const RecordDecl *RD = InitList->getType()->getAsRecordDecl(); + assert(RD != nullptr); + + std::vector Fields; + + if (InitList->getType()->isUnionType()) { + Fields.push_back(InitList->getInitializedFieldInUnion()); + return Fields; + } + + // Unnamed bitfields are only used for padding and do not appear in + // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s + // field list, and we thus need to remove them before mapping inits to + // fields to avoid mapping inits to the wrongs fields. + llvm::copy_if( + RD->fields(), std::back_inserter(Fields), + [](const FieldDecl *Field) { return !Field->isUnnamedBitField(); }); + return Fields; +} + +RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList) { + auto *RD = InitList->getType()->getAsCXXRecordDecl(); + assert(RD != nullptr); + + std::vector Fields = getFieldsForInitListExpr(InitList); + ArrayRef Inits = InitList->inits(); + + // Unions initialized with an empty initializer list need special treatment. 
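// Concrete case for the comment above, not part of the patch:
union U { int a; };     // type name invented for illustration
constexpr U u = {};     // InitListExpr has zero inits, yet 'a' must be modelled,
                        // so an ImplicitValueInitExpr is synthesized below.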
+ // For structs/classes initialized with an empty initializer list, Clang + // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions, + // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves. + SmallVector InitsForUnion; + if (InitList->getType()->isUnionType() && Inits.empty()) { + assert(Fields.size() == 1); + ImplicitValueInitForUnion.emplace(Fields.front()->getType()); + InitsForUnion.push_back(&*ImplicitValueInitForUnion); + Inits = InitsForUnion; + } + + size_t InitIdx = 0; + + assert(Fields.size() + RD->getNumBases() == Inits.size()); + for (const CXXBaseSpecifier &Base : RD->bases()) { + assert(InitIdx < Inits.size()); + Expr *Init = Inits[InitIdx++]; + BaseInits.emplace_back(&Base, Init); + } + + assert(Fields.size() == Inits.size() - InitIdx); + for (const FieldDecl *Field : Fields) { + assert(InitIdx < Inits.size()); + Expr *Init = Inits[InitIdx++]; + FieldInits.emplace_back(Field, Init); + } +} + +static void insertIfGlobal(const Decl &D, + llvm::DenseSet &Globals) { + if (auto *V = dyn_cast(&D)) + if (V->hasGlobalStorage()) + Globals.insert(V); +} + +static void insertIfFunction(const Decl &D, + llvm::DenseSet &Funcs) { + if (auto *FD = dyn_cast(&D)) + Funcs.insert(FD); +} + +static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) { + // Use getCalleeDecl instead of getMethodDecl in order to handle + // pointer-to-member calls. + const auto *MethodDecl = dyn_cast_or_null(C.getCalleeDecl()); + if (!MethodDecl) + return nullptr; + auto *Body = dyn_cast_or_null(MethodDecl->getBody()); + if (!Body || Body->size() != 1) + return nullptr; + if (auto *RS = dyn_cast(*Body->body_begin())) + if (auto *Return = RS->getRetValue()) + return dyn_cast(Return->IgnoreParenImpCasts()); + return nullptr; +} + +static void getReferencedDecls(const Decl &D, ReferencedDecls &Referenced) { + insertIfGlobal(D, Referenced.Globals); + insertIfFunction(D, Referenced.Functions); + if (const auto *Decomp = dyn_cast(&D)) + for (const auto *B : Decomp->bindings()) + if (auto *ME = dyn_cast_or_null(B->getBinding())) + // FIXME: should we be using `E->getFoundDecl()`? + if (const auto *FD = dyn_cast(ME->getMemberDecl())) + Referenced.Fields.insert(FD); +} + +/// Traverses `S` and inserts into `Referenced` any declarations that are +/// declared in or referenced from sub-statements. +static void getReferencedDecls(const Stmt &S, ReferencedDecls &Referenced) { + for (auto *Child : S.children()) + if (Child != nullptr) + getReferencedDecls(*Child, Referenced); + if (const auto *DefaultArg = dyn_cast(&S)) + getReferencedDecls(*DefaultArg->getExpr(), Referenced); + if (const auto *DefaultInit = dyn_cast(&S)) + getReferencedDecls(*DefaultInit->getExpr(), Referenced); + + if (auto *DS = dyn_cast(&S)) { + if (DS->isSingleDecl()) + getReferencedDecls(*DS->getSingleDecl(), Referenced); + else + for (auto *D : DS->getDeclGroup()) + getReferencedDecls(*D, Referenced); + } else if (auto *E = dyn_cast(&S)) { + insertIfGlobal(*E->getDecl(), Referenced.Globals); + insertIfFunction(*E->getDecl(), Referenced.Functions); + } else if (const auto *C = dyn_cast(&S)) { + // If this is a method that returns a member variable but does nothing else, + // model the field of the return value. + if (MemberExpr *E = getMemberForAccessor(*C)) + if (const auto *FD = dyn_cast(E->getMemberDecl())) + Referenced.Fields.insert(FD); + } else if (auto *E = dyn_cast(&S)) { + // FIXME: should we be using `E->getFoundDecl()`? 
+ const ValueDecl *VD = E->getMemberDecl(); + insertIfGlobal(*VD, Referenced.Globals); + insertIfFunction(*VD, Referenced.Functions); + if (const auto *FD = dyn_cast(VD)) + Referenced.Fields.insert(FD); + } else if (auto *InitList = dyn_cast(&S)) { + if (InitList->getType()->isRecordType()) + for (const auto *FD : getFieldsForInitListExpr(InitList)) + Referenced.Fields.insert(FD); + } +} + +ReferencedDecls getReferencedDecls(const FunctionDecl &FD) { + ReferencedDecls Result; + // Look for global variable and field references in the + // constructor-initializers. + if (const auto *CtorDecl = dyn_cast(&FD)) { + for (const auto *Init : CtorDecl->inits()) { + if (Init->isMemberInitializer()) { + Result.Fields.insert(Init->getMember()); + } else if (Init->isIndirectMemberInitializer()) { + for (const auto *I : Init->getIndirectMember()->chain()) + Result.Fields.insert(cast(I)); + } + const Expr *E = Init->getInit(); + assert(E != nullptr); + getReferencedDecls(*E, Result); + } + // Add all fields mentioned in default member initializers. + for (const FieldDecl *F : CtorDecl->getParent()->fields()) + if (const auto *I = F->getInClassInitializer()) + getReferencedDecls(*I, Result); + } + getReferencedDecls(*FD.getBody(), Result); + + return Result; +} + +} // namespace clang::dataflow diff --git a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt index a3b5d9adc24bd..6631fe27f3d90 100644 --- a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt +++ b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt @@ -1,6 +1,7 @@ add_clang_library(clangAnalysisFlowSensitive AdornedCFG.cpp Arena.cpp + ASTOps.cpp DataflowAnalysisContext.cpp DataflowEnvironment.cpp Formula.cpp diff --git a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp index d520539dd2535..e94fd39c45dc1 100644 --- a/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp +++ b/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp @@ -14,6 +14,7 @@ #include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h" #include "clang/AST/ExprCXX.h" +#include "clang/Analysis/FlowSensitive/ASTOps.h" #include "clang/Analysis/FlowSensitive/DebugSupport.h" #include "clang/Analysis/FlowSensitive/Formula.h" #include "clang/Analysis/FlowSensitive/Logger.h" @@ -359,55 +360,3 @@ DataflowAnalysisContext::~DataflowAnalysisContext() = default; } // namespace dataflow } // namespace clang - -using namespace clang; - -const Expr &clang::dataflow::ignoreCFGOmittedNodes(const Expr &E) { - const Expr *Current = &E; - if (auto *EWC = dyn_cast(Current)) { - Current = EWC->getSubExpr(); - assert(Current != nullptr); - } - Current = Current->IgnoreParens(); - assert(Current != nullptr); - return *Current; -} - -const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) { - if (auto *E = dyn_cast(&S)) - return ignoreCFGOmittedNodes(*E); - return S; -} - -// FIXME: Does not precisely handle non-virtual diamond inheritance. A single -// field decl will be modeled for all instances of the inherited field. 
-static void getFieldsFromClassHierarchy(QualType Type, - clang::dataflow::FieldSet &Fields) { - if (Type->isIncompleteType() || Type->isDependentType() || - !Type->isRecordType()) - return; - - for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) - Fields.insert(Field); - if (auto *CXXRecord = Type->getAsCXXRecordDecl()) - for (const CXXBaseSpecifier &Base : CXXRecord->bases()) - getFieldsFromClassHierarchy(Base.getType(), Fields); -} - -/// Gets the set of all fields in the type. -clang::dataflow::FieldSet clang::dataflow::getObjectFields(QualType Type) { - FieldSet Fields; - getFieldsFromClassHierarchy(Type, Fields); - return Fields; -} - -bool clang::dataflow::containsSameFields( - const clang::dataflow::FieldSet &Fields, - const clang::dataflow::RecordStorageLocation::FieldToLoc &FieldLocs) { - if (Fields.size() != FieldLocs.size()) - return false; - for ([[maybe_unused]] auto [Field, Loc] : FieldLocs) - if (!Fields.contains(cast_or_null(Field))) - return false; - return true; -} diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp index 1bfa7ebcfd50c..3f1600d9ac5d8 100644 --- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp +++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp @@ -15,7 +15,9 @@ #include "clang/Analysis/FlowSensitive/DataflowEnvironment.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/Type.h" +#include "clang/Analysis/FlowSensitive/ASTOps.h" #include "clang/Analysis/FlowSensitive/DataflowLattice.h" #include "clang/Analysis/FlowSensitive/Value.h" #include "llvm/ADT/DenseMap.h" @@ -26,6 +28,8 @@ #include #include +#define DEBUG_TYPE "dataflow" + namespace clang { namespace dataflow { @@ -301,90 +305,199 @@ widenKeyToValueMap(const llvm::MapVector &CurMap, return WidenedMap; } -/// Initializes a global storage value. -static void insertIfGlobal(const Decl &D, - llvm::DenseSet &Vars) { - if (auto *V = dyn_cast(&D)) - if (V->hasGlobalStorage()) - Vars.insert(V); -} +namespace { + +// Visitor that builds a map from record prvalues to result objects. +// This traverses the body of the function to be analyzed; for each result +// object that it encounters, it propagates the storage location of the result +// object to all record prvalues that can initialize it. +class ResultObjectVisitor : public RecursiveASTVisitor { +public: + // `ResultObjectMap` will be filled with a map from record prvalues to result + // object. If the function being analyzed returns a record by value, + // `LocForRecordReturnVal` is the location to which this record should be + // written; otherwise, it is null. + explicit ResultObjectVisitor( + llvm::DenseMap &ResultObjectMap, + RecordStorageLocation *LocForRecordReturnVal, + DataflowAnalysisContext &DACtx) + : ResultObjectMap(ResultObjectMap), + LocForRecordReturnVal(LocForRecordReturnVal), DACtx(DACtx) {} + + bool shouldVisitImplicitCode() { return true; } + + bool shouldVisitLambdaBody() const { return false; } + + // Traverse all member and base initializers of `Ctor`. This function is not + // called by `RecursiveASTVisitor`; it should be called manually if we are + // analyzing a constructor. `ThisPointeeLoc` is the storage location that + // `this` points to. 
+ void TraverseConstructorInits(const CXXConstructorDecl *Ctor, + RecordStorageLocation *ThisPointeeLoc) { + assert(ThisPointeeLoc != nullptr); + for (const CXXCtorInitializer *Init : Ctor->inits()) { + Expr *InitExpr = Init->getInit(); + if (FieldDecl *Field = Init->getMember(); + Field != nullptr && Field->getType()->isRecordType()) { + PropagateResultObject(InitExpr, cast( + ThisPointeeLoc->getChild(*Field))); + } else if (Init->getBaseClass()) { + PropagateResultObject(InitExpr, ThisPointeeLoc); + } -static void insertIfFunction(const Decl &D, - llvm::DenseSet &Funcs) { - if (auto *FD = dyn_cast(&D)) - Funcs.insert(FD); -} + // Ensure that any result objects within `InitExpr` (e.g. temporaries) + // are also propagated to the prvalues that initialize them. + TraverseStmt(InitExpr); -static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) { - // Use getCalleeDecl instead of getMethodDecl in order to handle - // pointer-to-member calls. - const auto *MethodDecl = dyn_cast_or_null(C.getCalleeDecl()); - if (!MethodDecl) - return nullptr; - auto *Body = dyn_cast_or_null(MethodDecl->getBody()); - if (!Body || Body->size() != 1) - return nullptr; - if (auto *RS = dyn_cast(*Body->body_begin())) - if (auto *Return = RS->getRetValue()) - return dyn_cast(Return->IgnoreParenImpCasts()); - return nullptr; -} + // If this is a `CXXDefaultInitExpr`, also propagate any result objects + // within the default expression. + if (auto *DefaultInit = dyn_cast(InitExpr)) + TraverseStmt(DefaultInit->getExpr()); + } + } -static void -getFieldsGlobalsAndFuncs(const Decl &D, FieldSet &Fields, - llvm::DenseSet &Vars, - llvm::DenseSet &Funcs) { - insertIfGlobal(D, Vars); - insertIfFunction(D, Funcs); - if (const auto *Decomp = dyn_cast(&D)) - for (const auto *B : Decomp->bindings()) - if (auto *ME = dyn_cast_or_null(B->getBinding())) - // FIXME: should we be using `E->getFoundDecl()`? - if (const auto *FD = dyn_cast(ME->getMemberDecl())) - Fields.insert(FD); -} + bool TraverseBindingDecl(BindingDecl *BD) { + // `RecursiveASTVisitor` doesn't traverse holding variables for + // `BindingDecl`s by itself, so we need to tell it to. + if (VarDecl *HoldingVar = BD->getHoldingVar()) + TraverseDecl(HoldingVar); + return RecursiveASTVisitor::TraverseBindingDecl(BD); + } -/// Traverses `S` and inserts into `Fields`, `Vars` and `Funcs` any fields, -/// global variables and functions that are declared in or referenced from -/// sub-statements. -static void -getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields, - llvm::DenseSet &Vars, - llvm::DenseSet &Funcs) { - for (auto *Child : S.children()) - if (Child != nullptr) - getFieldsGlobalsAndFuncs(*Child, Fields, Vars, Funcs); - if (const auto *DefaultInit = dyn_cast(&S)) - getFieldsGlobalsAndFuncs(*DefaultInit->getExpr(), Fields, Vars, Funcs); - - if (auto *DS = dyn_cast(&S)) { - if (DS->isSingleDecl()) - getFieldsGlobalsAndFuncs(*DS->getSingleDecl(), Fields, Vars, Funcs); - else - for (auto *D : DS->getDeclGroup()) - getFieldsGlobalsAndFuncs(*D, Fields, Vars, Funcs); - } else if (auto *E = dyn_cast(&S)) { - insertIfGlobal(*E->getDecl(), Vars); - insertIfFunction(*E->getDecl(), Funcs); - } else if (const auto *C = dyn_cast(&S)) { - // If this is a method that returns a member variable but does nothing else, - // model the field of the return value. - if (MemberExpr *E = getMemberForAccessor(*C)) - if (const auto *FD = dyn_cast(E->getMemberDecl())) - Fields.insert(FD); - } else if (auto *E = dyn_cast(&S)) { - // FIXME: should we be using `E->getFoundDecl()`? 
- const ValueDecl *VD = E->getMemberDecl(); - insertIfGlobal(*VD, Vars); - insertIfFunction(*VD, Funcs); - if (const auto *FD = dyn_cast(VD)) - Fields.insert(FD); - } else if (auto *InitList = dyn_cast(&S)) { - if (InitList->getType()->isRecordType()) - for (const auto *FD : getFieldsForInitListExpr(InitList)) - Fields.insert(FD); + bool VisitVarDecl(VarDecl *VD) { + if (VD->getType()->isRecordType() && VD->hasInit()) + PropagateResultObject( + VD->getInit(), + &cast(DACtx.getStableStorageLocation(*VD))); + return true; } -} + + bool VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE) { + if (MTE->getType()->isRecordType()) + PropagateResultObject( + MTE->getSubExpr(), + &cast(DACtx.getStableStorageLocation(*MTE))); + return true; + } + + bool VisitReturnStmt(ReturnStmt *Return) { + Expr *RetValue = Return->getRetValue(); + if (RetValue != nullptr && RetValue->getType()->isRecordType() && + RetValue->isPRValue()) + PropagateResultObject(RetValue, LocForRecordReturnVal); + return true; + } + + bool VisitExpr(Expr *E) { + // Clang's AST can have record-type prvalues without a result object -- for + // example as full-expressions contained in a compound statement or as + // arguments of call expressions. We notice this if we get here and a + // storage location has not yet been associated with `E`. In this case, + // treat this as if it was a `MaterializeTemporaryExpr`. + if (E->isPRValue() && E->getType()->isRecordType() && + !ResultObjectMap.contains(E)) + PropagateResultObject( + E, &cast(DACtx.getStableStorageLocation(*E))); + return true; + } + + // Assigns `Loc` as the result object location of `E`, then propagates the + // location to all lower-level prvalues that initialize the same object as + // `E` (or one of its base classes or member variables). + void PropagateResultObject(Expr *E, RecordStorageLocation *Loc) { + if (!E->isPRValue() || !E->getType()->isRecordType()) { + assert(false); + // Ensure we don't propagate the result object if we hit this in a + // release build. + return; + } + + ResultObjectMap[E] = Loc; + + // The following AST node kinds are "original initializers": They are the + // lowest-level AST node that initializes a given object, and nothing + // below them can initialize the same object (or part of it). + if (isa(E) || isa(E) || isa(E) || + isa(E) || isa(E) || + isa(E) || + // We treat `BuiltinBitCastExpr` as an "original initializer" too as + // it may not even be casting from a record type -- and even if it is, + // the two objects are in general of unrelated type. + isa(E)) { + return; + } + if (auto *Op = dyn_cast(E); + Op && Op->getOpcode() == BO_Cmp) { + // Builtin `<=>` returns a `std::strong_ordering` object. + return; + } + + if (auto *InitList = dyn_cast(E)) { + if (!InitList->isSemanticForm()) + return; + if (InitList->isTransparent()) { + PropagateResultObject(InitList->getInit(0), Loc); + return; + } + + RecordInitListHelper InitListHelper(InitList); + + for (auto [Base, Init] : InitListHelper.base_inits()) { + assert(Base->getType().getCanonicalType() == + Init->getType().getCanonicalType()); + + // Storage location for the base class is the same as that of the + // derived class because we "flatten" the object hierarchy and put all + // fields in `RecordStorageLocation` of the derived class. + PropagateResultObject(Init, Loc); + } + + for (auto [Field, Init] : InitListHelper.field_inits()) { + // Fields of non-record type are handled in + // `TransferVisitor::VisitInitListExpr()`. 
+ if (!Field->getType()->isRecordType()) + continue; + PropagateResultObject( + Init, cast(Loc->getChild(*Field))); + } + return; + } + + if (auto *Op = dyn_cast(E); Op && Op->isCommaOp()) { + PropagateResultObject(Op->getRHS(), Loc); + return; + } + + if (auto *Cond = dyn_cast(E)) { + PropagateResultObject(Cond->getTrueExpr(), Loc); + PropagateResultObject(Cond->getFalseExpr(), Loc); + return; + } + + if (auto *SE = dyn_cast(E)) { + PropagateResultObject(cast(SE->getSubStmt()->body_back()), Loc); + return; + } + + // All other expression nodes that propagate a record prvalue should have + // exactly one child. + SmallVector Children(E->child_begin(), E->child_end()); + LLVM_DEBUG({ + if (Children.size() != 1) + E->dump(); + }); + assert(Children.size() == 1); + for (Stmt *S : Children) + PropagateResultObject(cast(S), Loc); + } + +private: + llvm::DenseMap &ResultObjectMap; + RecordStorageLocation *LocForRecordReturnVal; + DataflowAnalysisContext &DACtx; +}; + +} // namespace Environment::Environment(DataflowAnalysisContext &DACtx) : DACtx(&DACtx), @@ -401,17 +514,23 @@ void Environment::initialize() { if (DeclCtx == nullptr) return; - if (const auto *FuncDecl = dyn_cast(DeclCtx)) { - assert(FuncDecl->doesThisDeclarationHaveABody()); + const auto *FuncDecl = dyn_cast(DeclCtx); + if (FuncDecl == nullptr) + return; - initFieldsGlobalsAndFuncs(FuncDecl); + assert(FuncDecl->doesThisDeclarationHaveABody()); - for (const auto *ParamDecl : FuncDecl->parameters()) { - assert(ParamDecl != nullptr); - setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr)); - } + initFieldsGlobalsAndFuncs(FuncDecl); + + for (const auto *ParamDecl : FuncDecl->parameters()) { + assert(ParamDecl != nullptr); + setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr)); } + if (FuncDecl->getReturnType()->isRecordType()) + LocForRecordReturnVal = &cast( + createStorageLocation(FuncDecl->getReturnType())); + if (const auto *MethodDecl = dyn_cast(DeclCtx)) { auto *Parent = MethodDecl->getParent(); assert(Parent != nullptr); @@ -444,6 +563,12 @@ void Environment::initialize() { initializeFieldsWithValues(ThisLoc); } } + + // We do this below the handling of `CXXMethodDecl` above so that we can + // be sure that the storage location for `this` has been set. + ResultObjectMap = std::make_shared( + buildResultObjectMap(DACtx, FuncDecl, getThisPointeeStorageLocation(), + LocForRecordReturnVal)); } // FIXME: Add support for resetting globals after function calls to enable @@ -451,46 +576,28 @@ void Environment::initialize() { void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) { assert(FuncDecl->doesThisDeclarationHaveABody()); - FieldSet Fields; - llvm::DenseSet Vars; - llvm::DenseSet Funcs; - - // Look for global variable and field references in the - // constructor-initializers. - if (const auto *CtorDecl = dyn_cast(FuncDecl)) { - for (const auto *Init : CtorDecl->inits()) { - if (Init->isMemberInitializer()) { - Fields.insert(Init->getMember()); - } else if (Init->isIndirectMemberInitializer()) { - for (const auto *I : Init->getIndirectMember()->chain()) - Fields.insert(cast(I)); - } - const Expr *E = Init->getInit(); - assert(E != nullptr); - getFieldsGlobalsAndFuncs(*E, Fields, Vars, Funcs); - } - // Add all fields mentioned in default member initializers. 
- for (const FieldDecl *F : CtorDecl->getParent()->fields()) - if (const auto *I = F->getInClassInitializer()) - getFieldsGlobalsAndFuncs(*I, Fields, Vars, Funcs); - } - getFieldsGlobalsAndFuncs(*FuncDecl->getBody(), Fields, Vars, Funcs); + ReferencedDecls Referenced = getReferencedDecls(*FuncDecl); // These have to be added before the lines that follow to ensure that // `create*` work correctly for structs. - DACtx->addModeledFields(Fields); + DACtx->addModeledFields(Referenced.Fields); - for (const VarDecl *D : Vars) { + for (const VarDecl *D : Referenced.Globals) { if (getStorageLocation(*D) != nullptr) continue; - setStorageLocation(*D, createObject(*D)); + // We don't run transfer functions on the initializers of global variables, + // so they won't be associated with a value or storage location. We + // therefore intentionally don't pass an initializer to `createObject()`; + // in particular, this ensures that `createObject()` will initialize the + // fields of record-type variables with values. + setStorageLocation(*D, createObject(*D, nullptr)); } - for (const FunctionDecl *FD : Funcs) { + for (const FunctionDecl *FD : Referenced.Functions) { if (getStorageLocation(*FD) != nullptr) continue; - auto &Loc = createStorageLocation(FD->getType()); + auto &Loc = createStorageLocation(*FD); setStorageLocation(*FD, Loc); } } @@ -519,6 +626,9 @@ Environment Environment::pushCall(const CallExpr *Call) const { } } + if (Call->getType()->isRecordType() && Call->isPRValue()) + Env.LocForRecordReturnVal = &Env.getResultObjectLocation(*Call); + Env.pushCallInternal(Call->getDirectCallee(), llvm::ArrayRef(Call->getArgs(), Call->getNumArgs())); @@ -529,6 +639,7 @@ Environment Environment::pushCall(const CXXConstructExpr *Call) const { Environment Env(*this); Env.ThisPointeeLoc = &Env.getResultObjectLocation(*Call); + Env.LocForRecordReturnVal = &Env.getResultObjectLocation(*Call); Env.pushCallInternal(Call->getConstructor(), llvm::ArrayRef(Call->getArgs(), Call->getNumArgs())); @@ -557,6 +668,10 @@ void Environment::pushCallInternal(const FunctionDecl *FuncDecl, const VarDecl *Param = *ParamIt; setStorageLocation(*Param, createObject(*Param, Args[ArgIndex])); } + + ResultObjectMap = std::make_shared( + buildResultObjectMap(DACtx, FuncDecl, getThisPointeeStorageLocation(), + LocForRecordReturnVal)); } void Environment::popCall(const CallExpr *Call, const Environment &CalleeEnv) { @@ -600,6 +715,9 @@ bool Environment::equivalentTo(const Environment &Other, if (ReturnLoc != Other.ReturnLoc) return false; + if (LocForRecordReturnVal != Other.LocForRecordReturnVal) + return false; + if (ThisPointeeLoc != Other.ThisPointeeLoc) return false; @@ -623,8 +741,10 @@ LatticeEffect Environment::widen(const Environment &PrevEnv, assert(DACtx == PrevEnv.DACtx); assert(ReturnVal == PrevEnv.ReturnVal); assert(ReturnLoc == PrevEnv.ReturnLoc); + assert(LocForRecordReturnVal == PrevEnv.LocForRecordReturnVal); assert(ThisPointeeLoc == PrevEnv.ThisPointeeLoc); assert(CallStack == PrevEnv.CallStack); + assert(ResultObjectMap == PrevEnv.ResultObjectMap); auto Effect = LatticeEffect::Unchanged; @@ -656,12 +776,16 @@ Environment Environment::join(const Environment &EnvA, const Environment &EnvB, Environment::ValueModel &Model, ExprJoinBehavior ExprBehavior) { assert(EnvA.DACtx == EnvB.DACtx); + assert(EnvA.LocForRecordReturnVal == EnvB.LocForRecordReturnVal); assert(EnvA.ThisPointeeLoc == EnvB.ThisPointeeLoc); assert(EnvA.CallStack == EnvB.CallStack); + assert(EnvA.ResultObjectMap == EnvB.ResultObjectMap); Environment 
JoinedEnv(*EnvA.DACtx); JoinedEnv.CallStack = EnvA.CallStack; + JoinedEnv.ResultObjectMap = EnvA.ResultObjectMap; + JoinedEnv.LocForRecordReturnVal = EnvA.LocForRecordReturnVal; JoinedEnv.ThisPointeeLoc = EnvA.ThisPointeeLoc; if (EnvA.ReturnVal == nullptr || EnvB.ReturnVal == nullptr) { @@ -730,6 +854,12 @@ StorageLocation &Environment::createStorageLocation(const Expr &E) { void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) { assert(!DeclToLoc.contains(&D)); + // The only kinds of declarations that may have a "variable" storage location + // are declarations of reference type and `BindingDecl`. For all other + // declaration, the storage location should be the stable storage location + // returned by `createStorageLocation()`. + assert(D.getType()->isReferenceType() || isa(D) || + &Loc == &createStorageLocation(D)); DeclToLoc[&D] = &Loc; } @@ -764,77 +894,34 @@ StorageLocation *Environment::getStorageLocation(const Expr &E) const { return It == ExprToLoc.end() ? nullptr : &*It->second; } -// Returns whether a prvalue of record type is the one that originally -// constructs the object (i.e. it doesn't propagate it from one of its -// children). -static bool isOriginalRecordConstructor(const Expr &RecordPRValue) { - if (auto *Init = dyn_cast(&RecordPRValue)) - return !Init->isSemanticForm() || !Init->isTransparent(); - return isa(RecordPRValue) || isa(RecordPRValue) || - isa(RecordPRValue) || - isa(RecordPRValue) || - isa(RecordPRValue) || - // The framework currently does not propagate the objects created in - // the two branches of a `ConditionalOperator` because there is no way - // to reconcile their storage locations, which are different. We - // therefore claim that the `ConditionalOperator` is the expression - // that originally constructs the object. - // Ultimately, this will be fixed by propagating locations down from - // the result object, rather than up from the original constructor as - // we do now (see also the FIXME in the documentation for - // `getResultObjectLocation()`). - isa(RecordPRValue); -} - RecordStorageLocation & Environment::getResultObjectLocation(const Expr &RecordPRValue) const { assert(RecordPRValue.getType()->isRecordType()); assert(RecordPRValue.isPRValue()); - // Returns a storage location that we can use if assertions fail. - auto FallbackForAssertFailure = - [this, &RecordPRValue]() -> RecordStorageLocation & { + assert(ResultObjectMap != nullptr); + RecordStorageLocation *Loc = ResultObjectMap->lookup(&RecordPRValue); + assert(Loc != nullptr); + // In release builds, use the "stable" storage location if the map lookup + // failed. + if (Loc == nullptr) return cast( DACtx->getStableStorageLocation(RecordPRValue)); - }; - - if (isOriginalRecordConstructor(RecordPRValue)) { - auto *Val = cast_or_null(getValue(RecordPRValue)); - // The builtin transfer function should have created a `RecordValue` for all - // original record constructors. - assert(Val); - if (!Val) - return FallbackForAssertFailure(); - return Val->getLoc(); - } - - if (auto *Op = dyn_cast(&RecordPRValue); - Op && Op->isCommaOp()) { - return getResultObjectLocation(*Op->getRHS()); - } - - // All other expression nodes that propagate a record prvalue should have - // exactly one child. 
- llvm::SmallVector children(RecordPRValue.child_begin(), - RecordPRValue.child_end()); - assert(children.size() == 1); - if (children.empty()) - return FallbackForAssertFailure(); - - return getResultObjectLocation(*cast(children[0])); + return *Loc; } PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) { return DACtx->getOrCreateNullPointerValue(PointeeType); } -void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc) { +void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc, + QualType Type) { llvm::DenseSet Visited; int CreatedValuesCount = 0; - initializeFieldsWithValues(Loc, Visited, 0, CreatedValuesCount); + initializeFieldsWithValues(Loc, Type, Visited, 0, CreatedValuesCount); if (CreatedValuesCount > MaxCompositeValueSize) { - llvm::errs() << "Attempting to initialize a huge value of type: " - << Loc.getType() << '\n'; + llvm::errs() << "Attempting to initialize a huge value of type: " << Type + << '\n'; } } @@ -848,8 +935,7 @@ void Environment::setValue(const Expr &E, Value &Val) { const Expr &CanonE = ignoreCFGOmittedNodes(E); if (auto *RecordVal = dyn_cast(&Val)) { - assert(isOriginalRecordConstructor(CanonE) || - &RecordVal->getLoc() == &getResultObjectLocation(CanonE)); + assert(&RecordVal->getLoc() == &getResultObjectLocation(CanonE)); (void)RecordVal; } @@ -928,7 +1014,8 @@ Value *Environment::createValueUnlessSelfReferential( if (Type->isRecordType()) { CreatedValuesCount++; auto &Loc = cast(createStorageLocation(Type)); - initializeFieldsWithValues(Loc, Visited, Depth, CreatedValuesCount); + initializeFieldsWithValues(Loc, Loc.getType(), Visited, Depth, + CreatedValuesCount); return &refreshRecordValue(Loc, *this); } @@ -960,6 +1047,7 @@ Environment::createLocAndMaybeValue(QualType Ty, } void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc, + QualType Type, llvm::DenseSet &Visited, int Depth, int &CreatedValuesCount) { @@ -967,8 +1055,8 @@ void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc, if (FieldType->isRecordType()) { auto &FieldRecordLoc = cast(FieldLoc); setValue(FieldRecordLoc, create(FieldRecordLoc)); - initializeFieldsWithValues(FieldRecordLoc, Visited, Depth + 1, - CreatedValuesCount); + initializeFieldsWithValues(FieldRecordLoc, FieldRecordLoc.getType(), + Visited, Depth + 1, CreatedValuesCount); } else { if (!Visited.insert(FieldType.getCanonicalType()).second) return; @@ -979,7 +1067,7 @@ void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc, } }; - for (const auto &[Field, FieldLoc] : Loc.children()) { + for (const FieldDecl *Field : DACtx->getModeledFields(Type)) { assert(Field != nullptr); QualType FieldType = Field->getType(); @@ -988,14 +1076,12 @@ void Environment::initializeFieldsWithValues(RecordStorageLocation &Loc, &createLocAndMaybeValue(FieldType, Visited, Depth + 1, CreatedValuesCount)); } else { + StorageLocation *FieldLoc = Loc.getChild(*Field); assert(FieldLoc != nullptr); initField(FieldType, *FieldLoc); } } - for (const auto &[FieldName, FieldLoc] : Loc.synthetic_fields()) { - assert(FieldLoc != nullptr); - QualType FieldType = FieldLoc->getType(); - + for (const auto &[FieldName, FieldType] : DACtx->getSyntheticFields(Type)) { // Synthetic fields cannot have reference type, so we don't need to deal // with this case. 
assert(!FieldType->isReferenceType()); @@ -1022,38 +1108,36 @@ StorageLocation &Environment::createObjectInternal(const ValueDecl *D, return createObjectInternal(D, Ty.getNonReferenceType(), nullptr); } - Value *Val = nullptr; - if (InitExpr) { - // In the (few) cases where an expression is intentionally - // "uninterpreted", `InitExpr` is not associated with a value. There are - // two ways to handle this situation: propagate the status, so that - // uninterpreted initializers result in uninterpreted variables, or - // provide a default value. We choose the latter so that later refinements - // of the variable can be used for reasoning about the surrounding code. - // For this reason, we let this case be handled by the `createValue()` - // call below. - // - // FIXME. If and when we interpret all language cases, change this to - // assert that `InitExpr` is interpreted, rather than supplying a - // default value (assuming we don't update the environment API to return - // references). - Val = getValue(*InitExpr); - - if (!Val && isa(InitExpr) && - InitExpr->getType()->isPointerType()) - Val = &getOrCreateNullPointerValue(InitExpr->getType()->getPointeeType()); - } - if (!Val) - Val = createValue(Ty); - - if (Ty->isRecordType()) - return cast(Val)->getLoc(); - StorageLocation &Loc = D ? createStorageLocation(*D) : createStorageLocation(Ty); - if (Val) - setValue(Loc, *Val); + if (Ty->isRecordType()) { + auto &RecordLoc = cast(Loc); + if (!InitExpr) + initializeFieldsWithValues(RecordLoc); + refreshRecordValue(RecordLoc, *this); + } else { + Value *Val = nullptr; + if (InitExpr) + // In the (few) cases where an expression is intentionally + // "uninterpreted", `InitExpr` is not associated with a value. There are + // two ways to handle this situation: propagate the status, so that + // uninterpreted initializers result in uninterpreted variables, or + // provide a default value. We choose the latter so that later refinements + // of the variable can be used for reasoning about the surrounding code. + // For this reason, we let this case be handled by the `createValue()` + // call below. + // + // FIXME. If and when we interpret all language cases, change this to + // assert that `InitExpr` is interpreted, rather than supplying a + // default value (assuming we don't update the environment API to return + // references). 
+ Val = getValue(*InitExpr); + if (!Val) + Val = createValue(Ty); + if (Val) + setValue(Loc, *Val); + } return Loc; } @@ -1072,6 +1156,8 @@ bool Environment::allows(const Formula &F) const { void Environment::dump(raw_ostream &OS) const { llvm::DenseMap LocToName; + if (LocForRecordReturnVal != nullptr) + LocToName[LocForRecordReturnVal] = "(returned record)"; if (ThisPointeeLoc != nullptr) LocToName[ThisPointeeLoc] = "this"; @@ -1102,6 +1188,9 @@ void Environment::dump(raw_ostream &OS) const { if (auto Iter = LocToName.find(ReturnLoc); Iter != LocToName.end()) OS << " (" << Iter->second << ")"; OS << "\n"; + } else if (Func->getReturnType()->isRecordType() || + isa(Func)) { + OS << "LocForRecordReturnVal: " << LocForRecordReturnVal << "\n"; } else if (!Func->getReturnType()->isVoidType()) { if (ReturnVal == nullptr) OS << "ReturnVal: nullptr\n"; @@ -1122,6 +1211,22 @@ void Environment::dump() const { dump(llvm::dbgs()); } +Environment::PrValueToResultObject Environment::buildResultObjectMap( + DataflowAnalysisContext *DACtx, const FunctionDecl *FuncDecl, + RecordStorageLocation *ThisPointeeLoc, + RecordStorageLocation *LocForRecordReturnVal) { + assert(FuncDecl->doesThisDeclarationHaveABody()); + + PrValueToResultObject Map; + + ResultObjectVisitor Visitor(Map, LocForRecordReturnVal, *DACtx); + if (const auto *Ctor = dyn_cast(FuncDecl)) + Visitor.TraverseConstructorInits(Ctor, ThisPointeeLoc); + Visitor.TraverseStmt(FuncDecl->getBody()); + + return Map; +} + RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE, const Environment &Env) { Expr *ImplicitObject = MCE.getImplicitObjectArgument(); @@ -1149,64 +1254,6 @@ RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME, return Env.get(*Base); } -std::vector -getFieldsForInitListExpr(const InitListExpr *InitList) { - const RecordDecl *RD = InitList->getType()->getAsRecordDecl(); - assert(RD != nullptr); - - std::vector Fields; - - if (InitList->getType()->isUnionType()) { - Fields.push_back(InitList->getInitializedFieldInUnion()); - return Fields; - } - - // Unnamed bitfields are only used for padding and do not appear in - // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s - // field list, and we thus need to remove them before mapping inits to - // fields to avoid mapping inits to the wrongs fields. - llvm::copy_if( - RD->fields(), std::back_inserter(Fields), - [](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); }); - return Fields; -} - -RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList) { - auto *RD = InitList->getType()->getAsCXXRecordDecl(); - assert(RD != nullptr); - - std::vector Fields = getFieldsForInitListExpr(InitList); - ArrayRef Inits = InitList->inits(); - - // Unions initialized with an empty initializer list need special treatment. - // For structs/classes initialized with an empty initializer list, Clang - // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions, - // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves. 
-  SmallVector InitsForUnion;
-  if (InitList->getType()->isUnionType() && Inits.empty()) {
-    assert(Fields.size() == 1);
-    ImplicitValueInitForUnion.emplace(Fields.front()->getType());
-    InitsForUnion.push_back(&*ImplicitValueInitForUnion);
-    Inits = InitsForUnion;
-  }
-
-  size_t InitIdx = 0;
-
-  assert(Fields.size() + RD->getNumBases() == Inits.size());
-  for (const CXXBaseSpecifier &Base : RD->bases()) {
-    assert(InitIdx < Inits.size());
-    Expr *Init = Inits[InitIdx++];
-    BaseInits.emplace_back(&Base, Init);
-  }
-
-  assert(Fields.size() == Inits.size() - InitIdx);
-  for (const FieldDecl *Field : Fields) {
-    assert(InitIdx < Inits.size());
-    Expr *Init = Inits[InitIdx++];
-    FieldInits.emplace_back(Field, Init);
-  }
-}
-
 RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
   auto &NewVal = Env.create(Loc);
   Env.setValue(Loc, NewVal);
@@ -1216,24 +1263,11 @@ RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
 RecordValue &refreshRecordValue(const Expr &Expr, Environment &Env) {
   assert(Expr.getType()->isRecordType());
-  if (Expr.isPRValue()) {
-    if (auto *ExistingVal = Env.get(Expr)) {
-      auto &NewVal = Env.create(ExistingVal->getLoc());
-      Env.setValue(Expr, NewVal);
-      Env.setValue(NewVal.getLoc(), NewVal);
-      return NewVal;
-    }
-
-    auto &NewVal = *cast(Env.createValue(Expr.getType()));
-    Env.setValue(Expr, NewVal);
-    return NewVal;
-  }
+  if (Expr.isPRValue())
+    refreshRecordValue(Env.getResultObjectLocation(Expr), Env);
-  if (auto *Loc = Env.get(Expr)) {
-    auto &NewVal = Env.create(*Loc);
-    Env.setValue(*Loc, NewVal);
-    return NewVal;
-  }
+  if (auto *Loc = Env.get(Expr))
+    refreshRecordValue(*Loc, Env);
   auto &NewVal = *cast(Env.createValue(Expr.getType()));
   Env.setStorageLocation(Expr, NewVal.getLoc());
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 0a2e8368d541d..1e034771014ea 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -20,7 +20,9 @@
 #include "clang/AST/OperationKinds.h"
 #include "clang/AST/Stmt.h"
 #include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/FlowSensitive/ASTOps.h"
 #include "clang/Analysis/FlowSensitive/AdornedCFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
 #include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
 #include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
 #include "clang/Analysis/FlowSensitive/RecordOps.h"
@@ -460,11 +462,9 @@ class TransferVisitor : public ConstStmtVisitor {
     // So make sure we have a value if we didn't propagate one above.
     if (S->isPRValue() && S->getType()->isRecordType()) {
       if (Env.getValue(*S) == nullptr) {
-        Value *Val = Env.createValue(S->getType());
-        // We're guaranteed to always be able to create a value for record
-        // types.
-        assert(Val != nullptr);
-        Env.setValue(*S, *Val);
+        auto &Loc = Env.getResultObjectLocation(*S);
+        Env.initializeFieldsWithValues(Loc);
+        refreshRecordValue(Loc, Env);
       }
     }
   }
@@ -472,6 +472,13 @@ class TransferVisitor : public ConstStmtVisitor {
   void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
     const Expr *InitExpr = S->getExpr();
     assert(InitExpr != nullptr);
+
+    // If this is a prvalue of record type, the handler for `*InitExpr` (if one
+    // exists) will initialize the result object; there is no value to propagate
+    // here.
+ if (S->getType()->isRecordType() && S->isPRValue()) + return; + propagateValueOrStorageLocation(*InitExpr, *S, Env); } @@ -479,6 +486,17 @@ class TransferVisitor : public ConstStmtVisitor { const CXXConstructorDecl *ConstructorDecl = S->getConstructor(); assert(ConstructorDecl != nullptr); + // `CXXConstructExpr` can have array type if default-initializing an array + // of records. We don't handle this specifically beyond potentially inlining + // the call. + if (!S->getType()->isRecordType()) { + transferInlineCall(S, ConstructorDecl); + return; + } + + RecordStorageLocation &Loc = Env.getResultObjectLocation(*S); + Env.setValue(*S, refreshRecordValue(Loc, Env)); + if (ConstructorDecl->isCopyOrMoveConstructor()) { // It is permissible for a copy/move constructor to have additional // parameters as long as they have default arguments defined for them. @@ -491,24 +509,14 @@ class TransferVisitor : public ConstStmtVisitor { if (ArgLoc == nullptr) return; - if (S->isElidable()) { - if (Value *Val = Env.getValue(*ArgLoc)) - Env.setValue(*S, *Val); - } else { - auto &Val = *cast(Env.createValue(S->getType())); - Env.setValue(*S, Val); - copyRecord(*ArgLoc, Val.getLoc(), Env); - } + // Even if the copy/move constructor call is elidable, we choose to copy + // the record in all cases (which isn't wrong, just potentially not + // optimal). + copyRecord(*ArgLoc, Loc, Env); return; } - // `CXXConstructExpr` can have array type if default-initializing an array - // of records, and we currently can't create values for arrays. So check if - // we've got a record type. - if (S->getType()->isRecordType()) { - auto &InitialVal = *cast(Env.createValue(S->getType())); - Env.setValue(*S, InitialVal); - } + Env.initializeFieldsWithValues(Loc, S->getType()); transferInlineCall(S, ConstructorDecl); } @@ -551,19 +559,15 @@ class TransferVisitor : public ConstStmtVisitor { if (S->isGLValue()) { Env.setStorageLocation(*S, *LocDst); } else if (S->getType()->isRecordType()) { - // Make sure that we have a `RecordValue` for this expression so that - // `Environment::getResultObjectLocation()` is able to return a location - // for it. - if (Env.getValue(*S) == nullptr) - refreshRecordValue(*S, Env); + // Assume that the assignment returns the assigned value. + copyRecord(*LocDst, Env.getResultObjectLocation(*S), Env); } return; } - // CXXOperatorCallExpr can be prvalues. Call `VisitCallExpr`() to create - // a `RecordValue` for them so that `Environment::getResultObjectLocation()` - // can return a value. + // `CXXOperatorCallExpr` can be a prvalue. Call `VisitCallExpr`() to + // initialize the prvalue's fields with values. VisitCallExpr(S); } @@ -580,11 +584,6 @@ class TransferVisitor : public ConstStmtVisitor { } } - void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) { - if (Value *Val = Env.createValue(S->getType())) - Env.setValue(*S, *Val); - } - void VisitCallExpr(const CallExpr *S) { // Of clang's builtins, only `__builtin_expect` is handled explicitly, since // others (like trap, debugtrap, and unreachable) are handled by CFG @@ -612,13 +611,14 @@ class TransferVisitor : public ConstStmtVisitor { } else if (const FunctionDecl *F = S->getDirectCallee()) { transferInlineCall(S, F); - // If this call produces a prvalue of record type, make sure that we have - // a `RecordValue` for it. This is required so that - // `Environment::getResultObjectLocation()` is able to return a location - // for this `CallExpr`. 
+ // If this call produces a prvalue of record type, initialize its fields + // with values. if (S->getType()->isRecordType() && S->isPRValue()) - if (Env.getValue(*S) == nullptr) - refreshRecordValue(*S, Env); + if (Env.getValue(*S) == nullptr) { + RecordStorageLocation &Loc = Env.getResultObjectLocation(*S); + Env.initializeFieldsWithValues(Loc); + Env.setValue(*S, refreshRecordValue(Loc, Env)); + } } } @@ -666,8 +666,10 @@ class TransferVisitor : public ConstStmtVisitor { // `getLogicOperatorSubExprValue()`. if (S->isGLValue()) Env.setStorageLocation(*S, Env.createObject(S->getType())); - else if (Value *Val = Env.createValue(S->getType())) - Env.setValue(*S, *Val); + else if (!S->getType()->isRecordType()) { + if (Value *Val = Env.createValue(S->getType())) + Env.setValue(*S, *Val); + } } void VisitInitListExpr(const InitListExpr *S) { @@ -688,71 +690,51 @@ class TransferVisitor : public ConstStmtVisitor { return; } - llvm::DenseMap FieldLocs; - RecordInitListHelper InitListHelper(S); + RecordStorageLocation &Loc = Env.getResultObjectLocation(*S); + Env.setValue(*S, refreshRecordValue(Loc, Env)); - for (auto [Base, Init] : InitListHelper.base_inits()) { - assert(Base->getType().getCanonicalType() == - Init->getType().getCanonicalType()); - auto *BaseVal = Env.get(*Init); - if (!BaseVal) - BaseVal = cast(Env.createValue(Init->getType())); - // Take ownership of the fields of the `RecordValue` for the base class - // and incorporate them into the "flattened" set of fields for the - // derived class. - auto Children = BaseVal->getLoc().children(); - FieldLocs.insert(Children.begin(), Children.end()); - } + // Initialization of base classes and fields of record type happens when we + // visit the nested `CXXConstructExpr` or `InitListExpr` for that base class + // or field. We therefore only need to deal with fields of non-record type + // here. - for (auto [Field, Init] : InitListHelper.field_inits()) { - assert( - // The types are same, or - Field->getType().getCanonicalType().getUnqualifiedType() == - Init->getType().getCanonicalType().getUnqualifiedType() || - // The field's type is T&, and initializer is T - (Field->getType()->isReferenceType() && - Field->getType().getCanonicalType()->getPointeeType() == - Init->getType().getCanonicalType())); - auto& Loc = Env.createObject(Field->getType(), Init); - FieldLocs.insert({Field, &Loc}); - } + RecordInitListHelper InitListHelper(S); - // In the case of a union, we don't in general have initializers for all - // of the fields. Create storage locations for the remaining fields (but - // don't associate them with values). - if (Type->isUnionType()) { - for (const FieldDecl *Field : - Env.getDataflowAnalysisContext().getModeledFields(Type)) { - if (auto [it, inserted] = FieldLocs.insert({Field, nullptr}); inserted) - it->second = &Env.createStorageLocation(Field->getType()); + for (auto [Field, Init] : InitListHelper.field_inits()) { + if (Field->getType()->isRecordType()) + continue; + if (Field->getType()->isReferenceType()) { + assert(Field->getType().getCanonicalType()->getPointeeType() == + Init->getType().getCanonicalType()); + Loc.setChild(*Field, &Env.createObject(Field->getType(), Init)); + continue; } + assert(Field->getType().getCanonicalType().getUnqualifiedType() == + Init->getType().getCanonicalType().getUnqualifiedType()); + StorageLocation *FieldLoc = Loc.getChild(*Field); + // Locations for non-reference fields must always be non-null. 
+ assert(FieldLoc != nullptr); + Value *Val = Env.getValue(*Init); + if (Val == nullptr && isa(Init) && + Init->getType()->isPointerType()) + Val = + &Env.getOrCreateNullPointerValue(Init->getType()->getPointeeType()); + if (Val == nullptr) + Val = Env.createValue(Field->getType()); + if (Val != nullptr) + Env.setValue(*FieldLoc, *Val); } - // Check that we satisfy the invariant that a `RecordStorageLoation` - // contains exactly the set of modeled fields for that type. - // `ModeledFields` includes fields from all the bases, but only the - // modeled ones. However, if a class type is initialized with an - // `InitListExpr`, all fields in the class, including those from base - // classes, are included in the set of modeled fields. The code above - // should therefore populate exactly the modeled fields. - assert(containsSameFields( - Env.getDataflowAnalysisContext().getModeledFields(Type), FieldLocs)); - - RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs; - for (const auto &Entry : - Env.getDataflowAnalysisContext().getSyntheticFields(Type)) { - SyntheticFieldLocs.insert( - {Entry.getKey(), &Env.createObject(Entry.getValue())}); + for (const auto &[FieldName, FieldLoc] : Loc.synthetic_fields()) { + QualType FieldType = FieldLoc->getType(); + if (FieldType->isRecordType()) { + Env.initializeFieldsWithValues(*cast(FieldLoc)); + } else { + if (Value *Val = Env.createValue(FieldType)) + Env.setValue(*FieldLoc, *Val); + } } - auto &Loc = Env.getDataflowAnalysisContext().createRecordStorageLocation( - Type, std::move(FieldLocs), std::move(SyntheticFieldLocs)); - RecordValue &RecordVal = Env.create(Loc); - - Env.setValue(Loc, RecordVal); - - Env.setValue(*S, RecordVal); - // FIXME: Implement array initialization. } diff --git a/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp index 595f70f819ddb..1b73c5d683016 100644 --- a/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp +++ b/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp @@ -369,17 +369,10 @@ builtinTransferInitializer(const CFGInitializer &Elt, ParentLoc->setChild(*Member, InitExprLoc); } else if (auto *InitExprVal = Env.getValue(*InitExpr)) { assert(MemberLoc != nullptr); - if (Member->getType()->isRecordType()) { - auto *InitValStruct = cast(InitExprVal); - // FIXME: Rather than performing a copy here, we should really be - // initializing the field in place. This would require us to propagate the - // storage location of the field to the AST node that creates the - // `RecordValue`. - copyRecord(InitValStruct->getLoc(), - *cast(MemberLoc), Env); - } else { + // Record-type initializers construct themselves directly into the result + // object, so there is no need to handle them here. + if (!Member->getType()->isRecordType()) Env.setValue(*MemberLoc, *InitExprVal); - } } } diff --git a/clang/lib/Analysis/ObjCNoReturn.cpp b/clang/lib/Analysis/ObjCNoReturn.cpp index 9d7c365c3b992..9e651c29e085d 100644 --- a/clang/lib/Analysis/ObjCNoReturn.cpp +++ b/clang/lib/Analysis/ObjCNoReturn.cpp @@ -17,7 +17,8 @@ using namespace clang; -static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) { +static bool isSubclass(const ObjCInterfaceDecl *Class, + const IdentifierInfo *II) { if (!Class) return false; if (Class->getIdentifier() == II) @@ -30,7 +31,7 @@ ObjCNoReturn::ObjCNoReturn(ASTContext &C) NSExceptionII(&C.Idents.get("NSException")) { // Generate selectors. 
- SmallVector II; + SmallVector II; // raise:format: II.push_back(&C.Idents.get("raise")); diff --git a/clang/lib/Analysis/UninitializedValues.cpp b/clang/lib/Analysis/UninitializedValues.cpp index e9111ded64eb1..bf2f730618650 100644 --- a/clang/lib/Analysis/UninitializedValues.cpp +++ b/clang/lib/Analysis/UninitializedValues.cpp @@ -44,7 +44,7 @@ static bool recordIsNotEmpty(const RecordDecl *RD) { // We consider a record decl to be empty if it contains only unnamed bit- // fields, zero-width fields, and fields of empty record type. for (const auto *FD : RD->fields()) { - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; if (FD->isZeroSize(FD->getASTContext())) continue; diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp index e03fe1b683004..c42e70d5b95ac 100644 --- a/clang/lib/Analysis/UnsafeBufferUsage.cpp +++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp @@ -1114,7 +1114,7 @@ class UPCAddressofArraySubscriptGadget : public FixableGadget { virtual DeclUseList getClaimedVarUseSites() const override { const auto *ArraySubst = cast(Node->getSubExpr()); const auto *DRE = - cast(ArraySubst->getBase()->IgnoreImpCasts()); + cast(ArraySubst->getBase()->IgnoreParenImpCasts()); return {DRE}; } }; diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp index 1b1da6a1356f2..113483db5729b 100644 --- a/clang/lib/Basic/Cuda.cpp +++ b/clang/lib/Basic/Cuda.cpp @@ -86,7 +86,7 @@ static const CudaArchToStringMap arch_names[] = { // clang-format off {CudaArch::UNUSED, "", ""}, SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi - SM(30), SM(32), SM(35), SM(37), // Kepler + SM(30), {CudaArch::SM_32_, "sm_32", "compute_32"}, SM(35), SM(37), // Kepler SM(50), SM(52), SM(53), // Maxwell SM(60), SM(61), SM(62), // Pascal SM(70), SM(72), // Volta @@ -186,7 +186,7 @@ CudaVersion MinVersionForCudaArch(CudaArch A) { case CudaArch::SM_20: case CudaArch::SM_21: case CudaArch::SM_30: - case CudaArch::SM_32: + case CudaArch::SM_32_: case CudaArch::SM_35: case CudaArch::SM_37: case CudaArch::SM_50: @@ -231,7 +231,7 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) { case CudaArch::SM_21: return CudaVersion::CUDA_80; case CudaArch::SM_30: - case CudaArch::SM_32: + case CudaArch::SM_32_: return CudaVersion::CUDA_102; case CudaArch::SM_35: case CudaArch::SM_37: diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp index 46f3e0d3aabc1..fca20375aeb5c 100644 --- a/clang/lib/Basic/IdentifierTable.cpp +++ b/clang/lib/Basic/IdentifierTable.cpp @@ -541,7 +541,8 @@ unsigned Selector::getNumArgs() const { return SI->getNumArgs(); } -IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const { +const IdentifierInfo * +Selector::getIdentifierInfoForSlot(unsigned argIndex) const { if (getIdentifierInfoFlag() < MultiArg) { assert(argIndex == 0 && "illegal keyword index"); return getAsIdentifierInfo(); @@ -553,7 +554,7 @@ IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const { } StringRef Selector::getNameForSlot(unsigned int argIndex) const { - IdentifierInfo *II = getIdentifierInfoForSlot(argIndex); + const IdentifierInfo *II = getIdentifierInfoForSlot(argIndex); return II ? 
II->getName() : StringRef();
 }
@@ -574,7 +575,7 @@ std::string Selector::getAsString() const {
     return "";
   if (getIdentifierInfoFlag() < MultiArg) {
-    IdentifierInfo *II = getAsIdentifierInfo();
+    const IdentifierInfo *II = getAsIdentifierInfo();
     if (getNumArgs() == 0) {
       assert(II && "If the number of arguments is 0 then II is guaranteed to "
@@ -608,7 +609,7 @@ static bool startsWithWord(StringRef name, StringRef word) {
 }
 ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
-  IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+  const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
   if (!first) return OMF_None;
   StringRef name = first->getName();
@@ -655,7 +656,7 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
 }
 ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
-  IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+  const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
   if (!first) return OIT_None;
   StringRef name = first->getName();
@@ -683,7 +684,7 @@ ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
 }
 ObjCStringFormatFamily Selector::getStringFormatFamilyImpl(Selector sel) {
-  IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+  const IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
   if (!first) return SFF_None;
   StringRef name = first->getName();
@@ -750,7 +751,8 @@ size_t SelectorTable::getTotalMemory() const {
   return SelTabImpl.Allocator.getTotalMemory();
 }
-Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
+Selector SelectorTable::getSelector(unsigned nKeys,
+                                    const IdentifierInfo **IIV) {
   if (nKeys < 2)
     return Selector(IIV[0], nKeys);
diff --git a/clang/lib/Basic/Module.cpp b/clang/lib/Basic/Module.cpp
index 256365d66bb90..bb212cde87882 100644
--- a/clang/lib/Basic/Module.cpp
+++ b/clang/lib/Basic/Module.cpp
@@ -305,6 +305,10 @@ bool Module::directlyUses(const Module *Requested) {
   if (Requested->fullModuleNameIs({"_Builtin_stddef", "max_align_t"}) ||
       Requested->fullModuleNameIs({"_Builtin_stddef_wint_t"}))
     return true;
+  // Darwin is allowed to use our builtin 'ptrauth.h' and its accompanying
+  // module.
+ if (!Requested->Parent && Requested->Name == "ptrauth") + return true; if (NoUndeclaredIncludes) UndeclaredUses.insert(Requested); diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp index cf8eedffe6966..acdc5d9daadd5 100644 --- a/clang/lib/Basic/Targets/NVPTX.cpp +++ b/clang/lib/Basic/Targets/NVPTX.cpp @@ -243,7 +243,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts, return "210"; case CudaArch::SM_30: return "300"; - case CudaArch::SM_32: + case CudaArch::SM_32_: return "320"; case CudaArch::SM_35: return "350"; diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index f3d705e1551fe..a7ce9dda34bdd 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -353,7 +353,8 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector &Features, if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx")) HasLegalHalfType = true; - FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access"); + FastUnalignedAccess = llvm::is_contained(Features, "+unaligned-scalar-mem") && + llvm::is_contained(Features, "+unaligned-vector-mem"); if (llvm::is_contained(Features, "+experimental")) HasExperimental = true; diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h index 8b1a00776b617..f871b19a382f6 100644 --- a/clang/lib/Basic/Targets/SPIR.h +++ b/clang/lib/Basic/Targets/SPIR.h @@ -426,7 +426,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRVTargetInfo : public BaseSPIRVTargetInfo { // SPIR-V IDs are represented with a single 32-bit word. SizeType = TargetInfo::UnsignedInt; resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-" - "v96:128-v192:256-v256:256-v512:512-v1024:1024"); + "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1"); } void getTargetDefines(const LangOptions &Opts, @@ -447,7 +447,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRV32TargetInfo : public BaseSPIRVTargetInfo { SizeType = TargetInfo::UnsignedInt; PtrDiffType = IntPtrType = TargetInfo::SignedInt; resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-" - "v96:128-v192:256-v256:256-v512:512-v1024:1024"); + "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1"); } void getTargetDefines(const LangOptions &Opts, @@ -468,7 +468,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRV64TargetInfo : public BaseSPIRVTargetInfo { SizeType = TargetInfo::UnsignedLong; PtrDiffType = IntPtrType = TargetInfo::SignedLong; resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-" - "v96:128-v192:256-v256:256-v512:512-v1024:1024"); + "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1"); } void getTargetDefines(const LangOptions &Opts, diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp index 1966af17904d6..bf1767c87fe1c 100644 --- a/clang/lib/Basic/Targets/X86.cpp +++ b/clang/lib/Basic/Targets/X86.cpp @@ -954,6 +954,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__CCMP__"); if (HasCF) Builder.defineMacro("__CF__"); + // Condition here is aligned with the feature set of mapxf in Options.td + if (HasEGPR && HasPush2Pop2 && HasPPX && HasNDD) + Builder.defineMacro("__APX_F__"); // Each case falls through to the previous one here. 
switch (SSELevel) { diff --git a/clang/lib/CIR/CMakeLists.txt b/clang/lib/CIR/CMakeLists.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/lib/CMakeLists.txt b/clang/lib/CMakeLists.txt index 0cac86451f39e..14ba55360fe05 100644 --- a/clang/lib/CMakeLists.txt +++ b/clang/lib/CMakeLists.txt @@ -31,3 +31,7 @@ if(CLANG_INCLUDE_TESTS) endif() add_subdirectory(Interpreter) add_subdirectory(Support) + +if(CLANG_ENABLE_CIR) + add_subdirectory(CIR) +endif() diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp index dd59101ecc81b..1e4d48e13c025 100644 --- a/clang/lib/CodeGen/ABIInfoImpl.cpp +++ b/clang/lib/CodeGen/ABIInfoImpl.cpp @@ -187,7 +187,7 @@ CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); Address NextPtr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); - CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); + CGF.Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr); // If the argument is smaller than a slot, and this is a big-endian // target, the argument will be right-adjusted in its slot. @@ -239,15 +239,15 @@ Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1, const llvm::Twine &Name) { assert(Addr1.getType() == Addr2.getType()); llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); - PHI->addIncoming(Addr1.getPointer(), Block1); - PHI->addIncoming(Addr2.getPointer(), Block2); + PHI->addIncoming(Addr1.emitRawPointer(CGF), Block1); + PHI->addIncoming(Addr2.emitRawPointer(CGF), Block2); CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); return Address(PHI, Addr1.getElementType(), Align); } bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr) { - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) return true; QualType FT = FD->getType(); @@ -400,7 +400,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty); llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy); llvm::Value *Addr = - CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); + CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), BaseTy); return Address(Addr, ElementTy, TyAlignForABI); } else { assert((AI.isDirect() || AI.isExtend()) && @@ -416,7 +416,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); Address Temp = CGF.CreateMemTemp(Ty, "varet"); - Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), + Val = CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), CGF.ConvertTypeForMem(Ty)); CGF.Builder.CreateStore(Val, Temp); return Temp; diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h index cf48df8f5e736..35ec370a139c9 100644 --- a/clang/lib/CodeGen/Address.h +++ b/clang/lib/CodeGen/Address.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H #include "clang/AST/CharUnits.h" +#include "clang/AST/Type.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/IR/Constants.h" #include "llvm/Support/MathExtras.h" @@ -22,28 +23,41 @@ namespace clang { namespace CodeGen { +class Address; +class CGBuilderTy; +class CodeGenFunction; +class CodeGenModule; + // Indicates whether a pointer is known not to be null. enum KnownNonNull_t { NotKnownNonNull, KnownNonNull }; -/// An aligned address. 
-class Address { +/// An abstract representation of an aligned address. This is designed to be an +/// IR-level abstraction, carrying just the information necessary to perform IR +/// operations on an address like loads and stores. In particular, it doesn't +/// carry C type information or allow the representation of things like +/// bit-fields; clients working at that level should generally be using +/// `LValue`. +/// The pointer contained in this class is known to be unsigned. +class RawAddress { llvm::PointerIntPair PointerAndKnownNonNull; llvm::Type *ElementType; CharUnits Alignment; protected: - Address(std::nullptr_t) : ElementType(nullptr) {} + RawAddress(std::nullptr_t) : ElementType(nullptr) {} public: - Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment, - KnownNonNull_t IsKnownNonNull = NotKnownNonNull) + RawAddress(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) : PointerAndKnownNonNull(Pointer, IsKnownNonNull), ElementType(ElementType), Alignment(Alignment) { assert(Pointer != nullptr && "Pointer cannot be null"); assert(ElementType != nullptr && "Element type cannot be null"); } - static Address invalid() { return Address(nullptr); } + inline RawAddress(Address Addr); + + static RawAddress invalid() { return RawAddress(nullptr); } bool isValid() const { return PointerAndKnownNonNull.getPointer() != nullptr; } @@ -80,6 +94,133 @@ class Address { return Alignment; } + /// Return address with different element type, but same pointer and + /// alignment. + RawAddress withElementType(llvm::Type *ElemTy) const { + return RawAddress(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); + } + + KnownNonNull_t isKnownNonNull() const { + assert(isValid()); + return (KnownNonNull_t)PointerAndKnownNonNull.getInt(); + } +}; + +/// Like RawAddress, an abstract representation of an aligned address, but the +/// pointer contained in this class is possibly signed. +class Address { + friend class CGBuilderTy; + + // The boolean flag indicates whether the pointer is known to be non-null. + llvm::PointerIntPair Pointer; + + /// The expected IR type of the pointer. Carrying accurate element type + /// information in Address makes it more convenient to work with Address + /// values and allows frontend assertions to catch simple mistakes. + llvm::Type *ElementType = nullptr; + + CharUnits Alignment; + + /// Offset from the base pointer. + llvm::Value *Offset = nullptr; + + llvm::Value *emitRawPointerSlow(CodeGenFunction &CGF) const; + +protected: + Address(std::nullptr_t) : ElementType(nullptr) {} + +public: + Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) + : Pointer(pointer, IsKnownNonNull), ElementType(elementType), + Alignment(alignment) { + assert(pointer != nullptr && "Pointer cannot be null"); + assert(elementType != nullptr && "Element type cannot be null"); + assert(!alignment.isZero() && "Alignment cannot be zero"); + } + + Address(llvm::Value *BasePtr, llvm::Type *ElementType, CharUnits Alignment, + llvm::Value *Offset, KnownNonNull_t IsKnownNonNull = NotKnownNonNull) + : Pointer(BasePtr, IsKnownNonNull), ElementType(ElementType), + Alignment(Alignment), Offset(Offset) {} + + Address(RawAddress RawAddr) + : Pointer(RawAddr.isValid() ? RawAddr.getPointer() : nullptr), + ElementType(RawAddr.isValid() ? RawAddr.getElementType() : nullptr), + Alignment(RawAddr.isValid() ? 
RawAddr.getAlignment() + : CharUnits::Zero()) {} + + static Address invalid() { return Address(nullptr); } + bool isValid() const { return Pointer.getPointer() != nullptr; } + + /// This function is used in situations where the caller is doing some sort of + /// opaque "laundering" of the pointer. + void replaceBasePointer(llvm::Value *P) { + assert(isValid() && "pointer isn't valid"); + assert(P->getType() == Pointer.getPointer()->getType() && + "Pointer's type changed"); + Pointer.setPointer(P); + assert(isValid() && "pointer is invalid after replacement"); + } + + CharUnits getAlignment() const { return Alignment; } + + void setAlignment(CharUnits Value) { Alignment = Value; } + + llvm::Value *getBasePointer() const { + assert(isValid() && "pointer isn't valid"); + return Pointer.getPointer(); + } + + /// Return the type of the pointer value. + llvm::PointerType *getType() const { + return llvm::PointerType::get( + ElementType, + llvm::cast(Pointer.getPointer()->getType()) + ->getAddressSpace()); + } + + /// Return the type of the values stored in this address. + llvm::Type *getElementType() const { + assert(isValid()); + return ElementType; + } + + /// Return the address space that this address resides in. + unsigned getAddressSpace() const { return getType()->getAddressSpace(); } + + /// Return the IR name of the pointer value. + llvm::StringRef getName() const { return Pointer.getPointer()->getName(); } + + // This function is called only in CGBuilderBaseTy::CreateElementBitCast. + void setElementType(llvm::Type *Ty) { + assert(hasOffset() && + "this funcion shouldn't be called when there is no offset"); + ElementType = Ty; + } + + /// Whether the pointer is known not to be null. + KnownNonNull_t isKnownNonNull() const { + assert(isValid()); + return (KnownNonNull_t)Pointer.getInt(); + } + + Address setKnownNonNull() { + assert(isValid()); + Pointer.setInt(KnownNonNull); + return *this; + } + + bool hasOffset() const { return Offset; } + + llvm::Value *getOffset() const { return Offset; } + + /// Return the pointer contained in this class after authenticating it and + /// adding offset to it if necessary. + llvm::Value *emitRawPointer(CodeGenFunction &CGF) const { + return getBasePointer(); + } + /// Return address with different pointer, but same element type and /// alignment. Address withPointer(llvm::Value *NewPointer, @@ -91,61 +232,59 @@ class Address { /// Return address with different alignment, but same pointer and element /// type. Address withAlignment(CharUnits NewAlignment) const { - return Address(getPointer(), getElementType(), NewAlignment, + return Address(Pointer.getPointer(), getElementType(), NewAlignment, isKnownNonNull()); } /// Return address with different element type, but same pointer and /// alignment. Address withElementType(llvm::Type *ElemTy) const { - return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull()); - } - - /// Whether the pointer is known not to be null. - KnownNonNull_t isKnownNonNull() const { - assert(isValid()); - return (KnownNonNull_t)PointerAndKnownNonNull.getInt(); - } - - /// Set the non-null bit. - Address setKnownNonNull() { - assert(isValid()); - PointerAndKnownNonNull.setInt(true); - return *this; + if (!hasOffset()) + return Address(getBasePointer(), ElemTy, getAlignment(), nullptr, + isKnownNonNull()); + Address A(*this); + A.ElementType = ElemTy; + return A; } }; +inline RawAddress::RawAddress(Address Addr) + : PointerAndKnownNonNull(Addr.isValid() ? Addr.getBasePointer() : nullptr, + Addr.isValid() ? 
Addr.isKnownNonNull() + : NotKnownNonNull), + ElementType(Addr.isValid() ? Addr.getElementType() : nullptr), + Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {} + /// A specialization of Address that requires the address to be an /// LLVM Constant. -class ConstantAddress : public Address { - ConstantAddress(std::nullptr_t) : Address(nullptr) {} +class ConstantAddress : public RawAddress { + ConstantAddress(std::nullptr_t) : RawAddress(nullptr) {} public: ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType, CharUnits alignment) - : Address(pointer, elementType, alignment) {} + : RawAddress(pointer, elementType, alignment) {} static ConstantAddress invalid() { return ConstantAddress(nullptr); } llvm::Constant *getPointer() const { - return llvm::cast(Address::getPointer()); + return llvm::cast(RawAddress::getPointer()); } ConstantAddress withElementType(llvm::Type *ElemTy) const { return ConstantAddress(getPointer(), ElemTy, getAlignment()); } - static bool isaImpl(Address addr) { + static bool isaImpl(RawAddress addr) { return llvm::isa(addr.getPointer()); } - static ConstantAddress castImpl(Address addr) { + static ConstantAddress castImpl(RawAddress addr) { return ConstantAddress(llvm::cast(addr.getPointer()), addr.getElementType(), addr.getAlignment()); } }; - } // Present a minimal LLVM-like casting interface. diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index 6dc0719b78121..5e2f90f74c81f 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -80,7 +80,7 @@ namespace { AtomicSizeInBits = C.toBits( C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1) .alignTo(lvalue.getAlignment())); - llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer(); + llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF); auto OffsetInChars = (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) * lvalue.getAlignment(); @@ -140,13 +140,13 @@ namespace { const LValue &getAtomicLValue() const { return LVal; } llvm::Value *getAtomicPointer() const { if (LVal.isSimple()) - return LVal.getPointer(CGF); + return LVal.emitRawPointer(CGF); else if (LVal.isBitField()) - return LVal.getBitFieldPointer(); + return LVal.getRawBitFieldPointer(CGF); else if (LVal.isVectorElt()) - return LVal.getVectorPointer(); + return LVal.getRawVectorPointer(CGF); assert(LVal.isExtVectorElt()); - return LVal.getExtVectorPointer(); + return LVal.getRawExtVectorPointer(CGF); } Address getAtomicAddress() const { llvm::Type *ElTy; @@ -369,7 +369,7 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const { return false; CGF.Builder.CreateMemSet( - addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0), + addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0), CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(), LVal.getAlignment().getAsAlign()); return true; @@ -1056,7 +1056,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { return getTargetHooks().performAddrSpaceCast( *this, V, AS, LangAS::opencl_generic, DestType, false); }; - Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(), + + Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this), E->getPtr()->getType())), getContext().VoidPtrTy); @@ -1087,10 +1088,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { LibCallName = "__atomic_compare_exchange"; RetTy = getContext().BoolTy; HaveRetTy = true; - Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(), + 
Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this), E->getVal1()->getType())), getContext().VoidPtrTy); - Args.add(RValue::get(CastToGenericAddrSpace(Val2.getPointer(), + Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this), E->getVal2()->getType())), getContext().VoidPtrTy); Args.add(RValue::get(Order), getContext().IntTy); @@ -1106,7 +1107,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { case AtomicExpr::AO__scoped_atomic_exchange: case AtomicExpr::AO__scoped_atomic_exchange_n: LibCallName = "__atomic_exchange"; - Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(), + Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this), E->getVal1()->getType())), getContext().VoidPtrTy); break; @@ -1121,7 +1122,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { LibCallName = "__atomic_store"; RetTy = getContext().VoidTy; HaveRetTy = true; - Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(), + Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this), E->getVal1()->getType())), getContext().VoidPtrTy); break; @@ -1200,7 +1201,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { if (!HaveRetTy) { // Value is returned through parameter before the order. RetTy = getContext().VoidTy; - Args.add(RValue::get(CastToGenericAddrSpace(Dest.getPointer(), RetTy)), + Args.add(RValue::get( + CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)), getContext().VoidPtrTy); } // Order is always the last parameter. @@ -1526,7 +1528,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, } else TempAddr = CreateTempAlloca(); - EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile); + EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile); // Okay, turn that back into the original value or whole atomic (for // non-simple lvalues) type. @@ -1682,9 +1684,9 @@ std::pair AtomicInfo::EmitAtomicCompareExchange( if (shouldUseLibcall()) { // Produce a source address. 
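// Illustrative sketch, not part of the diff: the call-site pattern behind most
// of the mechanical changes in this patch. An Address can now carry an offset
// on top of its base pointer (and, per the new class comment, the base may be
// signed), so code that needs a plain llvm::Value* asks the Address to
// materialize one instead of reading the pointer member directly. The helper
// name below is hypothetical and assumes the surrounding CodeGen headers.
static llvm::Value *exampleRawPointer(clang::CodeGen::CodeGenFunction &CGF,
                                      clang::CodeGen::Address Addr) {
  // Previously: return Addr.getPointer();
  // emitRawPointer returns the contained pointer after applying any offset
  // (and, in the signed-pointer case, authentication), per Address.h above.
  return Addr.emitRawPointer(CGF);
}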
Address ExpectedAddr = materializeRValue(Expected); - Address DesiredAddr = materializeRValue(Desired); - auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), - DesiredAddr.getPointer(), + llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF); + llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF); + auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, Success, Failure); return std::make_pair( convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(), @@ -1767,7 +1769,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall( Address ExpectedAddr = CreateTempAlloca(); - EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile); + EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile); auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); CGF.EmitBlock(ContBB); @@ -1781,10 +1783,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall( AggValueSlot::ignored(), SourceLocation(), /*AsValue=*/false); EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr); + llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF); + llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF); auto *Res = - EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), - DesiredAddr.getPointer(), - AO, Failure); + EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure); CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } @@ -1805,7 +1807,11 @@ void AtomicInfo::EmitAtomicUpdateOp( /*NumReservedValues=*/2); PHI->addIncoming(OldVal, CurBB); Address NewAtomicAddr = CreateTempAlloca(); - Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr); + Address NewAtomicIntAddr = + shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true) + ? 
castToAtomicIntPointer(NewAtomicAddr) + : NewAtomicAddr; + if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || requiresMemSetZero(getAtomicAddress().getElementType())) { CGF.Builder.CreateStore(PHI, NewAtomicIntAddr); @@ -1854,7 +1860,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, Address ExpectedAddr = CreateTempAlloca(); - EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile); + EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile); auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); CGF.EmitBlock(ContBB); @@ -1865,10 +1871,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, CGF.Builder.CreateStore(OldVal, DesiredAddr); } EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr); + llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF); + llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF); auto *Res = - EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), - DesiredAddr.getPointer(), - AO, Failure); + EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure); CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } @@ -1968,7 +1974,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, args.add(RValue::get(atomics.getAtomicSizeValue()), getContext().getSizeType()); args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy); - args.add(RValue::get(srcAddr.getPointer()), getContext().VoidPtrTy); + args.add(RValue::get(srcAddr.emitRawPointer(*this)), + getContext().VoidPtrTy); args.add( RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))), getContext().IntTy); diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index 7f7ee00a09bd8..1413cc1c0beba 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -36,7 +36,8 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name) : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false), NoEscape(false), HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false), CapturesNonExternalType(false), - LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) { + LocalAddress(RawAddress::invalid()), StructureType(nullptr), + Block(block) { // Skip asm prefix, if any. 'name' is usually taken directly from // the mangled name of the enclosing function. @@ -794,7 +795,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // Otherwise, we have to emit this as a local block. - Address blockAddr = blockInfo.LocalAddress; + RawAddress blockAddr = blockInfo.LocalAddress; assert(blockAddr.isValid() && "block has no address!"); llvm::Constant *isa; @@ -939,7 +940,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { if (CI.isNested()) byrefPointer = Builder.CreateLoad(src, "byref.capture"); else - byrefPointer = src.getPointer(); + byrefPointer = src.emitRawPointer(*this); // Write that void* into the capture field. Builder.CreateStore(byrefPointer, blockField); @@ -962,9 +963,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // If it's a reference variable, copy the reference into the block field. } else if (type->getAs()) { - Builder.CreateStore(src.getPointer(), blockField); + Builder.CreateStore(src.emitRawPointer(*this), blockField); - // If type is const-qualified, copy the value into the block field. 
+ // If type is const-qualified, copy the value into the block field. } else if (type.isConstQualified() && type.getObjCLifetime() == Qualifiers::OCL_Strong && CGM.getCodeGenOpts().OptimizationLevel != 0) { @@ -1378,7 +1379,7 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D, // Allocate a stack slot like for any local variable to guarantee optimal // debug info at -O0. The mem2reg pass will eliminate it when optimizing. - Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr"); + RawAddress alloc = CreateMemTemp(D->getType(), D->getName() + ".addr"); Builder.CreateStore(arg, alloc); if (CGDebugInfo *DI = getDebugInfo()) { if (CGM.getCodeGenOpts().hasReducedDebugInfo()) { @@ -1447,7 +1448,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction( selfTy = getContext().getPointerType(getContext().getAddrSpaceQualType( getContext().VoidTy, LangAS::opencl_generic)); - IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor"); + const IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor"); ImplicitParamDecl SelfDecl(getContext(), const_cast(blockDecl), SourceLocation(), II, selfTy, @@ -1498,7 +1499,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction( // frame setup instruction by llvm::DwarfDebug::beginFunction(). auto NL = ApplyDebugLocation::CreateEmpty(*this); Builder.CreateStore(BlockPointer, Alloca); - BlockPointerDbgLoc = Alloca.getPointer(); + BlockPointerDbgLoc = Alloca.emitRawPointer(*this); } // If we have a C++ 'this' reference, go ahead and force it into @@ -1558,8 +1559,8 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction( const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (capture.isConstant()) { auto addr = LocalDeclMap.find(variable)->second; - (void)DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(), - Builder); + (void)DI->EmitDeclareOfAutoVariable( + variable, addr.emitRawPointer(*this), Builder); continue; } @@ -1663,7 +1664,7 @@ struct CallBlockRelease final : EHScopeStack::Cleanup { if (LoadBlockVarAddr) { BlockVarAddr = CGF.Builder.CreateLoad(Addr); } else { - BlockVarAddr = Addr.getPointer(); + BlockVarAddr = Addr.emitRawPointer(CGF); } CGF.BuildBlockRelease(BlockVarAddr, FieldFlags, CanThrow); @@ -1963,13 +1964,15 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { // it. It's not quite worth the annoyance to avoid creating it in the // first place. 
if (!needsEHCleanup(captureType.isDestructedType())) - cast(dstField.getPointer())->eraseFromParent(); + if (auto *I = + cast_or_null(dstField.getBasePointer())) + I->eraseFromParent(); } break; } case BlockCaptureEntityKind::BlockObject: { llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src"); - llvm::Value *dstAddr = dstField.getPointer(); + llvm::Value *dstAddr = dstField.emitRawPointer(*this); llvm::Value *args[] = { dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask()) }; @@ -2140,7 +2143,7 @@ class ObjectByrefHelpers final : public BlockByrefHelpers { llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags); llvm::FunctionCallee fn = CGF.CGM.getBlockObjectAssign(); - llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal }; + llvm::Value *args[] = {destField.emitRawPointer(CGF), srcValue, flagsVal}; CGF.EmitNounwindRuntimeCall(fn, args); } @@ -2697,7 +2700,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { storeHeaderField(V, getPointerSize(), "byref.isa"); // Store the address of the variable into its own forwarding pointer. - storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding"); + storeHeaderField(addr.emitRawPointer(*this), getPointerSize(), + "byref.forwarding"); // Blocks ABI: // c) the flags field is set to either 0 if no helper functions are @@ -2788,7 +2792,7 @@ static void configureBlocksRuntimeObject(CodeGenModule &CGM, auto *GV = cast(C->stripPointerCasts()); if (CGM.getTarget().getTriple().isOSBinFormatCOFF()) { - IdentifierInfo &II = CGM.getContext().Idents.get(C->getName()); + const IdentifierInfo &II = CGM.getContext().Idents.get(C->getName()); TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl(); DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl); diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h index 4ef1ae9f33655..8d10c4f69b202 100644 --- a/clang/lib/CodeGen/CGBlocks.h +++ b/clang/lib/CodeGen/CGBlocks.h @@ -271,7 +271,8 @@ class CGBlockInfo { /// The block's captures. Non-constant captures are sorted by their offsets. llvm::SmallVector SortedCaptures; - Address LocalAddress; + // Currently we assume that block-pointer types are never signed. + RawAddress LocalAddress; llvm::StructType *StructureType; const BlockDecl *Block; const BlockExpr *BlockExpression; diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h index bf5ab171d720d..6dd9da7c4cade 100644 --- a/clang/lib/CodeGen/CGBuilder.h +++ b/clang/lib/CodeGen/CGBuilder.h @@ -10,7 +10,9 @@ #define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H #include "Address.h" +#include "CGValue.h" #include "CodeGenTypeCache.h" +#include "llvm/Analysis/Utils/Local.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Type.h" @@ -18,12 +20,15 @@ namespace clang { namespace CodeGen { +class CGBuilderTy; class CodeGenFunction; /// This is an IRBuilder insertion helper that forwards to /// CodeGenFunction::InsertHelper, which adds necessary metadata to /// instructions. class CGBuilderInserter final : public llvm::IRBuilderDefaultInserter { + friend CGBuilderTy; + public: CGBuilderInserter() = default; explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {} @@ -43,10 +48,42 @@ typedef llvm::IRBuilder CGBuilderBaseTy; class CGBuilderTy : public CGBuilderBaseTy { + friend class Address; + /// Storing a reference to the type cache here makes it a lot easier /// to build natural-feeling, target-specific IR. 
const CodeGenTypeCache &TypeCache; + CodeGenFunction *getCGF() const { return getInserter().CGF; } + + llvm::Value *emitRawPointerFromAddress(Address Addr) const { + return Addr.getBasePointer(); + } + + template + Address createConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, + const llvm::Twine &Name) { + const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); + llvm::GetElementPtrInst *GEP; + if (IsInBounds) + GEP = cast(CreateConstInBoundsGEP2_32( + Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1, + Name)); + else + GEP = cast(CreateConstGEP2_32( + Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1, + Name)); + llvm::APInt Offset( + DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0, + /*isSigned=*/true); + if (!GEP->accumulateConstantOffset(DL, Offset)) + llvm_unreachable("offset of GEP with constants is always computable"); + return Address(GEP, GEP->getResultElementType(), + Addr.getAlignment().alignmentAtOffset( + CharUnits::fromQuantity(Offset.getSExtValue())), + IsInBounds ? Addr.isKnownNonNull() : NotKnownNonNull); + } + public: CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C) : CGBuilderBaseTy(C), TypeCache(TypeCache) {} @@ -69,20 +106,22 @@ class CGBuilderTy : public CGBuilderBaseTy { // Note that we intentionally hide the CreateLoad APIs that don't // take an alignment. llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") { - return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(), + return CreateAlignedLoad(Addr.getElementType(), + emitRawPointerFromAddress(Addr), Addr.getAlignment().getAsAlign(), Name); } llvm::LoadInst *CreateLoad(Address Addr, const char *Name) { // This overload is required to prevent string literals from // ending up in the IsVolatile overload. - return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(), + return CreateAlignedLoad(Addr.getElementType(), + emitRawPointerFromAddress(Addr), Addr.getAlignment().getAsAlign(), Name); } llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile, const llvm::Twine &Name = "") { - return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(), - Addr.getAlignment().getAsAlign(), IsVolatile, - Name); + return CreateAlignedLoad( + Addr.getElementType(), emitRawPointerFromAddress(Addr), + Addr.getAlignment().getAsAlign(), IsVolatile, Name); } using CGBuilderBaseTy::CreateAlignedLoad; @@ -96,7 +135,7 @@ class CGBuilderTy : public CGBuilderBaseTy { // take an alignment. 
llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile = false) { - return CreateAlignedStore(Val, Addr.getPointer(), + return CreateAlignedStore(Val, emitRawPointerFromAddress(Addr), Addr.getAlignment().getAsAlign(), IsVolatile); } @@ -132,33 +171,41 @@ class CGBuilderTy : public CGBuilderBaseTy { llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID = llvm::SyncScope::System) { return CGBuilderBaseTy::CreateAtomicCmpXchg( - Addr.getPointer(), Cmp, New, Addr.getAlignment().getAsAlign(), - SuccessOrdering, FailureOrdering, SSID); + Addr.emitRawPointer(*getCGF()), Cmp, New, + Addr.getAlignment().getAsAlign(), SuccessOrdering, FailureOrdering, + SSID); } llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID = llvm::SyncScope::System) { - return CGBuilderBaseTy::CreateAtomicRMW(Op, Addr.getPointer(), Val, - Addr.getAlignment().getAsAlign(), - Ordering, SSID); + return CGBuilderBaseTy::CreateAtomicRMW( + Op, Addr.emitRawPointer(*getCGF()), Val, + Addr.getAlignment().getAsAlign(), Ordering, SSID); } using CGBuilderBaseTy::CreateAddrSpaceCast; Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, + llvm::Type *ElementTy, const llvm::Twine &Name = "") { - return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name), - Addr.isKnownNonNull()); + if (!Addr.hasOffset()) + return Address(CreateAddrSpaceCast(Addr.getBasePointer(), Ty, Name), + ElementTy, Addr.getAlignment(), nullptr, + Addr.isKnownNonNull()); + // Eagerly force a raw address if these is an offset. + return RawAddress( + CreateAddrSpaceCast(Addr.emitRawPointer(*getCGF()), Ty, Name), + ElementTy, Addr.getAlignment(), Addr.isKnownNonNull()); } using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast; Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name = "") { - llvm::Value *Ptr = - CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name); - return Address(Ptr, ElementTy, Addr.getAlignment(), Addr.isKnownNonNull()); + if (Addr.getType()->getAddressSpace() == Ty->getPointerAddressSpace()) + return Addr.withElementType(ElementTy); + return CreateAddrSpaceCast(Addr, Ty, ElementTy, Name); } /// Given @@ -176,10 +223,11 @@ class CGBuilderTy : public CGBuilderBaseTy { const llvm::StructLayout *Layout = DL.getStructLayout(ElTy); auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index)); - return Address( - CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name), - ElTy->getElementType(Index), - Addr.getAlignment().alignmentAtOffset(Offset), Addr.isKnownNonNull()); + return Address(CreateStructGEP(Addr.getElementType(), Addr.getBasePointer(), + Index, Name), + ElTy->getElementType(Index), + Addr.getAlignment().alignmentAtOffset(Offset), + Addr.isKnownNonNull()); } /// Given @@ -198,7 +246,7 @@ class CGBuilderTy : public CGBuilderBaseTy { CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy->getElementType())); return Address( - CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(), + CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(), {getSize(CharUnits::Zero()), getSize(Index)}, Name), ElTy->getElementType(), Addr.getAlignment().alignmentAtOffset(Index * EltSize), @@ -216,10 +264,10 @@ class CGBuilderTy : public CGBuilderBaseTy { const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy)); - 
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(), - getSize(Index), Name), - ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize), - Addr.isKnownNonNull()); + return Address( + CreateInBoundsGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name), + ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize), + Addr.isKnownNonNull()); } /// Given @@ -229,110 +277,133 @@ class CGBuilderTy : public CGBuilderBaseTy { /// where i64 is actually the target word size. Address CreateConstGEP(Address Addr, uint64_t Index, const llvm::Twine &Name = "") { + llvm::Type *ElTy = Addr.getElementType(); const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); - CharUnits EltSize = - CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType())); + CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy)); - return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(), - getSize(Index), Name), + return Address(CreateGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name), Addr.getElementType(), - Addr.getAlignment().alignmentAtOffset(Index * EltSize), - NotKnownNonNull); + Addr.getAlignment().alignmentAtOffset(Index * EltSize)); } /// Create GEP with single dynamic index. The address alignment is reduced /// according to the element size. using CGBuilderBaseTy::CreateGEP; - Address CreateGEP(Address Addr, llvm::Value *Index, + Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name = "") { const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType())); return Address( - CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name), + CreateGEP(Addr.getElementType(), Addr.emitRawPointer(CGF), Index, Name), Addr.getElementType(), - Addr.getAlignment().alignmentOfArrayElement(EltSize), NotKnownNonNull); + Addr.getAlignment().alignmentOfArrayElement(EltSize)); } /// Given a pointer to i8, adjust it by a given constant offset. 
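// Note, not part of the diff: these GEP helpers all recompute the result
// alignment from the source Address. With a constant offset the new alignment
// is Addr.getAlignment().alignmentAtOffset(Offset), the largest alignment
// still guaranteed after the adjustment; for example, a 16-byte-aligned base
// advanced by 12 bytes is only known to be 4-byte aligned. With a runtime
// index, alignmentOfArrayElement(EltSize) keeps only what holds for every
// possible element, so the dynamic-index CreateGEP may report a smaller
// alignment than the constant-index forms.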
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name = "") { assert(Addr.getElementType() == TypeCache.Int8Ty); - return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(), - getSize(Offset), Name), - Addr.getElementType(), - Addr.getAlignment().alignmentAtOffset(Offset), - Addr.isKnownNonNull()); + return Address( + CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(), + getSize(Offset), Name), + Addr.getElementType(), Addr.getAlignment().alignmentAtOffset(Offset), + Addr.isKnownNonNull()); } + Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name = "") { assert(Addr.getElementType() == TypeCache.Int8Ty); - return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(), + return Address(CreateGEP(Addr.getElementType(), Addr.getBasePointer(), getSize(Offset), Name), Addr.getElementType(), - Addr.getAlignment().alignmentAtOffset(Offset), - NotKnownNonNull); + Addr.getAlignment().alignmentAtOffset(Offset)); } using CGBuilderBaseTy::CreateConstInBoundsGEP2_32; Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name = "") { - const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); + return createConstGEP2_32(Addr, Idx0, Idx1, Name); + } - auto *GEP = cast(CreateConstInBoundsGEP2_32( - Addr.getElementType(), Addr.getPointer(), Idx0, Idx1, Name)); - llvm::APInt Offset( - DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0, - /*isSigned=*/true); - if (!GEP->accumulateConstantOffset(DL, Offset)) - llvm_unreachable("offset of GEP with constants is always computable"); - return Address(GEP, GEP->getResultElementType(), - Addr.getAlignment().alignmentAtOffset( - CharUnits::fromQuantity(Offset.getSExtValue())), - Addr.isKnownNonNull()); + using CGBuilderBaseTy::CreateConstGEP2_32; + Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, + const llvm::Twine &Name = "") { + return createConstGEP2_32(Addr, Idx0, Idx1, Name); + } + + Address CreateGEP(Address Addr, ArrayRef IdxList, + llvm::Type *ElementType, CharUnits Align, + const Twine &Name = "") { + llvm::Value *Ptr = emitRawPointerFromAddress(Addr); + return RawAddress(CreateGEP(Addr.getElementType(), Ptr, IdxList, Name), + ElementType, Align); + } + + using CGBuilderBaseTy::CreateInBoundsGEP; + Address CreateInBoundsGEP(Address Addr, ArrayRef IdxList, + llvm::Type *ElementType, CharUnits Align, + const Twine &Name = "") { + return RawAddress(CreateInBoundsGEP(Addr.getElementType(), + emitRawPointerFromAddress(Addr), + IdxList, Name), + ElementType, Align, Addr.isKnownNonNull()); + } + + using CGBuilderBaseTy::CreateIsNull; + llvm::Value *CreateIsNull(Address Addr, const Twine &Name = "") { + if (!Addr.hasOffset()) + return CreateIsNull(Addr.getBasePointer(), Name); + // The pointer isn't null if Addr has an offset since offsets can always + // be applied inbound. 
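// (Expanded note, not part of the diff: because an attached offset is always
// applied as an inbounds adjustment of the base, the adjusted pointer cannot
// compare equal to null, so the whole check folds to the constant false
// returned below.)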
+ return llvm::ConstantInt::getFalse(Context); } using CGBuilderBaseTy::CreateMemCpy; llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile = false) { - return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(), - Src.getPointer(), Src.getAlignment().getAsAlign(), Size, - IsVolatile); + llvm::Value *DestPtr = emitRawPointerFromAddress(Dest); + llvm::Value *SrcPtr = emitRawPointerFromAddress(Src); + return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr, + Src.getAlignment().getAsAlign(), Size, IsVolatile); } llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size, bool IsVolatile = false) { - return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(), - Src.getPointer(), Src.getAlignment().getAsAlign(), Size, - IsVolatile); + llvm::Value *DestPtr = emitRawPointerFromAddress(Dest); + llvm::Value *SrcPtr = emitRawPointerFromAddress(Src); + return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr, + Src.getAlignment().getAsAlign(), Size, IsVolatile); } using CGBuilderBaseTy::CreateMemCpyInline; llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) { - return CreateMemCpyInline( - Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(), - Src.getAlignment().getAsAlign(), getInt64(Size)); + llvm::Value *DestPtr = emitRawPointerFromAddress(Dest); + llvm::Value *SrcPtr = emitRawPointerFromAddress(Src); + return CreateMemCpyInline(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr, + Src.getAlignment().getAsAlign(), getInt64(Size)); } using CGBuilderBaseTy::CreateMemMove; llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile = false) { - return CreateMemMove(Dest.getPointer(), Dest.getAlignment().getAsAlign(), - Src.getPointer(), Src.getAlignment().getAsAlign(), - Size, IsVolatile); + llvm::Value *DestPtr = emitRawPointerFromAddress(Dest); + llvm::Value *SrcPtr = emitRawPointerFromAddress(Src); + return CreateMemMove(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr, + Src.getAlignment().getAsAlign(), Size, IsVolatile); } using CGBuilderBaseTy::CreateMemSet; llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile = false) { - return CreateMemSet(Dest.getPointer(), Value, Size, + return CreateMemSet(emitRawPointerFromAddress(Dest), Value, Size, Dest.getAlignment().getAsAlign(), IsVolatile); } using CGBuilderBaseTy::CreateMemSetInline; llvm::CallInst *CreateMemSetInline(Address Dest, llvm::Value *Value, uint64_t Size) { - return CreateMemSetInline(Dest.getPointer(), + return CreateMemSetInline(emitRawPointerFromAddress(Dest), Dest.getAlignment().getAsAlign(), Value, getInt64(Size)); } @@ -346,16 +417,31 @@ class CGBuilderTy : public CGBuilderBaseTy { const llvm::StructLayout *Layout = DL.getStructLayout(ElTy); auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index)); - return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(), - Index, FieldIndex, DbgInfo), - ElTy->getElementType(Index), - Addr.getAlignment().alignmentAtOffset(Offset)); + return Address( + CreatePreserveStructAccessIndex(ElTy, emitRawPointerFromAddress(Addr), + Index, FieldIndex, DbgInfo), + ElTy->getElementType(Index), + Addr.getAlignment().alignmentAtOffset(Offset)); + } + + using CGBuilderBaseTy::CreatePreserveUnionAccessIndex; + Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, + llvm::MDNode *DbgInfo) { + Addr.replaceBasePointer(CreatePreserveUnionAccessIndex( 
+ Addr.getBasePointer(), FieldIndex, DbgInfo)); + return Addr; } using CGBuilderBaseTy::CreateLaunderInvariantGroup; Address CreateLaunderInvariantGroup(Address Addr) { - return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()), - Addr.isKnownNonNull()); + Addr.replaceBasePointer(CreateLaunderInvariantGroup(Addr.getBasePointer())); + return Addr; + } + + using CGBuilderBaseTy::CreateStripInvariantGroup; + Address CreateStripInvariantGroup(Address Addr) { + Addr.replaceBasePointer(CreateStripInvariantGroup(Addr.getBasePointer())); + return Addr; } }; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 032b434395f58..ce0f932b28962 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -2279,9 +2279,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( auto AL = ApplyDebugLocation::CreateArtificial(*this); CharUnits Offset; - Address BufAddr = - Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty, - BufferAlignment); + Address BufAddr = makeNaturalAddressForPointer( + Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy, + BufferAlignment); Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), @@ -2324,7 +2324,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { // Ignore argument 1, the format string. It is not currently used. CallArgList Args; - Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); + Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy); for (const auto &Item : Layout.Items) { int Size = Item.getSizeByte(); @@ -2364,8 +2364,8 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { if (!isa(ArgVal)) { CleanupKind Cleanup = getARCCleanupKind(); QualType Ty = TheExpr->getType(); - Address Alloca = Address::invalid(); - Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); + RawAddress Alloca = RawAddress::invalid(); + RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); ArgVal = EmitARCRetain(Ty, ArgVal); Builder.CreateStore(ArgVal, Addr); pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty, @@ -2398,7 +2398,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( Layout, BufAddr.getAlignment()); EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); - return RValue::get(BufAddr.getPointer()); + return RValue::get(BufAddr, *this); } static bool isSpecialUnsignedMultiplySignedResult( @@ -3149,7 +3149,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Check NonnullAttribute/NullabilityArg and Alignment. auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg, unsigned ParmNum) { - Value *Val = A.getPointer(); + Value *Val = A.emitRawPointer(*this); EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD, ParmNum); @@ -3178,12 +3178,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_va_end: EmitVAStartEnd(BuiltinID == Builtin::BI__va_start ? 
EmitScalarExpr(E->getArg(0)) - : EmitVAListRef(E->getArg(0)).getPointer(), + : EmitVAListRef(E->getArg(0)).emitRawPointer(*this), BuiltinID != Builtin::BI__builtin_va_end); return RValue::get(nullptr); case Builtin::BI__builtin_va_copy: { - Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); - Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); + Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this); + Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this); Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}), {DstPtr, SrcPtr}); return RValue::get(nullptr); @@ -3516,6 +3516,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD}); return RValue::get(nullptr); } + case Builtin::BI__builtin_allow_runtime_check: { + StringRef Kind = + cast(E->getArg(0)->IgnoreParenCasts())->getString(); + LLVMContext &Ctx = CGM.getLLVMContext(); + llvm::Value *Allow = Builder.CreateCall( + CGM.getIntrinsic(llvm::Intrinsic::allow_runtime_check), + llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind))); + return RValue::get(Allow); + } case Builtin::BI__arithmetic_fence: { // Create the builtin call if FastMath is selected, and the target // supports the builtin, otherwise just return the argument. @@ -4014,13 +4023,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); Address Src = EmitPointerWithAlignment(E->getArg(0)); - EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(), - E->getArg(0)->getExprLoc(), FD, 0); + EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, + 0); Value *Result = MB.CreateColumnMajorLoad( - Src.getElementType(), Src.getPointer(), + Src.getElementType(), Src.emitRawPointer(*this), Align(Src.getAlignment().getQuantity()), Stride, IsVolatile, - ResultTy->getNumRows(), ResultTy->getNumColumns(), - "matrix"); + ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix"); return RValue::get(Result); } @@ -4035,11 +4044,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, assert(PtrTy && "arg1 must be of pointer type"); bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); - EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 0); + EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)), + E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, + 0); Value *Result = MB.CreateColumnMajorStore( - Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()), - Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns()); + Matrix, Dst.emitRawPointer(*this), + Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile, + MatrixTy->getNumRows(), MatrixTy->getNumColumns()); return RValue::get(Result); } @@ -4198,7 +4209,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_bzero: { Address Dest = EmitPointerWithAlignment(E->getArg(0)); Value *SizeVal = EmitScalarExpr(E->getArg(1)); - EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + EmitNonNullArgCheck(Dest, E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); return RValue::get(nullptr); @@ -4209,10 +4220,12 @@ RValue 
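// Illustrative usage sketch, not part of the diff: the new
// __builtin_allow_runtime_check builtin handled in the hunk above takes a
// string literal naming a check kind and yields a boolean produced by the
// llvm.allow.runtime.check intrinsic, letting callers gate optional checks on
// it. The check name and helper below are hypothetical.
extern void do_expensive_bounds_check(void);
void maybe_check(void) {
  if (__builtin_allow_runtime_check("expensive-bounds-check"))
    do_expensive_bounds_check();
}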
CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Address Src = EmitPointerWithAlignment(E->getArg(0)); Address Dest = EmitPointerWithAlignment(E->getArg(1)); Value *SizeVal = EmitScalarExpr(E->getArg(2)); - EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(), - E->getArg(0)->getExprLoc(), FD, 0); - EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 0); + EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, + 0); + EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)), + E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, + 0); Builder.CreateMemMove(Dest, Src, SizeVal, false); return RValue::get(nullptr); } @@ -4229,10 +4242,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Builder.CreateMemCpy(Dest, Src, SizeVal, false); if (BuiltinID == Builtin::BImempcpy || BuiltinID == Builtin::BI__builtin_mempcpy) - return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(), - Dest.getPointer(), SizeVal)); + return RValue::get(Builder.CreateInBoundsGEP( + Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal)); else - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BI__builtin_memcpy_inline: { @@ -4264,7 +4277,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Address Src = EmitPointerWithAlignment(E->getArg(1)); Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); Builder.CreateMemCpy(Dest, Src, SizeVal, false); - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BI__builtin_objc_memmove_collectable: { @@ -4273,7 +4286,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Value *SizeVal = EmitScalarExpr(E->getArg(2)); CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestAddr, SrcAddr, SizeVal); - return RValue::get(DestAddr.getPointer()); + return RValue::get(DestAddr, *this); } case Builtin::BI__builtin___memmove_chk: { @@ -4290,7 +4303,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Address Src = EmitPointerWithAlignment(E->getArg(1)); Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); Builder.CreateMemMove(Dest, Src, SizeVal, false); - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BImemmove: @@ -4301,7 +4314,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0); EmitArgCheck(TCK_Load, Src, E->getArg(1), 1); Builder.CreateMemMove(Dest, Src, SizeVal, false); - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BImemset: case Builtin::BI__builtin_memset: { @@ -4309,10 +4322,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); Value *SizeVal = EmitScalarExpr(E->getArg(2)); - EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + EmitNonNullArgCheck(Dest, E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BI__builtin_memset_inline: { Address Dest = EmitPointerWithAlignment(E->getArg(0)); @@ 
-4320,8 +4333,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); uint64_t Size = E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); - EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), - E->getArg(0)->getExprLoc(), FD, 0); + EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)), + E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, + 0); Builder.CreateMemSetInline(Dest, ByteVal, Size); return RValue::get(nullptr); } @@ -4340,7 +4354,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Builder.getInt8Ty()); Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); - return RValue::get(Dest.getPointer()); + return RValue::get(Dest, *this); } case Builtin::BI__builtin_wmemchr: { // The MSVC runtime library does not provide a definition of wmemchr, so we @@ -4562,14 +4576,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Store the stack pointer to the setjmp buffer. Value *StackAddr = Builder.CreateStackSave(); - assert(Buf.getPointer()->getType() == StackAddr->getType()); + assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType()); Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); Builder.CreateStore(StackAddr, StackSaveSlot); // Call LLVM's EH setjmp, which is lightweight. Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); - return RValue::get(Builder.CreateCall(F, Buf.getPointer())); + return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this))); } case Builtin::BI__builtin_longjmp: { Value *Buf = EmitScalarExpr(E->getArg(0)); @@ -5742,7 +5756,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); - llvm::Value *Range = NDRangeL.getAddress(*this).getPointer(); + llvm::Value *Range = NDRangeL.getAddress(*this).emitRawPointer(*this); llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType(); if (NumArgs == 4) { @@ -5851,9 +5865,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, getContext(), Expr::NPC_ValueDependentIsNotNull)) { EventWaitList = llvm::ConstantPointerNull::get(PtrTy); } else { - EventWaitList = E->getArg(4)->getType()->isArrayType() - ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() - : EmitScalarExpr(E->getArg(4)); + EventWaitList = + E->getArg(4)->getType()->isArrayType() + ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this) + : EmitScalarExpr(E->getArg(4)); // Convert to generic address space. 
EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy); } @@ -5949,7 +5964,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Type *GenericVoidPtrTy = Builder.getPtrTy( getContext().getTargetAddressSpace(LangAS::opencl_generic)); LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); - llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer(); + llvm::Value *NDRange = NDRangeL.getAddress(*this).emitRawPointer(*this); auto Info = CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); Value *Kernel = @@ -6033,7 +6048,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto PTy0 = FTy->getParamType(0); if (PTy0 != Arg0Val->getType()) { if (Arg0Ty->isArrayType()) - Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); + Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this); else Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); } @@ -6071,7 +6086,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto PTy1 = FTy->getParamType(1); if (PTy1 != Arg1Val->getType()) { if (Arg1Ty->isArrayType()) - Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); + Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this); else Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); } @@ -6085,7 +6100,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_ms_va_start: case Builtin::BI__builtin_ms_va_end: return RValue::get( - EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), + EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this), BuiltinID == Builtin::BI__builtin_ms_va_start)); case Builtin::BI__builtin_ms_va_copy: { @@ -6136,8 +6151,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) - return emitLibraryCall(*this, FD, E, - cast(EmitScalarExpr(E->getCallee()))); + return emitLibraryCall( + *this, FD, E, cast(EmitScalarExpr(E->getCallee()))); // Check that a call to a target specific builtin has the correct target // features. @@ -6254,7 +6269,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(nullptr); return RValue::get(V); case TEK_Aggregate: - return RValue::getAggregate(ReturnValue.getValue(), + return RValue::getAggregate(ReturnValue.getAddress(), ReturnValue.isVolatile()); case TEK_Complex: llvm_unreachable("No current target builtin returns complex"); @@ -9015,7 +9030,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, // Get the alignment for the argument in addition to the value; // we'll use it later. PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); - Ops.push_back(PtrOp0.getPointer()); + Ops.push_back(PtrOp0.emitRawPointer(*this)); continue; } } @@ -9042,7 +9057,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, // Get the alignment for the argument in addition to the value; // we'll use it later. 
PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); - Ops.push_back(PtrOp1.getPointer()); + Ops.push_back(PtrOp1.emitRawPointer(*this)); continue; } } @@ -9463,7 +9478,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID, if (ReturnValue.isNull()) return MvecOut; else - return Builder.CreateStore(MvecOut, ReturnValue.getValue()); + return Builder.CreateStore(MvecOut, ReturnValue.getAddress()); } case CustomCodeGen::VST24: { @@ -11649,7 +11664,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, // Get the alignment for the argument in addition to the value; // we'll use it later. PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); - Ops.push_back(PtrOp0.getPointer()); + Ops.push_back(PtrOp0.emitRawPointer(*this)); continue; } } @@ -13515,15 +13530,15 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, if (!getDebugInfo()) { CGM.Error(E->getExprLoc(), "using __builtin_preserve_field_info() without -g"); - return IsBitField ? EmitLValue(Arg).getBitFieldPointer() - : EmitLValue(Arg).getPointer(*this); + return IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this) + : EmitLValue(Arg).emitRawPointer(*this); } // Enable underlying preserve_*_access_index() generation. bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; IsInPreservedAIRegion = true; - Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer() - : EmitLValue(Arg).getPointer(*this); + Value *FieldAddr = IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this) + : EmitLValue(Arg).emitRawPointer(*this); IsInPreservedAIRegion = OldIsInPreservedAIRegion; ConstantInt *C = cast(EmitScalarExpr(E->getArg(1))); @@ -14517,14 +14532,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, } case X86::BI_mm_setcsr: case X86::BI__builtin_ia32_ldmxcsr: { - Address Tmp = CreateMemTemp(E->getArg(0)->getType()); + RawAddress Tmp = CreateMemTemp(E->getArg(0)->getType()); Builder.CreateStore(Ops[0], Tmp); return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), Tmp.getPointer()); } case X86::BI_mm_getcsr: case X86::BI__builtin_ia32_stmxcsr: { - Address Tmp = CreateMemTemp(E->getType()); + RawAddress Tmp = CreateMemTemp(E->getType()); Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), Tmp.getPointer()); return Builder.CreateLoad(Tmp, "stmxcsr"); @@ -17810,7 +17825,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, SmallVector Ops; for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) if (E->getArg(i)->getType()->isArrayType()) - Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer()); + Ops.push_back( + EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this)); else Ops.push_back(EmitScalarExpr(E->getArg(i))); // The first argument of these two builtins is a pointer used to store their @@ -18286,7 +18302,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, Value *Op0 = EmitScalarExpr(E->getArg(0)); return Builder.CreateIntrinsic( /*ReturnType=*/llvm::Type::getInt1Ty(getLLVMContext()), - Intrinsic::dx_any, ArrayRef{Op0}, nullptr, "dx.any"); + CGM.getHLSLRuntime().getAnyIntrinsic(), ArrayRef{Op0}, nullptr, + "hlsl.any"); } case Builtin::BI__builtin_hlsl_elementwise_clamp: { Value *OpX = EmitScalarExpr(E->getArg(0)); @@ -18395,9 +18412,16 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, Value *Op0 = EmitScalarExpr(E->getArg(0)); if (!E->getArg(0)->getType()->hasFloatingRepresentation()) llvm_unreachable("rcp operand must have a float representation"); - return Builder.CreateIntrinsic( - 
/*ReturnType=*/Op0->getType(), Intrinsic::dx_rcp, - ArrayRef{Op0}, nullptr, "dx.rcp"); + llvm::Type *Ty = Op0->getType(); + llvm::Type *EltTy = Ty->getScalarType(); + Constant *One = + Ty->isVectorTy() + ? ConstantVector::getSplat( + ElementCount::getFixed( + dyn_cast(Ty)->getNumElements()), + ConstantFP::get(EltTy, 1.0)) + : ConstantFP::get(EltTy, 1.0); + return Builder.CreateFDiv(One, Op0, "hlsl.rcp"); } case Builtin::BI__builtin_hlsl_elementwise_rsqrt: { Value *Op0 = EmitScalarExpr(E->getArg(0)); @@ -22288,14 +22312,14 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, // Save returned values. assert(II.NumResults); if (II.NumResults == 1) { - Builder.CreateAlignedStore(Result, Dst.getPointer(), + Builder.CreateAlignedStore(Result, Dst.emitRawPointer(*this), CharUnits::fromQuantity(4)); } else { for (unsigned i = 0; i < II.NumResults; ++i) { Builder.CreateAlignedStore( Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), Dst.getElementType()), - Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), + Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); } @@ -22335,7 +22359,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, for (unsigned i = 0; i < II.NumResults; ++i) { Value *V = Builder.CreateAlignedLoad( Src.getElementType(), - Builder.CreateGEP(Src.getElementType(), Src.getPointer(), + Builder.CreateGEP(Src.getElementType(), Src.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); Values.push_back(Builder.CreateBitCast(V, ParamType)); @@ -22407,7 +22431,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, for (unsigned i = 0; i < MI.NumEltsA; ++i) { Value *V = Builder.CreateAlignedLoad( SrcA.getElementType(), - Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(), + Builder.CreateGEP(SrcA.getElementType(), SrcA.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); Values.push_back(Builder.CreateBitCast(V, AType)); @@ -22417,7 +22441,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, for (unsigned i = 0; i < MI.NumEltsB; ++i) { Value *V = Builder.CreateAlignedLoad( SrcB.getElementType(), - Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(), + Builder.CreateGEP(SrcB.getElementType(), SrcB.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); Values.push_back(Builder.CreateBitCast(V, BType)); @@ -22428,7 +22452,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, for (unsigned i = 0; i < MI.NumEltsC; ++i) { Value *V = Builder.CreateAlignedLoad( SrcC.getElementType(), - Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(), + Builder.CreateGEP(SrcC.getElementType(), SrcC.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); Values.push_back(Builder.CreateBitCast(V, CType)); @@ -22438,7 +22462,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, for (unsigned i = 0; i < MI.NumEltsD; ++i) Builder.CreateAlignedStore( Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), - Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), + Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this), llvm::ConstantInt::get(IntTy, i)), CharUnits::fromQuantity(4)); return Result; @@ -22696,7 +22720,7 @@ struct BuiltinAlignArgs { BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) { QualType AstType = E->getArg(0)->getType(); if 
(AstType->isArrayType()) - Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF); else Src = CGF.EmitScalarExpr(E->getArg(0)); SrcType = Src->getType(); @@ -23314,7 +23338,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, } case WebAssembly::BI__builtin_wasm_table_get: { assert(E->getArg(0)->getType()->isArrayType()); - Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); Value *Index = EmitScalarExpr(E->getArg(1)); Function *Callee; if (E->getType().isWebAssemblyExternrefType()) @@ -23328,7 +23352,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, } case WebAssembly::BI__builtin_wasm_table_set: { assert(E->getArg(0)->getType()->isArrayType()); - Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); Value *Index = EmitScalarExpr(E->getArg(1)); Value *Val = EmitScalarExpr(E->getArg(2)); Function *Callee; @@ -23343,13 +23367,13 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, } case WebAssembly::BI__builtin_wasm_table_size: { assert(E->getArg(0)->getType()->isArrayType()); - Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Value *Value = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size); return Builder.CreateCall(Callee, Value); } case WebAssembly::BI__builtin_wasm_table_grow: { assert(E->getArg(0)->getType()->isArrayType()); - Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); Value *Val = EmitScalarExpr(E->getArg(1)); Value *NElems = EmitScalarExpr(E->getArg(2)); @@ -23366,7 +23390,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, } case WebAssembly::BI__builtin_wasm_table_fill: { assert(E->getArg(0)->getType()->isArrayType()); - Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); + Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); Value *Index = EmitScalarExpr(E->getArg(1)); Value *Val = EmitScalarExpr(E->getArg(2)); Value *NElems = EmitScalarExpr(E->getArg(3)); @@ -23384,8 +23408,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, } case WebAssembly::BI__builtin_wasm_table_copy: { assert(E->getArg(0)->getType()->isArrayType()); - Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer(); - Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer(); + Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); + Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).emitRawPointer(*this); Value *DstIdx = EmitScalarExpr(E->getArg(2)); Value *SrcIdx = EmitScalarExpr(E->getArg(3)); Value *NElems = EmitScalarExpr(E->getArg(4)); @@ -23464,7 +23488,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { // The base pointer is passed by address, so it needs to be loaded. 
Address A = EmitPointerWithAlignment(E->getArg(0)); - Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment()); + Address BP = Address(A.emitRawPointer(*this), Int8PtrTy, A.getAlignment()); llvm::Value *Base = Builder.CreateLoad(BP); // The treatment of both loads and stores is the same: the arguments for // the builtin are the same as the arguments for the intrinsic. @@ -23505,8 +23529,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, // EmitPointerWithAlignment and EmitScalarExpr evaluates the expression // per call. Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); - DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment()); - llvm::Value *DestAddress = DestAddr.getPointer(); + DestAddr = DestAddr.withElementType(Int8Ty); + llvm::Value *DestAddress = DestAddr.emitRawPointer(*this); // Operands are Base, Dest, Modifier. // The intrinsic format in LLVM IR is defined as @@ -23557,8 +23581,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn}); llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1); - Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(), - PredAddr.getAlignment()); + Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this), + PredAddr.getAlignment()); return Builder.CreateExtractValue(Result, 0); } // These are identical to the builtins above, except they don't consume @@ -23576,8 +23600,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}); llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1); - Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(), - PredAddr.getAlignment()); + Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this), + PredAddr.getAlignment()); return Builder.CreateExtractValue(Result, 0); } diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp index 24f900a3d9d36..9846aa85a1cf5 100644 --- a/clang/lib/CodeGen/CGCUDANV.cpp +++ b/clang/lib/CodeGen/CGCUDANV.cpp @@ -333,11 +333,11 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF, llvm::ConstantInt::get(SizeTy, std::max(1, Args.size()))); // Store pointers to the arguments in a locally allocated launch_args. 
for (unsigned i = 0; i < Args.size(); ++i) { - llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer(); + llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).emitRawPointer(CGF); llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, PtrTy); CGF.Builder.CreateDefaultAlignedStore( - VoidVarPtr, - CGF.Builder.CreateConstGEP1_32(PtrTy, KernelArgs.getPointer(), i)); + VoidVarPtr, CGF.Builder.CreateConstGEP1_32( + PtrTy, KernelArgs.emitRawPointer(CGF), i)); } llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end"); @@ -363,7 +363,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF, KernelLaunchAPI = KernelLaunchAPI + "_ptsz"; } auto LaunchKernelName = addPrefixToName(KernelLaunchAPI); - IdentifierInfo &cudaLaunchKernelII = + const IdentifierInfo &cudaLaunchKernelII = CGM.getContext().Idents.get(LaunchKernelName); FunctionDecl *cudaLaunchKernelFD = nullptr; for (auto *Result : DC->lookup(&cudaLaunchKernelII)) { @@ -395,9 +395,10 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF, /*isVarArg=*/false), addUnderscoredPrefixToName("PopCallConfiguration")); - CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn, - {GridDim.getPointer(), BlockDim.getPointer(), - ShmemSize.getPointer(), Stream.getPointer()}); + CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn, {GridDim.emitRawPointer(CGF), + BlockDim.emitRawPointer(CGF), + ShmemSize.emitRawPointer(CGF), + Stream.emitRawPointer(CGF)}); // Emit the call to cudaLaunch llvm::Value *Kernel = @@ -407,7 +408,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF, cudaLaunchKernelFD->getParamDecl(0)->getType()); LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty); LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty); - LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()), + LaunchKernelArgs.add(RValue::get(KernelArgs, CGF), cudaLaunchKernelFD->getParamDecl(3)->getType()); LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)), cudaLaunchKernelFD->getParamDecl(4)->getType()); @@ -440,8 +441,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF, auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType()); Offset = Offset.alignTo(TInfo.Align); llvm::Value *Args[] = { - CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(), - PtrTy), + CGF.Builder.CreatePointerCast( + CGF.GetAddrOfLocalVar(A).emitRawPointer(CGF), PtrTy), llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()), llvm::ConstantInt::get(SizeTy, Offset.getQuantity()), }; diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp index a8bf57a277e90..7c6dfc3e59d8c 100644 --- a/clang/lib/CodeGen/CGCXXABI.cpp +++ b/clang/lib/CodeGen/CGCXXABI.cpp @@ -20,6 +20,12 @@ using namespace CodeGen; CGCXXABI::~CGCXXABI() { } +Address CGCXXABI::getThisAddress(CodeGenFunction &CGF) { + return CGF.makeNaturalAddressForPointer( + CGF.CXXABIThisValue, CGF.CXXABIThisDecl->getType()->getPointeeType(), + CGF.CXXABIThisAlignment); +} + void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) { DiagnosticsEngine &Diags = CGF.CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -44,8 +50,12 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer( llvm::Value *MemPtr, const MemberPointerType *MPT) { ErrorUnsupportedABI(CGF, "calls through member pointers"); - ThisPtrForCall = This.getPointer(); - const auto *FPT = MPT->getPointeeType()->castAs(); + const auto *RD = + cast(MPT->getClass()->castAs()->getDecl()); + ThisPtrForCall = + 
CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD)); + const FunctionProtoType *FPT = + MPT->getPointeeType()->getAs(); llvm::Constant *FnPtr = llvm::Constant::getNullValue( llvm::PointerType::getUnqual(CGM.getLLVMContext())); return CGCallee::forDirect(FnPtr, FPT); @@ -251,16 +261,15 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr, // If we don't need an array cookie, bail out early. if (!requiresArrayCookie(expr, eltTy)) { - allocPtr = ptr.getPointer(); + allocPtr = ptr.emitRawPointer(CGF); numElements = nullptr; cookieSize = CharUnits::Zero(); return; } cookieSize = getArrayCookieSizeImpl(eltTy); - Address allocAddr = - CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize); - allocPtr = allocAddr.getPointer(); + Address allocAddr = CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize); + allocPtr = allocAddr.emitRawPointer(CGF); numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize); } diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h index ad1ad08d08568..c7eccbd0095a9 100644 --- a/clang/lib/CodeGen/CGCXXABI.h +++ b/clang/lib/CodeGen/CGCXXABI.h @@ -57,12 +57,8 @@ class CGCXXABI { llvm::Value *getThisValue(CodeGenFunction &CGF) { return CGF.CXXABIThisValue; } - Address getThisAddress(CodeGenFunction &CGF) { - return Address( - CGF.CXXABIThisValue, - CGF.ConvertTypeForMem(CGF.CXXABIThisDecl->getType()->getPointeeType()), - CGF.CXXABIThisAlignment); - } + + Address getThisAddress(CodeGenFunction &CGF); /// Issue a diagnostic about unsupported features in the ABI. void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S); @@ -475,12 +471,6 @@ class CGCXXABI { BaseSubobject Base, const CXXRecordDecl *NearestVBase) = 0; - /// Get the address point of the vtable for the given base subobject while - /// building a constexpr. - virtual llvm::Constant * - getVTableAddressPointForConstExpr(BaseSubobject Base, - const CXXRecordDecl *VTableClass) = 0; - /// Get the address of the vtable for the given record decl which should be /// used for the vptr at the given offset in RD. virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index cd5905b8d748e..65c088ef5082e 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1046,15 +1046,9 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref Fn) { - CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); - CharUnits EltAlign = - BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); - llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy); - for (int i = 0, n = CAE->NumElts; i < n; i++) { - llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32( - BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i); - Fn(Address(EltAddr, EltTy, EltAlign)); + Address EltAddr = CGF.Builder.CreateConstGEP2_32(BaseAddr, 0, i); + Fn(EltAddr); } } @@ -1169,9 +1163,10 @@ void CodeGenFunction::ExpandTypeToArgs( } /// Create a temporary allocation for the purposes of coercion. -static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, - CharUnits MinAlign, - const Twine &Name = "tmp") { +static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, + llvm::Type *Ty, + CharUnits MinAlign, + const Twine &Name = "tmp") { // Don't use an alignment that's worse than what LLVM would prefer. 
auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty); CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); @@ -1341,11 +1336,11 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, } // Otherwise do coercion through memory. This is stupid, but simple. - Address Tmp = + RawAddress Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); CGF.Builder.CreateMemCpy( - Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), - Src.getAlignment().getAsAlign(), + Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), + Src.emitRawPointer(CGF), Src.getAlignment().getAsAlign(), llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue())); return CGF.Builder.CreateLoad(Tmp); } @@ -1429,11 +1424,12 @@ static void CreateCoercedStore(llvm::Value *Src, // // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. - Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); + RawAddress Tmp = + CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); CGF.Builder.CreateStore(Src, Tmp); CGF.Builder.CreateMemCpy( - Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), - Tmp.getAlignment().getAsAlign(), + Dst.emitRawPointer(CGF), Dst.getAlignment().getAsAlign(), + Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue())); } } @@ -3102,17 +3098,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, case ABIArgInfo::Indirect: case ABIArgInfo::IndirectAliased: { assert(NumIRArgs == 1); - Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty), - ArgI.getIndirectAlign(), KnownNonNull); + Address ParamAddr = makeNaturalAddressForPointer( + Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr, + nullptr, KnownNonNull); if (!hasScalarEvaluationKind(Ty)) { // Aggregates and complex variables are accessed by reference. All we // need to do is realign the value, if requested. Also, if the address // may be aliased, copy it to ensure that the parameter variable is // mutable and has a unique adress, as C requires. - Address V = ParamAddr; if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { - Address AlignedTemp = CreateMemTemp(Ty, "coerce"); + RawAddress AlignedTemp = CreateMemTemp(Ty, "coerce"); // Copy from the incoming argument pointer to the temporary with the // appropriate alignment. @@ -3122,11 +3118,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, CharUnits Size = getContext().getTypeSizeInChars(Ty); Builder.CreateMemCpy( AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(), - ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(), + ParamAddr.emitRawPointer(*this), + ParamAddr.getAlignment().getAsAlign(), llvm::ConstantInt::get(IntPtrTy, Size.getQuantity())); - V = AlignedTemp; + ParamAddr = AlignedTemp; } - ArgVals.push_back(ParamValue::forIndirect(V)); + ArgVals.push_back(ParamValue::forIndirect(ParamAddr)); } else { // Load scalar value from indirect argument. 
llvm::Value *V = @@ -3244,10 +3241,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, == ParameterABI::SwiftErrorResult) { QualType pointeeTy = Ty->getPointeeType(); assert(pointeeTy->isPointerType()); - Address temp = - CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); - Address arg(V, ConvertTypeForMem(pointeeTy), - getContext().getTypeAlignInChars(pointeeTy)); + RawAddress temp = + CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); + Address arg = makeNaturalAddressForPointer( + V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy)); llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); Builder.CreateStore(incomingErrorValue, temp); V = temp.getPointer(); @@ -3583,7 +3580,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::LoadInst *load = dyn_cast(retainedValue->stripPointerCasts()); if (!load || load->isAtomic() || load->isVolatile() || - load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) + load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer()) return nullptr; // Okay! Burn it all down. This relies for correctness on the @@ -3620,12 +3617,15 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, /// Heuristically search for a dominating store to the return-value slot. static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { + llvm::Value *ReturnValuePtr = CGF.ReturnValue.getBasePointer(); + // Check if a User is a store which pointerOperand is the ReturnValue. // We are looking for stores to the ReturnValue, not for stores of the // ReturnValue to some other location. - auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { + auto GetStoreIfValid = [&CGF, + ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * { auto *SI = dyn_cast(U); - if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() || + if (!SI || SI->getPointerOperand() != ReturnValuePtr || SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType()) return nullptr; // These aren't actually possible for non-coerced returns, and we @@ -3639,7 +3639,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { // for something immediately preceding the IP. Sometimes this can // happen with how we generate implicit-returns; it can also happen // with noreturn cleanups. 
- if (!CGF.ReturnValue.getPointer()->hasOneUse()) { + if (!ReturnValuePtr->hasOneUse()) { llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); if (IP->empty()) return nullptr; @@ -3657,8 +3657,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { return nullptr; } - llvm::StoreInst *store = - GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back()); + llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back()); if (!store) return nullptr; // Now do a first-and-dirty dominance check: just walk up the @@ -3747,7 +3746,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { const FieldDecl *F = *I; - if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || + if (F->isUnnamedBitField() || F->isZeroLengthBitField(Context) || F->getType()->isIncompleteArrayType()) continue; @@ -4202,7 +4201,11 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, } static bool isProvablyNull(llvm::Value *addr) { - return isa(addr); + return llvm::isa_and_nonnull(addr); +} + +static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) { + return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout()); } /// Emit the actual writing-back of a writeback. @@ -4210,21 +4213,20 @@ static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback) { const LValue &srcLV = writeback.Source; Address srcAddr = srcLV.getAddress(CGF); - assert(!isProvablyNull(srcAddr.getPointer()) && + assert(!isProvablyNull(srcAddr.getBasePointer()) && "shouldn't have writeback for provably null argument"); llvm::BasicBlock *contBB = nullptr; // If the argument wasn't provably non-null, we need to null check // before doing the store. - bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), - CGF.CGM.getDataLayout()); + bool provablyNonNull = isProvablyNonNull(srcAddr, CGF); + if (!provablyNonNull) { llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); contBB = CGF.createBasicBlock("icr.done"); - llvm::Value *isNull = - CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); + llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); CGF.EmitBlock(writebackBB); } @@ -4328,7 +4330,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, CGF.ConvertTypeForMem(CRE->getType()->getPointeeType()); // If the address is a constant null, just pass the appropriate null. - if (isProvablyNull(srcAddr.getPointer())) { + if (isProvablyNull(srcAddr.getBasePointer())) { args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), CRE->getType()); return; @@ -4357,17 +4359,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, // If the address is *not* known to be non-null, we need to switch. 
llvm::Value *finalArgument; - bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), - CGF.CGM.getDataLayout()); + bool provablyNonNull = isProvablyNonNull(srcAddr, CGF); + if (provablyNonNull) { - finalArgument = temp.getPointer(); + finalArgument = temp.emitRawPointer(CGF); } else { - llvm::Value *isNull = - CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); + llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); - finalArgument = CGF.Builder.CreateSelect(isNull, - llvm::ConstantPointerNull::get(destType), - temp.getPointer(), "icr.argument"); + finalArgument = CGF.Builder.CreateSelect( + isNull, llvm::ConstantPointerNull::get(destType), + temp.emitRawPointer(CGF), "icr.argument"); // If we need to copy, then the load has to be conditional, which // means we need control flow. @@ -4492,6 +4493,16 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt); } +void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType, + SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum) { + if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || + SanOpts.has(SanitizerKind::NullabilityArg))) + return; + + EmitNonNullArgCheck(RValue::get(Addr, *this), ArgType, ArgLoc, AC, ParmNum); +} + // Check if the call is going to use the inalloca convention. This needs to // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged // later, so we can't check it directly. @@ -4833,10 +4844,20 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const llvm::Twine &name) { - return EmitNounwindRuntimeCall(callee, std::nullopt, name); + return EmitNounwindRuntimeCall(callee, ArrayRef(), name); } /// Emits a call to the given nounwind runtime function. +llvm::CallInst * +CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, + ArrayRef
args, + const llvm::Twine &name) { + SmallVector values; + for (auto arg : args) + values.push_back(arg.emitRawPointer(*this)); + return EmitNounwindRuntimeCall(callee, values, name); +} + llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, @@ -5115,7 +5136,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! - Address ArgMemory = Address::invalid(); + RawAddress ArgMemory = RawAddress::invalid(); if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { const llvm::DataLayout &DL = CGM.getDataLayout(); llvm::Instruction *IP = CallArgs.getStackBase(); @@ -5131,7 +5152,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, AI->setAlignment(Align.getAsAlign()); AI->setUsedWithInAlloca(true); assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); - ArgMemory = Address(AI, ArgStruct, Align); + ArgMemory = RawAddress(AI, ArgStruct, Align); } ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); @@ -5140,11 +5161,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. Address SRetPtr = Address::invalid(); - Address SRetAlloca = Address::invalid(); + RawAddress SRetAlloca = RawAddress::invalid(); llvm::Value *UnusedReturnSizePtr = nullptr; if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { if (!ReturnValue.isNull()) { - SRetPtr = ReturnValue.getValue(); + SRetPtr = ReturnValue.getAddress(); } else { SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); if (HaveInsertPoint() && ReturnValue.isUnused()) { @@ -5154,15 +5175,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, } } if (IRFunctionArgs.hasSRetArg()) { - IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); + IRCallArgs[IRFunctionArgs.getSRetArgNo()] = + getAsNaturalPointerTo(SRetPtr, RetTy); } else if (RetAI.isInAlloca()) { Address Addr = Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); - Builder.CreateStore(SRetPtr.getPointer(), Addr); + Builder.CreateStore(getAsNaturalPointerTo(SRetPtr, RetTy), Addr); } } - Address swiftErrorTemp = Address::invalid(); + RawAddress swiftErrorTemp = RawAddress::invalid(); Address swiftErrorArg = Address::invalid(); // When passing arguments using temporary allocas, we need to add the @@ -5195,9 +5217,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, assert(NumIRArgs == 0); assert(getTarget().getTriple().getArch() == llvm::Triple::x86); if (I->isAggregate()) { - Address Addr = I->hasLValue() - ? I->getKnownLValue().getAddress(*this) - : I->getKnownRValue().getAggregateAddress(); + RawAddress Addr = I->hasLValue() + ? I->getKnownLValue().getAddress(*this) + : I->getKnownRValue().getAggregateAddress(); llvm::Instruction *Placeholder = cast(Addr.getPointer()); @@ -5221,7 +5243,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, } else if (ArgInfo.getInAllocaIndirect()) { // Make a temporary alloca and store the address of it into the argument // struct. 
- Address Addr = CreateMemTempWithoutCast( + RawAddress Addr = CreateMemTempWithoutCast( I->Ty, getContext().getTypeAlignInChars(I->Ty), "indirect-arg-temp"); I->copyInto(*this, Addr); @@ -5243,12 +5265,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, assert(NumIRArgs == 1); if (!I->isAggregate()) { // Make a temporary alloca to pass the argument. - Address Addr = CreateMemTempWithoutCast( + RawAddress Addr = CreateMemTempWithoutCast( I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); - llvm::Value *Val = Addr.getPointer(); + llvm::Value *Val = getAsNaturalPointerTo(Addr, I->Ty); if (ArgHasMaybeUndefAttr) - Val = Builder.CreateFreeze(Addr.getPointer()); + Val = Builder.CreateFreeze(Val); IRCallArgs[FirstIRArg] = Val; I->copyInto(*this, Addr); @@ -5264,7 +5286,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Address Addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); - llvm::Value *V = Addr.getPointer(); CharUnits Align = ArgInfo.getIndirectAlign(); const llvm::DataLayout *TD = &CGM.getDataLayout(); @@ -5275,8 +5296,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, bool NeedCopy = false; if (Addr.getAlignment() < Align && - llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) < - Align.getAsAlign()) { + llvm::getOrEnforceKnownAlignment(Addr.emitRawPointer(*this), + Align.getAsAlign(), + *TD) < Align.getAsAlign()) { NeedCopy = true; } else if (I->hasLValue()) { auto LV = I->getKnownLValue(); @@ -5307,11 +5329,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, if (NeedCopy) { // Create an aligned temporary, and copy to it. - Address AI = CreateMemTempWithoutCast( + RawAddress AI = CreateMemTempWithoutCast( I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); - llvm::Value *Val = AI.getPointer(); + llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty); if (ArgHasMaybeUndefAttr) - Val = Builder.CreateFreeze(AI.getPointer()); + Val = Builder.CreateFreeze(Val); IRCallArgs[FirstIRArg] = Val; // Emit lifetime markers for the temporary alloca. @@ -5328,6 +5350,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, I->copyInto(*this, AI); } else { // Skip the extra memcpy call. + llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty); auto *T = llvm::PointerType::get( CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace()); llvm::Value *Val = getTargetHooks().performAddrSpaceCast( @@ -5366,8 +5389,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); QualType pointeeTy = I->Ty->getPointeeType(); - swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy), - getContext().getTypeAlignInChars(pointeeTy)); + swiftErrorArg = makeNaturalAddressForPointer( + V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy)); swiftErrorTemp = CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); @@ -5511,7 +5534,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::Value *tempSize = nullptr; Address addr = Address::invalid(); - Address AllocaAddr = Address::invalid(); + RawAddress AllocaAddr = RawAddress::invalid(); if (I->isAggregate()) { addr = I->hasLValue() ? 
I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); @@ -5974,7 +5997,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, return RValue::getComplex(std::make_pair(Real, Imag)); } case TEK_Aggregate: { - Address DestPtr = ReturnValue.getValue(); + Address DestPtr = ReturnValue.getAddress(); bool DestIsVolatile = ReturnValue.isVolatile(); if (!DestPtr.isValid()) { diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h index 155042c8b0a50..b6d7b4d542a54 100644 --- a/clang/lib/CodeGen/CGCall.h +++ b/clang/lib/CodeGen/CGCall.h @@ -376,6 +376,7 @@ class ReturnValueSlot { Address getValue() const { return Addr; } bool isUnused() const { return IsUnused; } bool isExternallyDestructed() const { return IsExternallyDestructed; } + Address getAddress() const { return Addr; } }; /// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp index 69e602008a9a9..9aa3241129f75 100644 --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -139,8 +139,9 @@ Address CodeGenFunction::LoadCXXThisAddress() { CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent()); } - llvm::Type *Ty = ConvertType(MD->getFunctionObjectParameterType()); - return Address(LoadCXXThis(), Ty, CXXThisAlignment, KnownNonNull); + return makeNaturalAddressForPointer( + LoadCXXThis(), MD->getFunctionObjectParameterType(), CXXThisAlignment, + false, nullptr, nullptr, KnownNonNull); } /// Emit the address of a field using a member data pointer. @@ -271,7 +272,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr, } // Apply the base offset. - llvm::Value *ptr = addr.getPointer(); + llvm::Value *ptr = addr.emitRawPointer(CGF); ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr"); // If we have a virtual component, the alignment of the result will @@ -339,8 +340,8 @@ Address CodeGenFunction::GetAddressOfBaseClass( if (sanitizePerformTypeCheck()) { SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::Null, !NullCheckValue); - EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(), - DerivedTy, DerivedAlign, SkippedChecks); + EmitTypeCheck(TCK_Upcast, Loc, Value.emitRawPointer(*this), DerivedTy, + DerivedAlign, SkippedChecks); } return Value.withElementType(BaseValueTy); } @@ -355,7 +356,7 @@ Address CodeGenFunction::GetAddressOfBaseClass( llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); endBB = createBasicBlock("cast.end"); - llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer()); + llvm::Value *isNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(isNull, endBB, notNullBB); EmitBlock(notNullBB); } @@ -364,14 +365,15 @@ Address CodeGenFunction::GetAddressOfBaseClass( SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::Null, true); EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, - Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks); + Value.emitRawPointer(*this), DerivedTy, DerivedAlign, + SkippedChecks); } // Compute the virtual offset. llvm::Value *VirtualOffset = nullptr; if (VBase) { VirtualOffset = - CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); + CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); } // Apply both offsets. 
@@ -388,7 +390,7 @@ Address CodeGenFunction::GetAddressOfBaseClass( EmitBlock(endBB); llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result"); - PHI->addIncoming(Value.getPointer(), notNullBB); + PHI->addIncoming(Value.emitRawPointer(*this), notNullBB); PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB); Value = Value.withPointer(PHI, NotKnownNonNull); } @@ -425,15 +427,19 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, CastNotNull = createBasicBlock("cast.notnull"); CastEnd = createBasicBlock("cast.end"); - llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer()); + llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } // Apply the offset. - llvm::Value *Value = BaseAddr.getPointer(); - Value = Builder.CreateInBoundsGEP( - Int8Ty, Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr"); + Address Addr = BaseAddr.withElementType(Int8Ty); + Addr = Builder.CreateInBoundsGEP( + Addr, Builder.CreateNeg(NonVirtualOffset), Int8Ty, + CGM.getClassPointerAlignment(Derived), "sub.ptr"); + + // Just cast. + Addr = Addr.withElementType(DerivedValueTy); // Produce a PHI if we had a null-check. if (NullCheckValue) { @@ -442,13 +448,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, Builder.CreateBr(CastEnd); EmitBlock(CastEnd); + llvm::Value *Value = Addr.emitRawPointer(*this); llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); PHI->addIncoming(Value, CastNotNull); PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); - Value = PHI; + return Address(PHI, Addr.getElementType(), + CGM.getClassPointerAlignment(Derived)); } - return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived)); + return Addr; } llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, @@ -1720,7 +1728,7 @@ namespace { // Use the base class declaration location as inline DebugLocation. All // fields of the class are destroyed. DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass); - EmitSanitizerDtorFieldsCallback(CGF, Addr.getPointer(), + EmitSanitizerDtorFieldsCallback(CGF, Addr.emitRawPointer(CGF), BaseSize.getQuantity()); // Prevent the current stack frame from disappearing from the stack trace. @@ -2023,7 +2031,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, // Find the end of the array. llvm::Type *elementType = arrayBase.getElementType(); - llvm::Value *arrayBegin = arrayBase.getPointer(); + llvm::Value *arrayBegin = arrayBase.emitRawPointer(*this); llvm::Value *arrayEnd = Builder.CreateInBoundsGEP( elementType, arrayBegin, numElements, "arrayctor.end"); @@ -2119,14 +2127,15 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, Address This = ThisAVS.getAddress(); LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace(); - llvm::Value *ThisPtr = This.getPointer(); + llvm::Value *ThisPtr = + getAsNaturalPointerTo(This, D->getThisType()->getPointeeType()); if (SlotAS != ThisAS) { unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS); llvm::Type *NewType = llvm::PointerType::get(getLLVMContext(), TargetThisAS); - ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(), - ThisAS, SlotAS, NewType); + ThisPtr = getTargetHooks().performAddrSpaceCast(*this, ThisPtr, ThisAS, + SlotAS, NewType); } // Push the this ptr. 
@@ -2195,7 +2204,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, const CXXRecordDecl *ClassDecl = D->getParent(); if (!NewPointerIsChecked) - EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This, getContext().getRecordType(ClassDecl), CharUnits::Zero()); if (D->isTrivial() && D->isDefaultConstructor()) { @@ -2208,10 +2217,9 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, // model that copy. if (isMemcpyEquivalentSpecialMember(D)) { assert(Args.size() == 2 && "unexpected argcount for trivial ctor"); - QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType(); - Address Src = Address(Args[1].getRValue(*this).getScalarVal(), ConvertTypeForMem(SrcTy), - CGM.getNaturalTypeAlignment(SrcTy)); + Address Src = makeNaturalAddressForPointer( + Args[1].getRValue(*this).getScalarVal(), SrcTy); LValue SrcLVal = MakeAddrLValue(Src, SrcTy); QualType DestTy = getContext().getTypeDeclType(ClassDecl); LValue DestLVal = MakeAddrLValue(This, DestTy); @@ -2264,7 +2272,9 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall( const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) { CallArgList Args; - CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType()); + CallArg ThisArg(RValue::get(getAsNaturalPointerTo( + This, D->getThisType()->getPointeeType())), + D->getThisType()); // Forward the parameters. if (InheritedFromVBase && @@ -2389,12 +2399,14 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, CallArgList Args; // Push the this ptr. - Args.add(RValue::get(This.getPointer()), D->getThisType()); + Args.add(RValue::get(getAsNaturalPointerTo(This, D->getThisType())), + D->getThisType()); // Push the src ptr. QualType QT = *(FPT->param_type_begin()); llvm::Type *t = CGM.getTypes().ConvertType(QT); - llvm::Value *SrcVal = Builder.CreateBitCast(Src.getPointer(), t); + llvm::Value *Val = getAsNaturalPointerTo(Src, D->getThisType()); + llvm::Value *SrcVal = Builder.CreateBitCast(Val, t); Args.add(RValue::get(SrcVal), QT); // Skip over first argument (Src). @@ -2419,7 +2431,9 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, // this Address This = LoadCXXThisAddress(); - DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType()); + DelegateArgs.add(RValue::get(getAsNaturalPointerTo( + This, (*I)->getType()->getPointeeType())), + (*I)->getType()); ++I; // FIXME: The location of the VTT parameter in the parameter list is @@ -2780,7 +2794,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived, if (MayBeNull) { llvm::Value *DerivedNotNull = - Builder.CreateIsNotNull(Derived.getPointer(), "cast.nonnull"); + Builder.CreateIsNotNull(Derived.emitRawPointer(*this), "cast.nonnull"); llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check"); ContBlock = createBasicBlock("cast.cont"); @@ -2981,7 +2995,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() { QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); Address ThisPtr = GetAddrOfBlockDecl(variable); - CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType); + CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType); // Add the rest of the parameters. 
for (auto *param : BD->parameters()) @@ -3009,7 +3023,7 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) { QualType LambdaType = getContext().getRecordType(Lambda); QualType ThisType = getContext().getPointerType(LambdaType); Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture"); - CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType); + CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType); EmitLambdaDelegatingInvokeBody(MD, CallArgs); } diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp index 2fee6d3ebe4f7..e6f8e6873004f 100644 --- a/clang/lib/CodeGen/CGCleanup.cpp +++ b/clang/lib/CodeGen/CGCleanup.cpp @@ -27,7 +27,7 @@ bool DominatingValue::saved_type::needsSaving(RValue rv) { if (rv.isScalar()) return DominatingLLVMValue::needsSaving(rv.getScalarVal()); if (rv.isAggregate()) - return DominatingLLVMValue::needsSaving(rv.getAggregatePointer()); + return DominatingValue
::needsSaving(rv.getAggregateAddress()); return true; } @@ -35,69 +35,40 @@ DominatingValue::saved_type DominatingValue::saved_type::save(CodeGenFunction &CGF, RValue rv) { if (rv.isScalar()) { llvm::Value *V = rv.getScalarVal(); - - // These automatically dominate and don't need to be saved. - if (!DominatingLLVMValue::needsSaving(V)) - return saved_type(V, nullptr, ScalarLiteral); - - // Everything else needs an alloca. - Address addr = - CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue"); - CGF.Builder.CreateStore(V, addr); - return saved_type(addr.getPointer(), nullptr, ScalarAddress); + return saved_type(DominatingLLVMValue::save(CGF, V), + DominatingLLVMValue::needsSaving(V) ? ScalarAddress + : ScalarLiteral); } if (rv.isComplex()) { CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); - llvm::Type *ComplexTy = - llvm::StructType::get(V.first->getType(), V.second->getType()); - Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex"); - CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0)); - CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1)); - return saved_type(addr.getPointer(), nullptr, ComplexAddress); + return saved_type(DominatingLLVMValue::save(CGF, V.first), + DominatingLLVMValue::save(CGF, V.second)); } assert(rv.isAggregate()); - Address V = rv.getAggregateAddress(); // TODO: volatile? - if (!DominatingLLVMValue::needsSaving(V.getPointer())) - return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral, - V.getAlignment().getQuantity()); - - Address addr = - CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue"); - CGF.Builder.CreateStore(V.getPointer(), addr); - return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress, - V.getAlignment().getQuantity()); + Address V = rv.getAggregateAddress(); + return saved_type( + DominatingValue
<Address>::save(CGF, V), rv.isVolatileQualified(), DominatingValue<Address>
::needsSaving(V) ? AggregateAddress + : AggregateLiteral); } /// Given a saved r-value produced by SaveRValue, perform the code /// necessary to restore it to usability at the current insertion /// point. RValue DominatingValue::saved_type::restore(CodeGenFunction &CGF) { - auto getSavingAddress = [&](llvm::Value *value) { - auto *AI = cast(value); - return Address(value, AI->getAllocatedType(), - CharUnits::fromQuantity(AI->getAlign().value())); - }; switch (K) { case ScalarLiteral: - return RValue::get(Value); case ScalarAddress: - return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value))); + return RValue::get(DominatingLLVMValue::restore(CGF, Vals.first)); case AggregateLiteral: + case AggregateAddress: return RValue::getAggregate( - Address(Value, ElementType, CharUnits::fromQuantity(Align))); - case AggregateAddress: { - auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value)); - return RValue::getAggregate( - Address(addr, ElementType, CharUnits::fromQuantity(Align))); - } + DominatingValue
::restore(CGF, AggregateAddr), IsVolatile); case ComplexAddress: { - Address address = getSavingAddress(Value); - llvm::Value *real = - CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0)); - llvm::Value *imag = - CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1)); + llvm::Value *real = DominatingLLVMValue::restore(CGF, Vals.first); + llvm::Value *imag = DominatingLLVMValue::restore(CGF, Vals.second); return RValue::getComplex(real, imag); } } @@ -294,14 +265,14 @@ void EHScopeStack::popNullFixups() { BranchFixups.pop_back(); } -Address CodeGenFunction::createCleanupActiveFlag() { +RawAddress CodeGenFunction::createCleanupActiveFlag() { // Create a variable to decide whether the cleanup needs to be run. - Address active = CreateTempAllocaWithoutCast( + RawAddress active = CreateTempAllocaWithoutCast( Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond"); // Initialize it to false at a site that's guaranteed to be run // before each evaluation. - setBeforeOutermostConditional(Builder.getFalse(), active); + setBeforeOutermostConditional(Builder.getFalse(), active, *this); // Initialize it to true at the current location. Builder.CreateStore(Builder.getTrue(), active); @@ -309,7 +280,7 @@ Address CodeGenFunction::createCleanupActiveFlag() { return active; } -void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { +void CodeGenFunction::initFullExprCleanupWithFlag(RawAddress ActiveFlag) { // Set that as the active flag in the cleanup. EHCleanupScope &cleanup = cast(*EHStack.begin()); assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?"); @@ -322,15 +293,17 @@ void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { void EHScopeStack::Cleanup::anchor() {} static void createStoreInstBefore(llvm::Value *value, Address addr, - llvm::Instruction *beforeInst) { - auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst); + llvm::Instruction *beforeInst, + CodeGenFunction &CGF) { + auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF), beforeInst); store->setAlignment(addr.getAlignment().getAsAlign()); } static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name, - llvm::Instruction *beforeInst) { - return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name, - false, addr.getAlignment().getAsAlign(), + llvm::Instruction *beforeInst, + CodeGenFunction &CGF) { + return new llvm::LoadInst(addr.getElementType(), addr.emitRawPointer(CGF), + name, false, addr.getAlignment().getAsAlign(), beforeInst); } @@ -357,8 +330,8 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF, // entry which we're currently popping. if (Fixup.OptimisticBranchBlock == nullptr) { createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex), - CGF.getNormalCleanupDestSlot(), - Fixup.InitialBranch); + CGF.getNormalCleanupDestSlot(), Fixup.InitialBranch, + CGF); Fixup.InitialBranch->setSuccessor(0, CleanupEntry); } @@ -385,7 +358,7 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, if (llvm::BranchInst *Br = dyn_cast(Term)) { assert(Br->isUnconditional()); auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(), - "cleanup.dest", Term); + "cleanup.dest", Term, CGF); llvm::SwitchInst *Switch = llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); Br->eraseFromParent(); @@ -513,8 +486,8 @@ void CodeGenFunction::PopCleanupBlocks( I += Header.getSize(); if (Header.isConditional()) { - Address ActiveFlag = - reinterpret_cast
(LifetimeExtendedCleanupStack[I]); + RawAddress ActiveFlag = + reinterpret_cast(LifetimeExtendedCleanupStack[I]); initFullExprCleanupWithFlag(ActiveFlag); I += sizeof(ActiveFlag); } @@ -694,8 +667,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // - whether there's a fallthrough llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); - bool HasFallthrough = - FallthroughSource != nullptr && (IsActive || HasExistingBranches); + bool HasFallthrough = (FallthroughSource != nullptr && IsActive); // Branch-through fall-throughs leave the insertion point set to the // end of the last cleanup, which points to the current scope. The @@ -720,11 +692,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // If we have a prebranched fallthrough into an inactive normal // cleanup, rewrite it so that it leads to the appropriate place. - if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && - !RequiresNormalCleanup) { - // FIXME: Come up with a program which would need forwarding prebranched - // fallthrough and add tests. Otherwise delete this and assert against it. - assert(!IsActive); + if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) { llvm::BasicBlock *prebranchDest; // If the prebranch is semantically branching through the next @@ -797,7 +765,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { EmitSehCppScopeEnd(); } destroyOptimisticNormalEntry(*this, Scope); - Scope.MarkEmitted(); EHStack.popCleanup(); } else { // If we have a fallthrough and no other need for the cleanup, @@ -814,7 +781,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { } destroyOptimisticNormalEntry(*this, Scope); - Scope.MarkEmitted(); EHStack.popCleanup(); EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); @@ -895,7 +861,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (NormalCleanupDestSlot->hasOneUse()) { NormalCleanupDestSlot->user_back()->eraseFromParent(); NormalCleanupDestSlot->eraseFromParent(); - NormalCleanupDest = Address::invalid(); + NormalCleanupDest = RawAddress::invalid(); } llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); @@ -919,9 +885,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // pass the abnormal exit flag to Fn (SEH cleanup) cleanupFlags.setHasExitSwitch(); - llvm::LoadInst *Load = - createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest", - nullptr); + llvm::LoadInst *Load = createLoadInstBefore( + getNormalCleanupDestSlot(), "cleanup.dest", nullptr, *this); llvm::SwitchInst *Switch = llvm::SwitchInst::Create(Load, Default, SwitchCapacity); @@ -951,7 +916,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { } // IV. Pop the cleanup and emit it. - Scope.MarkEmitted(); EHStack.popCleanup(); assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); @@ -969,8 +933,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { if (!Fixup.Destination) continue; if (!Fixup.OptimisticBranchBlock) { createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex), - getNormalCleanupDestSlot(), - Fixup.InitialBranch); + getNormalCleanupDestSlot(), Fixup.InitialBranch, + *this); Fixup.InitialBranch->setSuccessor(0, NormalEntry); } Fixup.OptimisticBranchBlock = NormalExit; @@ -1143,7 +1107,7 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) { // Store the index at the start. 
llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex()); - createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI); + createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI, *this); // Adjust BI to point to the first cleanup block. { @@ -1277,9 +1241,9 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF, // If we're in a conditional block, ignore the dominating IP and // use the outermost conditional branch. if (CGF.isInConditionalBranch()) { - CGF.setBeforeOutermostConditional(value, var); + CGF.setBeforeOutermostConditional(value, var, CGF); } else { - createStoreInstBefore(value, var, dominatingIP); + createStoreInstBefore(value, var, dominatingIP, CGF); } } @@ -1329,7 +1293,7 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, Scope.setActive(false); } -Address CodeGenFunction::getNormalCleanupDestSlot() { +RawAddress CodeGenFunction::getNormalCleanupDestSlot() { if (!NormalCleanupDest.isValid()) NormalCleanupDest = CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot"); diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h index 49364cf58de9a..03e4a29d7b3db 100644 --- a/clang/lib/CodeGen/CGCleanup.h +++ b/clang/lib/CodeGen/CGCleanup.h @@ -16,11 +16,8 @@ #include "EHScopeStack.h" #include "Address.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/IR/Instruction.h" namespace llvm { class BasicBlock; @@ -269,51 +266,6 @@ class alignas(8) EHCleanupScope : public EHScope { }; mutable struct ExtInfo *ExtInfo; - /// Erases auxillary allocas and their usages for an unused cleanup. - /// Cleanups should mark these allocas as 'used' if the cleanup is - /// emitted, otherwise these instructions would be erased. - struct AuxillaryAllocas { - SmallVector AuxAllocas; - bool used = false; - - // Records a potentially unused instruction to be erased later. - void Add(llvm::AllocaInst *Alloca) { AuxAllocas.push_back(Alloca); } - - // Mark all recorded instructions as used. These will not be erased later. - void MarkUsed() { - used = true; - AuxAllocas.clear(); - } - - ~AuxillaryAllocas() { - if (used) - return; - llvm::SetVector Uses; - for (auto *Inst : llvm::reverse(AuxAllocas)) - CollectUses(Inst, Uses); - // Delete uses in the reverse order of insertion. - for (auto *I : llvm::reverse(Uses)) - I->eraseFromParent(); - } - - private: - void CollectUses(llvm::Instruction *I, - llvm::SetVector &Uses) { - if (!I || !Uses.insert(I)) - return; - for (auto *User : I->users()) - CollectUses(cast(User), Uses); - } - }; - mutable struct AuxillaryAllocas *AuxAllocas; - - AuxillaryAllocas &getAuxillaryAllocas() { - if (!AuxAllocas) { - AuxAllocas = new struct AuxillaryAllocas(); - } - return *AuxAllocas; - } - /// The number of fixups required by enclosing scopes (not including /// this one). If this is the top cleanup scope, all the fixups /// from this index onwards belong to this scope. 
@@ -346,7 +298,7 @@ class alignas(8) EHCleanupScope : public EHScope { EHScopeStack::stable_iterator enclosingEH) : EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal), NormalBlock(nullptr), - ActiveFlag(Address::invalid()), ExtInfo(nullptr), AuxAllocas(nullptr), + ActiveFlag(Address::invalid()), ExtInfo(nullptr), FixupDepth(fixupDepth) { CleanupBits.IsNormalCleanup = isNormal; CleanupBits.IsEHCleanup = isEH; @@ -360,15 +312,8 @@ class alignas(8) EHCleanupScope : public EHScope { } void Destroy() { - if (AuxAllocas) - delete AuxAllocas; delete ExtInfo; } - void AddAuxAllocas(llvm::SmallVector Allocas) { - for (auto *Alloca : Allocas) - getAuxillaryAllocas().Add(Alloca); - } - void MarkEmitted() { getAuxillaryAllocas().MarkUsed(); } // Objects of EHCleanupScope are not destructed. Use Destroy(). ~EHCleanupScope() = delete; @@ -388,7 +333,7 @@ class alignas(8) EHCleanupScope : public EHScope { Address getActiveFlag() const { return ActiveFlag; } - void setActiveFlag(Address Var) { + void setActiveFlag(RawAddress Var) { assert(Var.getAlignment().isOne()); ActiveFlag = Var; } diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp index b7142ec08af98..93ca711f716fc 100644 --- a/clang/lib/CodeGen/CGCoroutine.cpp +++ b/clang/lib/CodeGen/CGCoroutine.cpp @@ -867,8 +867,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) { EmitStmt(S.getPromiseDeclStmt()); Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl()); - auto *PromiseAddrVoidPtr = - new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId); + auto *PromiseAddrVoidPtr = new llvm::BitCastInst( + PromiseAddr.emitRawPointer(*this), VoidPtrTy, "", CoroId); // Update CoroId to refer to the promise. We could not do it earlier because // promise local variable was not emitted yet. CoroId->setArgOperand(1, PromiseAddrVoidPtr); diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index 6a7b54942eaaf..4dbecebb70029 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -20,7 +20,6 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" -#include "EHScopeStack.h" #include "PatternInit.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" @@ -1420,7 +1419,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions( // For each dimension stores its QualType and corresponding // size-expression Value. SmallVector Dimensions; - SmallVector VLAExprNames; + SmallVector VLAExprNames; // Break down the array into individual dimensions. QualType Type1D = D.getType(); @@ -1457,7 +1456,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions( MD = llvm::ConstantAsMetadata::get(C); else { // Create an artificial VarDecl to generate debug info for. 
- IdentifierInfo *NameIdent = VLAExprNames[NameIdx++]; + const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++]; auto QT = getContext().getIntTypeForBitwidth( SizeTy->getScalarSizeInBits(), false); auto *ArtificialDecl = VarDecl::Create( @@ -1498,7 +1497,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo(); Address address = Address::invalid(); - Address AllocaAddr = Address::invalid(); + RawAddress AllocaAddr = RawAddress::invalid(); Address OpenMPLocalAddr = Address::invalid(); if (CGM.getLangOpts().OpenMPIRBuilder) OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D); @@ -1561,7 +1560,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { // return slot, so that we can elide the copy when returning this // variable (C++0x [class.copy]p34). address = ReturnValue; - AllocaAddr = ReturnValue; + AllocaAddr = + RawAddress(ReturnValue.emitRawPointer(*this), + ReturnValue.getElementType(), ReturnValue.getAlignment()); + ; if (const RecordType *RecordTy = Ty->getAs()) { const auto *RD = RecordTy->getDecl(); @@ -1572,7 +1574,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { // to this variable. Set it to zero to indicate that NRVO was not // applied. llvm::Value *Zero = Builder.getFalse(); - Address NRVOFlag = + RawAddress NRVOFlag = CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo"); EnsureInsertPoint(); Builder.CreateStore(Zero, NRVOFlag); @@ -1719,7 +1721,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { SmallString<256> AnnotStr; CGM.generateIntelFPGAAnnotation(&D, AnnotStr); if (!AnnotStr.empty()) { - llvm::Value *V = address.getPointer(); + llvm::Value *V = address.emitRawPointer(*this); llvm::Type *DestPtrTy = llvm::PointerType::get( CGM.getLLVMContext(), address.getAddressSpace()); llvm::Value *Arg = Builder.CreateBitCast(V, DestPtrTy, V->getName()); @@ -1733,7 +1735,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { } if (D.hasAttr() && HaveInsertPoint()) - EmitVarAnnotations(&D, address.getPointer()); + EmitVarAnnotations(&D, address.emitRawPointer(*this)); // Make sure we call @llvm.lifetime.end. if (emission.useLifetimeMarkers()) @@ -1906,12 +1908,13 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type, llvm::Value *BaseSizeInChars = llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity()); Address Begin = Loc.withElementType(Int8Ty); - llvm::Value *End = Builder.CreateInBoundsGEP( - Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end"); + llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(), + Begin.emitRawPointer(*this), + SizeVal, "vla.end"); llvm::BasicBlock *OriginBB = Builder.GetInsertBlock(); EmitBlock(LoopBB); llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur"); - Cur->addIncoming(Begin.getPointer(), OriginBB); + Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB); CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize); auto *I = Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign), @@ -2252,24 +2255,6 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, destroyer, useEHCleanupForArray); } -// Pushes a destroy and defers its deactivation until its -// CleanupDeactivationScope is exited. 
-void CodeGenFunction::pushDestroyAndDeferDeactivation( - QualType::DestructionKind dtorKind, Address addr, QualType type) { - assert(dtorKind && "cannot push destructor for trivial type"); - - CleanupKind cleanupKind = getCleanupKind(dtorKind); - pushDestroyAndDeferDeactivation( - cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup); -} - -void CodeGenFunction::pushDestroyAndDeferDeactivation( - CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer, - bool useEHCleanupForArray) { - pushCleanupAndDeferDeactivation( - cleanupKind, addr, type, destroyer, useEHCleanupForArray); -} - void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) { EHStack.pushCleanup(Kind, SPMem); } @@ -2286,19 +2271,16 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind, // If we're not in a conditional branch, we don't need to bother generating a // conditional cleanup. if (!isInConditionalBranch()) { + // Push an EH-only cleanup for the object now. // FIXME: When popping normal cleanups, we need to keep this EH cleanup // around in case a temporary's destructor throws an exception. + if (cleanupKind & EHCleanup) + EHStack.pushCleanup( + static_cast(cleanupKind & ~NormalCleanup), addr, type, + destroyer, useEHCleanupForArray); - // Add the cleanup to the EHStack. After the full-expr, this would be - // deactivated before being popped from the stack. - pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer, - useEHCleanupForArray); - - // Since this is lifetime-extended, push it once again to the EHStack after - // the full expression. return pushCleanupAfterFullExprWithActiveFlag( - cleanupKind, Address::invalid(), addr, type, destroyer, - useEHCleanupForArray); + cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray); } // Otherwise, we should only destroy the object if it's been initialized. @@ -2313,12 +2295,13 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind, Address ActiveFlag = createCleanupActiveFlag(); SavedType SavedAddr = saveValueInCond(addr); - pushCleanupAndDeferDeactivation( - cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray); - initFullExprCleanupWithFlag(ActiveFlag); + if (cleanupKind & EHCleanup) { + EHStack.pushCleanup( + static_cast(cleanupKind & ~NormalCleanup), SavedAddr, type, + destroyer, useEHCleanupForArray); + initFullExprCleanupWithFlag(ActiveFlag); + } - // Since this is lifetime-extended, push it once again to the EHStack after - // the full expression. pushCleanupAfterFullExprWithActiveFlag( cleanupKind, ActiveFlag, SavedAddr, type, destroyer, useEHCleanupForArray); @@ -2358,7 +2341,7 @@ void CodeGenFunction::emitDestroy(Address addr, QualType type, checkZeroLength = false; } - llvm::Value *begin = addr.getPointer(); + llvm::Value *begin = addr.emitRawPointer(*this); llvm::Value *end = Builder.CreateInBoundsGEP(addr.getElementType(), begin, length); emitArrayDestroy(begin, end, type, elementAlign, destroyer, @@ -2513,9 +2496,9 @@ namespace { }; } // end anonymous namespace -/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to -/// destroy already-constructed elements of the given array. The cleanup may be -/// popped with DeactivateCleanupBlock or PopCleanupBlock. +/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy +/// already-constructed elements of the given array. The cleanup +/// may be popped with DeactivateCleanupBlock or PopCleanupBlock. 
/// /// \param elementType - the immediate element type of the array; /// possibly still an array type @@ -2524,9 +2507,10 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, QualType elementType, CharUnits elementAlign, Destroyer *destroyer) { - pushFullExprCleanup( - NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType, - elementAlign, destroyer); + pushFullExprCleanup(EHCleanup, + arrayBegin, arrayEndPointer, + elementType, elementAlign, + destroyer); } /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy @@ -2617,7 +2601,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, } Address DeclPtr = Address::invalid(); - Address AllocaPtr = Address::invalid(); + RawAddress AllocaPtr = Address::invalid(); bool DoStore = false; bool IsScalar = hasScalarEvaluationKind(Ty); bool UseIndirectDebugAddress = false; @@ -2629,8 +2613,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, // Indirect argument is in alloca address space, which may be different // from the default address space. auto AllocaAS = CGM.getASTAllocaAddressSpace(); - auto *V = DeclPtr.getPointer(); - AllocaPtr = DeclPtr; + auto *V = DeclPtr.emitRawPointer(*this); + AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment()); auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS; auto DestLangAS = @@ -2655,7 +2639,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, auto PtrTy = getContext().getPointerType(Ty); AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy), D.getName() + ".indirect_addr"); - EmitStoreOfScalar(DeclPtr.getPointer(), AllocaPtr, /* Volatile */ false, + EmitStoreOfScalar(DeclPtr.emitRawPointer(*this), AllocaPtr, /* Volatile */ false, PtrTy); } @@ -2770,7 +2754,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, } if (D.hasAttr()) - EmitVarAnnotations(&D, DeclPtr.getPointer()); + EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this)); // We can only check return value nullability if all arguments to the // function satisfy their nullability preconditions. This makes it necessary diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp index f222b6e7a43c6..4991fdafff45a 100644 --- a/clang/lib/CodeGen/CGException.cpp +++ b/clang/lib/CodeGen/CGException.cpp @@ -397,7 +397,7 @@ namespace { void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) { // Make sure the exception object is cleaned up if there's an // exception during initialization. - pushFullExprCleanup(EHCleanup, addr.getPointer()); + pushFullExprCleanup(EHCleanup, addr.emitRawPointer(*this)); EHScopeStack::stable_iterator cleanup = EHStack.stable_begin(); // __cxa_allocate_exception returns a void*; we need to cast this @@ -416,8 +416,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) { /*IsInit*/ true); // Deactivate the cleanup block. 
- DeactivateCleanupBlock(cleanup, - cast(typedAddr.getPointer())); + DeactivateCleanupBlock( + cleanup, cast(typedAddr.emitRawPointer(*this))); } Address CodeGenFunction::getExceptionSlot() { @@ -1834,7 +1834,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, llvm::Value *ParentFP) { llvm::CallInst *RecoverCall = nullptr; CGBuilderTy Builder(*this, AllocaInsertPt); - if (auto *ParentAlloca = dyn_cast(ParentVar.getPointer())) { + if (auto *ParentAlloca = + dyn_cast_or_null(ParentVar.getBasePointer())) { // Mark the variable escaped if nobody else referenced it and compute the // localescape index. auto InsertPair = ParentCGF.EscapedLocals.insert( @@ -1851,8 +1852,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, // If the parent didn't have an alloca, we're doing some nested outlining. // Just clone the existing localrecover call, but tweak the FP argument to // use our FP value. All other arguments are constants. - auto *ParentRecover = - cast(ParentVar.getPointer()->stripPointerCasts()); + auto *ParentRecover = cast( + ParentVar.emitRawPointer(*this)->stripPointerCasts()); assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover && "expected alloca or localrecover in parent LocalDeclMap"); RecoverCall = cast(ParentRecover->clone()); @@ -1925,7 +1926,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF, if (isa(D) && D->getType() == getContext().VoidPtrTy) { assert(D->getName().starts_with("frame_pointer")); - FramePtrAddrAlloca = cast(I.second.getPointer()); + FramePtrAddrAlloca = + cast(I.second.getBasePointer()); break; } } @@ -1986,7 +1988,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF, LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField); if (!LambdaThisCaptureField->getType()->isPointerType()) { - CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer(); + CXXThisValue = + ThisFieldLValue.getAddress(*this).emitRawPointer(*this); } else { CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation()) .getScalarVal(); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 9ed840b81af0b..08e424e86e542 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -71,21 +71,21 @@ static llvm::cl::opt ClSanitizeGuardChecks( /// CreateTempAlloca - This creates a alloca and inserts it into the entry /// block. -Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, - CharUnits Align, - const Twine &Name, - llvm::Value *ArraySize) { +RawAddress +CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align, + const Twine &Name, + llvm::Value *ArraySize) { auto Alloca = CreateTempAlloca(Ty, Name, ArraySize); Alloca->setAlignment(Align.getAsAlign()); - return Address(Alloca, Ty, Align, KnownNonNull); + return RawAddress(Alloca, Ty, Align, KnownNonNull); } /// CreateTempAlloca - This creates a alloca and inserts it into the entry /// block. The alloca is casted to default address space if necessary. 
-Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, - const Twine &Name, - llvm::Value *ArraySize, - Address *AllocaAddr) { +RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, + const Twine &Name, + llvm::Value *ArraySize, + RawAddress *AllocaAddr) { auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize); if (AllocaAddr) *AllocaAddr = Alloca; @@ -107,7 +107,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, Ty->getPointerTo(DestAddrSpace), /*non-null*/ true); } - return Address(V, Ty, Align, KnownNonNull); + return RawAddress(V, Ty, Align, KnownNonNull); } /// CreateTempAlloca - This creates an alloca and inserts it into the entry @@ -116,44 +116,39 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, const Twine &Name, llvm::Value *ArraySize) { - llvm::AllocaInst *Alloca; if (ArraySize) - Alloca = Builder.CreateAlloca(Ty, ArraySize, Name); - else - Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), - ArraySize, Name, AllocaInsertPt); - if (Allocas) { - Allocas->Add(Alloca); - } - return Alloca; + return Builder.CreateAlloca(Ty, ArraySize, Name); + return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), + ArraySize, Name, AllocaInsertPt); } /// CreateDefaultAlignTempAlloca - This creates an alloca with the /// default alignment of the corresponding LLVM type, which is *not* /// guaranteed to be related in any way to the expected alignment of /// an AST type that might have been lowered to Ty. -Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty, - const Twine &Name) { +RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty, + const Twine &Name) { CharUnits Align = CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty)); return CreateTempAlloca(Ty, Align, Name); } -Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { +RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { CharUnits Align = getContext().getTypeAlignInChars(Ty); return CreateTempAlloca(ConvertType(Ty), Align, Name); } -Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name, - Address *Alloca) { +RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name, + RawAddress *Alloca) { // FIXME: Should we prefer the preferred type alignment here? 
return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca); } -Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, - const Twine &Name, Address *Alloca) { - Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, - /*ArraySize=*/nullptr, Alloca); +RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, + const Twine &Name, + RawAddress *Alloca) { + RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, + /*ArraySize=*/nullptr, Alloca); if (Ty->isConstantMatrixType()) { auto *ArrayTy = cast(Result.getElementType()); @@ -166,13 +161,14 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, return Result; } -Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align, - const Twine &Name) { +RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, + CharUnits Align, + const Twine &Name) { return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name); } -Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, - const Twine &Name) { +RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, + const Twine &Name) { return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty), Name); } @@ -371,7 +367,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, } else { CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor( GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete)); - CleanupArg = cast(ReferenceTemporary.getPointer()); + CleanupArg = cast(ReferenceTemporary.emitRawPointer(CGF)); } CGF.CGM.getCXXABI().registerGlobalDtor( CGF, *cast(M->getExtendingDecl()), CleanupFn, CleanupArg); @@ -396,10 +392,10 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, } } -static Address createReferenceTemporary(CodeGenFunction &CGF, - const MaterializeTemporaryExpr *M, - const Expr *Inner, - Address *Alloca = nullptr) { +static RawAddress createReferenceTemporary(CodeGenFunction &CGF, + const MaterializeTemporaryExpr *M, + const Expr *Inner, + RawAddress *Alloca = nullptr) { auto &TCG = CGF.getTargetHooks(); switch (M->getStorageDuration()) { case SD_FullExpression: @@ -428,7 +424,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF, GV->getValueType()->getPointerTo( CGF.getContext().getTargetAddressSpace(LangAS::Default))); // FIXME: Should we put the new global into a COMDAT? - return Address(C, GV->getValueType(), alignment); + return RawAddress(C, GV->getValueType(), alignment); } return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca); } @@ -460,7 +456,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { auto ownership = M->getType().getObjCLifetime(); if (ownership != Qualifiers::OCL_None && ownership != Qualifiers::OCL_ExplicitNone) { - Address Object = createReferenceTemporary(*this, M, E); + RawAddress Object = createReferenceTemporary(*this, M, E); if (auto *Var = dyn_cast(Object.getPointer())) { llvm::Type *Ty = ConvertTypeForMem(E->getType()); Object = Object.withElementType(Ty); @@ -514,8 +510,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { } // Create and initialize the reference temporary. 
- Address Alloca = Address::invalid(); - Address Object = createReferenceTemporary(*this, M, E, &Alloca); + RawAddress Alloca = Address::invalid(); + RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca); if (auto *Var = dyn_cast( Object.getPointer()->stripPointerCasts())) { llvm::Type *TemporaryType = ConvertTypeForMem(E->getType()); @@ -1123,12 +1119,12 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr( } else if (const MemberExpr *ME = dyn_cast(StructBase)) { LValue LV = EmitMemberExpr(ME); Address Addr = LV.getAddress(*this); - Res = Addr.getPointer(); + Res = Addr.emitRawPointer(*this); } else if (StructBase->getType()->isPointerType()) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo); - Res = Addr.getPointer(); + Res = Addr.emitRawPointer(*this); } else { return nullptr; } @@ -1294,8 +1290,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { if (BaseInfo) BaseInfo->mergeForCast(TargetTypeBaseInfo); - Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, - IsKnownNonNull); + Addr.setAlignment(Align); } } @@ -1312,8 +1307,8 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, CGF.ConvertTypeForMem(E->getType()->getPointeeType()); Addr = Addr.withElementType(ElemTy); if (CE->getCastKind() == CK_AddressSpaceConversion) - Addr = CGF.Builder.CreateAddrSpaceCast(Addr, - CGF.ConvertType(E->getType())); + Addr = CGF.Builder.CreateAddrSpaceCast( + Addr, CGF.ConvertType(E->getType()), ElemTy); return Addr; } break; @@ -1376,10 +1371,9 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, // TODO: conditional operators, comma. // Otherwise, use the alignment of the type. - CharUnits Align = - CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo); - llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType()); - return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull); + return CGF.makeNaturalAddressForPointer( + CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(), + /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull); } /// EmitPointerWithAlignment - Given an expression of pointer type, try to @@ -1480,8 +1474,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { if (IsBaseCXXThis || isa(ME->getBase())) SkippedChecks.set(SanitizerKind::Null, true); } - EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(), - LV.getAlignment(), SkippedChecks); + EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks); } return LV; } @@ -1593,11 +1586,11 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E, // Defend against branches out of gnu statement expressions surrounded by // cleanups. Address Addr = LV.getAddress(*this); - llvm::Value *V = Addr.getPointer(); + llvm::Value *V = Addr.getBasePointer(); Scope.ForceCleanup({&V}); - return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()), - LV.getType(), getContext(), LV.getBaseInfo(), - LV.getTBAAInfo()); + Addr.replaceBasePointer(V); + return LValue::MakeAddr(Addr, LV.getType(), getContext(), + LV.getBaseInfo(), LV.getTBAAInfo()); } // FIXME: Is it possible to create an ExprWithCleanups that produces a // bitfield lvalue or some other non-simple lvalue? 
@@ -1941,7 +1934,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal) { - if (auto *GV = dyn_cast(Addr.getPointer())) + if (auto *GV = dyn_cast(Addr.getBasePointer())) if (GV->isThreadLocal()) Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV), NotKnownNonNull); @@ -2051,8 +2044,9 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { // Convert the pointer of \p Addr to a pointer to a vector (the value type of // MatrixType), if it points to a array (the memory type of MatrixType). -static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF, - bool IsVector = true) { +static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, + CodeGenFunction &CGF, + bool IsVector = true) { auto *ArrayTy = dyn_cast(Addr.getElementType()); if (ArrayTy && IsVector) { auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), @@ -2089,7 +2083,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit, bool isNontemporal) { - if (auto *GV = dyn_cast(Addr.getPointer())) + if (auto *GV = dyn_cast(Addr.getBasePointer())) if (GV->isThreadLocal()) Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV), NotKnownNonNull); @@ -2444,14 +2438,12 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); llvm::Type *ResultType = IntPtrTy; Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); - llvm::Value *RHS = dst.getPointer(); + llvm::Value *RHS = dst.emitRawPointer(*this); RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); - llvm::Value *LHS = - Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, - "sub.ptr.lhs.cast"); + llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this), + ResultType, "sub.ptr.lhs.cast"); llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); - CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, - BytesBetween); + CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween); } else if (Dst.isGlobalObjCRef()) { CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, Dst.isThreadLocalRef()); @@ -2782,12 +2774,9 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal, llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); - - QualType PointeeType = RefLVal.getType()->getPointeeType(); - CharUnits Align = CGM.getNaturalTypeAlignment( - PointeeType, PointeeBaseInfo, PointeeTBAAInfo, - /* forPointeeType= */ true); - return Address(Load, ConvertTypeForMem(PointeeType), Align); + return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(), + CharUnits(), /*ForPointeeType=*/true, + PointeeBaseInfo, PointeeTBAAInfo); } LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { @@ -2804,10 +2793,9 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { llvm::Value *Addr = Builder.CreateLoad(Ptr); - return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()), - CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo, - TBAAInfo, - /*forPointeeType=*/true)); + return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(), + CharUnits(), /*ForPointeeType=*/true, + BaseInfo, TBAAInfo); } LValue 
CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, @@ -3003,7 +2991,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { /* BaseInfo= */ nullptr, /* TBAAInfo= */ nullptr, /* forPointeeType= */ true); - Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment); + Addr = makeNaturalAddressForPointer(Val, T, Alignment); } return MakeAddrLValue(Addr, T, AlignmentSource::Decl); } @@ -3035,11 +3023,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), CapturedStmtInfo->getContextValue()); Address LValueAddress = CapLVal.getAddress(*this); - CapLVal = MakeAddrLValue( - Address(LValueAddress.getPointer(), LValueAddress.getElementType(), - getContext().getDeclAlign(VD)), - CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), - CapLVal.getTBAAInfo()); + CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this), + LValueAddress.getElementType(), + getContext().getDeclAlign(VD)), + CapLVal.getType(), + LValueBaseInfo(AlignmentSource::Decl), + CapLVal.getTBAAInfo()); // Mark lvalue as nontemporal if the variable is marked as nontemporal // in simd context. if (getLangOpts().OpenMP && @@ -3103,7 +3092,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { // Handle threadlocal function locals. if (VD->getTLSKind() != VarDecl::TLS_None) addr = addr.withPointer( - Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull); + Builder.CreateThreadLocalAddress(addr.getBasePointer()), + NotKnownNonNull); // Check for OpenMP threadprivate variables. if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && @@ -3371,7 +3361,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { // Pointers are passed directly, everything else is passed by address. if (!V->getType()->isPointerTy()) { - Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); + RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType()); Builder.CreateStore(V, Ptr); V = Ptr.getPointer(); } @@ -3955,6 +3945,21 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, } } +static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, + ArrayRef indices, + llvm::Type *elementType, bool inbounds, + bool signedIndices, SourceLocation loc, + CharUnits align, + const llvm::Twine &name = "arrayidx") { + if (inbounds) { + return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices, + CodeGenFunction::NotSubtraction, loc, + align, name); + } else { + return CGF.Builder.CreateGEP(addr, indices, elementType, align, name); + } +} + static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize) { @@ -4012,7 +4017,7 @@ static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, llvm::Function *Fn = CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset); - llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()}); + llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)}); return Address(Call, Addr.getElementType(), Addr.getAlignment()); } @@ -4076,7 +4081,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, // We can use that to compute the best alignment of the element. 
CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); CharUnits eltAlign = - getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); + getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); if (hasBPFPreserveStaticOffset(Base)) addr = wrapWithBPFPreserveStaticOffset(CGF, addr); @@ -4085,20 +4090,20 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, auto LastIndex = dyn_cast(indices.back()); if (!LastIndex || (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) { - eltPtr = emitArraySubscriptGEP( - CGF, addr.getElementType(), addr.getPointer(), indices, inbounds, - signedIndices, loc, name); - AddIVDepMetadata(CGF, arrayDecl, eltPtr); + addr = emitArraySubscriptGEP(CGF, addr, indices, + CGF.ConvertTypeForMem(eltType), inbounds, + signedIndices, loc, eltAlign, name); + AddIVDepMetadata(CGF, arrayDecl, addr.emitRawPointer(CGF)); + return addr; } else { // Remember the original array subscript for bpf target unsigned idx = LastIndex->getZExtValue(); llvm::DIType *DbgInfo = nullptr; if (arrayType) DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc); - eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(), - addr.getPointer(), - indices.size() - 1, - idx, DbgInfo); + eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex( + addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1, + idx, DbgInfo); } return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign); @@ -4267,8 +4272,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, CharUnits EltAlign = getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); llvm::Value *EltPtr = - emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx, - false, SignedIndices, E->getExprLoc()); + emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this), + ScaledIdx, false, SignedIndices, E->getExprLoc()); Addr = Address(EltPtr, OrigBaseElemTy, EltAlign); } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { // If this is A[i] where A is an array, the frontend will have decayed the @@ -4320,7 +4325,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, llvm::Type *CountTy = ConvertType(CountFD->getType()); llvm::Value *Res = Builder.CreateInBoundsGEP( - Int8Ty, Addr.getPointer(), + Int8Ty, Addr.emitRawPointer(*this), Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep"); Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(), ".counted_by.load"); @@ -4575,9 +4580,9 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, BaseInfo = ArrayLV.getBaseInfo(); TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy); } else { - Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, - TBAAInfo, BaseTy, ResultExprTy, - IsLowerBound); + Address Base = + emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy, + ResultExprTy, IsLowerBound); EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), /*signedIndices=*/false, E->getExprLoc()); @@ -4664,7 +4669,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(BaseExpr)) SkippedChecks.set(SanitizerKind::Null, true); - EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, + EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy, /*Alignment=*/CharUnits::Zero(), 
SkippedChecks); BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); } else @@ -4713,8 +4718,8 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field, LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(), AlignmentSource::Decl); else - LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(), - D->getType().getNonReferenceType()); + LambdaLV = MakeAddrLValue(AddrOfExplicitObject, + D->getType().getNonReferenceType()); } else { QualType LambdaTagType = getContext().getTagDeclType(Field->getParent()); LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType); @@ -4735,7 +4740,7 @@ unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, for (auto *F : Rec->getDefinition()->fields()) { if (I == FieldIndex) break; - if (F->isUnnamedBitfield()) + if (F->isUnnamedBitField()) Skipped++; I++; } @@ -4904,7 +4909,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, // information provided by invariant.group. This is because accessing // fields may leak the real address of dynamic object, which could result // in miscompilation when leaked pointer would be compared. - auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer()); + auto *stripped = + Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this)); addr = Address(stripped, addr.getElementType(), addr.getAlignment()); } } @@ -4923,10 +4929,11 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, // Remember the original union field index llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(), rec->getLocation()); - addr = Address( - Builder.CreatePreserveUnionAccessIndex( - addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), - addr.getElementType(), addr.getAlignment()); + addr = + Address(Builder.CreatePreserveUnionAccessIndex( + addr.emitRawPointer(*this), + getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), + addr.getElementType(), addr.getAlignment()); } if (FieldType->isReferenceType()) @@ -5174,11 +5181,9 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue( if (Info.LHS && Info.RHS) { Address lhsAddr = Info.LHS->getAddress(*this); Address rhsAddr = Info.RHS->getAddress(*this); - llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue"); - phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock); - phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock); - Address result(phi, lhsAddr.getElementType(), - std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment())); + Address result = mergeAddressesInConditionalExpr( + lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock, + Builder.GetInsertBlock(), expr->getType()); AlignmentSource alignSource = std::max(Info.LHS->getBaseInfo().getAlignmentSource(), Info.RHS->getBaseInfo().getAlignmentSource()); @@ -5266,7 +5271,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); Address V = LV.getAddress(*this); const auto *DCE = cast(E); - return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); + return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: @@ -5331,8 +5336,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is // performed and the object is not of the derived type. 
if (sanitizePerformTypeCheck()) - EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), - Derived.getPointer(), E->getType()); + EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived, + E->getType()); if (SanOpts.has(SanitizerKind::CFIDerivedCast)) EmitVTablePtrCheckForCast(E->getType(), Derived, @@ -5721,7 +5726,7 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { LValue CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { - return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType()); + return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType()); } Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 0b203b9decd2d..fad8d405e95c4 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -15,7 +15,6 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" -#include "EHScopeStack.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" @@ -25,7 +24,6 @@ #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" -#include "llvm/IR/Instruction.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" using namespace clang; @@ -295,10 +293,10 @@ void AggExprEmitter::withReturnValueSlot( // Otherwise, EmitCall will emit its own, notice that it's "unused", and end // its lifetime before we have the chance to emit a proper destructor call. bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() || - (RequiresDestruction && !Dest.getAddress().isValid()); + (RequiresDestruction && Dest.isIgnored()); Address RetAddr = Address::invalid(); - Address RetAllocaAddr = Address::invalid(); + RawAddress RetAllocaAddr = RawAddress::invalid(); EHScopeStack::stable_iterator LifetimeEndBlock; llvm::Value *LifetimeSizePtr = nullptr; @@ -330,7 +328,8 @@ void AggExprEmitter::withReturnValueSlot( if (!UseTemp) return; - assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); + assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) != + Src.getAggregatePointer(E->getType(), CGF)); EmitFinalDestCopy(E->getType(), Src); if (!RequiresDestruction && LifetimeStartInst) { @@ -449,7 +448,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0); llvm::Value *IdxStart[] = { Zero, Zero }; llvm::Value *ArrayStart = Builder.CreateInBoundsGEP( - ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart"); + ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxStart, + "arraystart"); CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start); ++Field; @@ -466,7 +466,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { // End pointer. llvm::Value *IdxEnd[] = { Zero, Size }; llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP( - ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend"); + ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd, + "arrayend"); CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { // Length. @@ -517,9 +518,9 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, // down a level. 
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); llvm::Value *indices[] = { zero, zero }; - llvm::Value *begin = Builder.CreateInBoundsGEP( - DestPtr.getElementType(), DestPtr.getPointer(), indices, - "arrayinit.begin"); + llvm::Value *begin = Builder.CreateInBoundsGEP(DestPtr.getElementType(), + DestPtr.emitRawPointer(CGF), + indices, "arrayinit.begin"); CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); CharUnits elementAlign = @@ -556,27 +557,24 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, // For that, we'll need an EH cleanup. QualType::DestructionKind dtorKind = elementType.isDestructedType(); Address endOfInit = Address::invalid(); - CodeGenFunction::CleanupDeactivationScope deactivation(CGF); - - if (dtorKind) { - CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF); + EHScopeStack::stable_iterator cleanup; + llvm::Instruction *cleanupDominator = nullptr; + if (CGF.needsEHCleanup(dtorKind)) { // In principle we could tell the cleanup where we are more // directly, but the control flow can get so varied here that it // would actually be quite complex. Therefore we go through an // alloca. - llvm::Instruction *dominatingIP = - Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy)); endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(), "arrayinit.endOfInit"); - Builder.CreateStore(begin, endOfInit); + cleanupDominator = Builder.CreateStore(begin, endOfInit); CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType, elementAlign, CGF.getDestroyer(dtorKind)); - cast(*CGF.EHStack.find(CGF.EHStack.stable_begin())) - .AddAuxAllocas(allocaTracker.Take()); + cleanup = CGF.EHStack.stable_begin(); - CGF.DeferredDeactivationCleanupStack.push_back( - {CGF.EHStack.stable_begin(), dominatingIP}); + // Otherwise, remember that we didn't need a cleanup. + } else { + dtorKind = QualType::DK_none; } llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1); @@ -672,6 +670,9 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, CGF.EmitBlock(endBB); } + + // Leave the partial-array cleanup if we entered one. + if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator); } //===----------------------------------------------------------------------===// @@ -1063,7 +1064,7 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { if (RV.isScalar()) return {RV.getScalarVal(), nullptr}; if (RV.isAggregate()) - return {RV.getAggregatePointer(), nullptr}; + return {RV.getAggregatePointer(E->getType(), CGF), nullptr}; assert(RV.isComplex()); return RV.getComplexVal(); }; @@ -1372,8 +1373,9 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType()); // We'll need to enter cleanup scopes in case any of the element - // initializers throws an exception or contains branch out of the expressions. - CodeGenFunction::CleanupDeactivationScope scope(CGF); + // initializers throws an exception. 
+ SmallVector Cleanups; + llvm::Instruction *CleanupDominator = nullptr; CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), @@ -1392,12 +1394,28 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { if (QualType::DestructionKind DtorKind = CurField->getType().isDestructedType()) { assert(LV.isSimple()); - if (DtorKind) - CGF.pushDestroyAndDeferDeactivation( - NormalAndEHCleanup, LV.getAddress(CGF), CurField->getType(), - CGF.getDestroyer(DtorKind), false); + if (CGF.needsEHCleanup(DtorKind)) { + if (!CleanupDominator) + CleanupDominator = CGF.Builder.CreateAlignedLoad( + CGF.Int8Ty, + llvm::Constant::getNullValue(CGF.Int8PtrTy), + CharUnits::One()); // placeholder + + CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), + CGF.getDestroyer(DtorKind), false); + Cleanups.push_back(CGF.EHStack.stable_begin()); + } } } + + // Deactivate all the partial cleanups in reverse order, which + // generally means popping them. + for (unsigned i = Cleanups.size(); i != 0; --i) + CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator); + + // Destroy the placeholder if we made one. + if (CleanupDominator) + CleanupDominator->eraseFromParent(); } void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { @@ -1686,7 +1704,14 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // We'll need to enter cleanup scopes in case any of the element // initializers throws an exception. SmallVector cleanups; - CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF); + llvm::Instruction *cleanupDominator = nullptr; + auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) { + cleanups.push_back(cleanup); + if (!cleanupDominator) // create placeholder once needed + cleanupDominator = CGF.Builder.CreateAlignedLoad( + CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy), + CharUnits::One()); + }; unsigned curInitIndex = 0; @@ -1709,8 +1734,10 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot); if (QualType::DestructionKind dtorKind = - Base.getType().isDestructedType()) - CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType()); + Base.getType().isDestructedType()) { + CGF.pushDestroy(dtorKind, V, Base.getType()); + addCleanup(CGF.EHStack.stable_begin()); + } } } @@ -1727,7 +1754,9 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // Make sure that it's really an empty and not a failure of // semantic analysis. for (const auto *Field : record->fields()) - assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or ananymous class allowed"); + assert( + (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) && + "Only unnamed bitfields or ananymous class allowed"); #endif return; } @@ -1755,7 +1784,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( break; // Always skip anonymous bitfields. 
- if (field->isUnnamedBitfield()) + if (field->isUnnamedBitField()) continue; // We're done if we reach the end of the explicit initializers, we @@ -1785,10 +1814,10 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( if (QualType::DestructionKind dtorKind = field->getType().isDestructedType()) { assert(LV.isSimple()); - if (dtorKind) { - CGF.pushDestroyAndDeferDeactivation( - NormalAndEHCleanup, LV.getAddress(CGF), field->getType(), - CGF.getDestroyer(dtorKind), false); + if (CGF.needsEHCleanup(dtorKind)) { + CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), + CGF.getDestroyer(dtorKind), false); + addCleanup(CGF.EHStack.stable_begin()); pushedCleanup = true; } } @@ -1797,10 +1826,21 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // else, clean it up for -O0 builds and general tidiness. if (!pushedCleanup && LV.isSimple()) if (llvm::GetElementPtrInst *GEP = - dyn_cast(LV.getPointer(CGF))) + dyn_cast(LV.emitRawPointer(CGF))) if (GEP->use_empty()) GEP->eraseFromParent(); } + + // Deactivate all the partial cleanups in reverse order, which + // generally means popping them. + assert((cleanupDominator || cleanups.empty()) && + "Missing cleanupDominator before deactivating cleanup blocks"); + for (unsigned i = cleanups.size(); i != 0; --i) + CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); + + // Destroy the placeholder if we made one. + if (cleanupDominator) + cleanupDominator->eraseFromParent(); } void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, @@ -1817,9 +1857,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, // destPtr is an array*. Construct an elementType* by drilling down a level. llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); llvm::Value *indices[] = {zero, zero}; - llvm::Value *begin = Builder.CreateInBoundsGEP( - destPtr.getElementType(), destPtr.getPointer(), indices, - "arrayinit.begin"); + llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(), + destPtr.emitRawPointer(CGF), + indices, "arrayinit.begin"); // Prepare to special-case multidimensional array initialization: we avoid // emitting multiple destructor loops in that case. 
@@ -1949,7 +1989,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) { if (Field->getType()->isIncompleteArrayType() || ILEElement == ILE->getNumInits()) break; - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; const Expr *E = ILE->getInit(ILEElement++); diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index 019b828850c70..673ccef84d678 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -280,7 +280,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); - This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo); + This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(), + BaseInfo, TBAAInfo); } else { This = EmitLValue(Base); } @@ -353,10 +354,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( if (IsImplicitObjectCXXThis || isa(IOA)) SkippedChecks.set(SanitizerKind::Null, true); } - EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, - This.getPointer(*this), - C.getRecordType(CalleeDecl->getParent()), - /*Alignment=*/CharUnits::Zero(), SkippedChecks); + + if (sanitizePerformTypeCheck()) + EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, + This.emitRawPointer(*this), + C.getRecordType(CalleeDecl->getParent()), + /*Alignment=*/CharUnits::Zero(), SkippedChecks); // C++ [class.virtual]p12: // Explicit qualification with the scope operator (5.1) suppresses the @@ -455,7 +458,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, else This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this); - EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(), + EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this), QualType(MPT->getClass(), 0)); // Get the member function pointer. @@ -1005,8 +1008,8 @@ void CodeGenFunction::EmitNewArrayInitializer( const Expr *Init = E->getInitializer(); Address EndOfInit = Address::invalid(); QualType::DestructionKind DtorKind = ElementType.isDestructedType(); - CleanupDeactivationScope deactivation(*this); - bool pushedCleanup = false; + EHScopeStack::stable_iterator Cleanup; + llvm::Instruction *CleanupDominator = nullptr; CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType); CharUnits ElementAlign = @@ -1102,24 +1105,19 @@ void CodeGenFunction::EmitNewArrayInitializer( } // Enter a partial-destruction Cleanup if necessary. - if (DtorKind) { - AllocaTrackerRAII AllocaTracker(*this); + if (needsEHCleanup(DtorKind)) { // In principle we could tell the Cleanup where we are more // directly, but the control flow can get so varied here that it // would actually be quite complex. Therefore we go through an // alloca. 
- llvm::Instruction *DominatingIP = - Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy)); EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(), "array.init.end"); - pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit, - ElementType, ElementAlign, + CleanupDominator = + Builder.CreateStore(BeginPtr.emitRawPointer(*this), EndOfInit); + pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this), + EndOfInit, ElementType, ElementAlign, getDestroyer(DtorKind)); - cast(*EHStack.find(EHStack.stable_begin())) - .AddAuxAllocas(AllocaTracker.Take()); - DeferredDeactivationCleanupStack.push_back( - {EHStack.stable_begin(), DominatingIP}); - pushedCleanup = true; + Cleanup = EHStack.stable_begin(); } CharUnits StartAlign = CurPtr.getAlignment(); @@ -1129,16 +1127,17 @@ void CodeGenFunction::EmitNewArrayInitializer( // element. TODO: some of these stores can be trivially // observed to be unnecessary. if (EndOfInit.isValid()) { - Builder.CreateStore(CurPtr.getPointer(), EndOfInit); + Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit); } // FIXME: If the last initializer is an incomplete initializer list for // an array, and we have an array filler, we can fold together the two // initialization loops. StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr, AggValueSlot::DoesNotOverlap); - CurPtr = Address(Builder.CreateInBoundsGEP( - CurPtr.getElementType(), CurPtr.getPointer(), - Builder.getSize(1), "array.exp.next"), + CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(), + CurPtr.emitRawPointer(*this), + Builder.getSize(1), + "array.exp.next"), CurPtr.getElementType(), StartAlign.alignmentAtOffset((++i) * ElementSize)); } @@ -1165,6 +1164,9 @@ void CodeGenFunction::EmitNewArrayInitializer( // initialization. llvm::ConstantInt *ConstNum = dyn_cast(NumElements); if (ConstNum && ConstNum->getZExtValue() <= InitListElements) { + // If there was a Cleanup, deactivate it. + if (CleanupDominator) + DeactivateCleanupBlock(Cleanup, CleanupDominator); return; } @@ -1189,7 +1191,7 @@ void CodeGenFunction::EmitNewArrayInitializer( // FIXME: Share this cleanup with the constructor call emission rather than // having it create a cleanup of its own. if (EndOfInit.isValid()) - Builder.CreateStore(CurPtr.getPointer(), EndOfInit); + Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit); // Emit a constructor call loop to initialize the remaining elements. if (InitListElements) @@ -1233,7 +1235,7 @@ void CodeGenFunction::EmitNewArrayInitializer( if (auto *CXXRD = dyn_cast(RType->getDecl())) NumElements = CXXRD->getNumBases(); for (auto *Field : RType->getDecl()->fields()) - if (!Field->isUnnamedBitfield()) + if (!Field->isUnnamedBitField()) ++NumElements; // FIXME: Recurse into nested InitListExprs. if (ILE->getNumInits() == NumElements) @@ -1252,15 +1254,15 @@ void CodeGenFunction::EmitNewArrayInitializer( llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end"); // Find the end of the array, hoisted out of the loop. - llvm::Value *EndPtr = - Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(), - NumElements, "array.end"); + llvm::Value *EndPtr = Builder.CreateInBoundsGEP( + BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements, + "array.end"); // If the number of elements isn't constant, we have to now check if there is // anything left to initialize. 
if (!ConstNum) { - llvm::Value *IsEmpty = - Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty"); + llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this), + EndPtr, "array.isempty"); Builder.CreateCondBr(IsEmpty, ContBB, LoopBB); } @@ -1270,23 +1272,22 @@ void CodeGenFunction::EmitNewArrayInitializer( // Set up the current-element phi. llvm::PHINode *CurPtrPhi = Builder.CreatePHI(CurPtr.getType(), 2, "array.cur"); - CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB); + CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB); CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign); // Store the new Cleanup position for irregular Cleanups. if (EndOfInit.isValid()) - Builder.CreateStore(CurPtr.getPointer(), EndOfInit); + Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit); // Enter a partial-destruction Cleanup if necessary. - if (!pushedCleanup && needsEHCleanup(DtorKind)) { - llvm::Instruction *DominatingIP = - Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy)); - pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(), - ElementType, ElementAlign, - getDestroyer(DtorKind)); - DeferredDeactivationCleanupStack.push_back( - {EHStack.stable_begin(), DominatingIP}); + if (!CleanupDominator && needsEHCleanup(DtorKind)) { + llvm::Value *BeginPtrRaw = BeginPtr.emitRawPointer(*this); + llvm::Value *CurPtrRaw = CurPtr.emitRawPointer(*this); + pushRegularPartialArrayCleanup(BeginPtrRaw, CurPtrRaw, ElementType, + ElementAlign, getDestroyer(DtorKind)); + Cleanup = EHStack.stable_begin(); + CleanupDominator = Builder.CreateUnreachable(); } // Emit the initializer into this element. @@ -1294,12 +1295,14 @@ void CodeGenFunction::EmitNewArrayInitializer( AggValueSlot::DoesNotOverlap); // Leave the Cleanup if we entered one. - deactivation.ForceDeactivate(); + if (CleanupDominator) { + DeactivateCleanupBlock(Cleanup, CleanupDominator); + CleanupDominator->eraseFromParent(); + } // Advance to the next element by adjusting the pointer type as necessary. - llvm::Value *NextPtr = - Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1, - "array.next"); + llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32( + ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next"); // Check whether we've gotten to the end of the array and, if so, // exit the loop. @@ -1525,14 +1528,9 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF, typedef CallDeleteDuringNew DirectCleanup; - DirectCleanup *Cleanup = CGF.EHStack - .pushCleanupWithExtra(EHCleanup, - E->getNumPlacementArgs(), - E->getOperatorDelete(), - NewPtr.getPointer(), - AllocSize, - E->passAlignment(), - AllocAlign); + DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra( + EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(), + NewPtr.emitRawPointer(CGF), AllocSize, E->passAlignment(), AllocAlign); for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { auto &Arg = NewArgs[I + NumNonPlacementArgs]; Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty); @@ -1543,7 +1541,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF, // Otherwise, we need to save all this stuff. 
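// A hypothetical, simplified sketch: the EmitNewArrayInitializer hunks above
// push a partial-destruction cleanup, remember its stable position plus a
// dominating marker instruction (CleanupDominator), and disarm it with
// DeactivateCleanupBlock once every element is initialized.  The toy cleanup
// stack below models only that control flow, not Clang's EHScopeStack.
#include <cstdio>
#include <functional>
#include <vector>

namespace sketch {

class CleanupStack {
  struct Entry { std::function<void()> Fn; bool Active = true; };
  std::vector<Entry> Entries;
public:
  using stable_iterator = size_t;
  stable_iterator push(std::function<void()> Fn) {
    Entries.push_back({std::move(Fn), true});
    return Entries.size() - 1;        // analogous to EHStack.stable_begin()
  }
  void deactivate(stable_iterator It) { // analogous to DeactivateCleanupBlock
    Entries[It].Active = false;
  }
  void runAll() {                       // what would run on unwind/scope exit
    for (auto I = Entries.rbegin(), E = Entries.rend(); I != E; ++I)
      if (I->Active)
        I->Fn();
  }
};

} // namespace sketch

int main() {
  sketch::CleanupStack EHStack;
  int Constructed = 0;
  // "Enter a partial-destruction cleanup": destroy whatever was built so far.
  auto Cleanup =
      EHStack.push([&] { std::printf("destroy %d elements\n", Constructed); });
  for (int I = 0; I < 4; ++I)
    ++Constructed;          // element initialization succeeded
  EHStack.deactivate(Cleanup); // initialization finished: disarm the cleanup
  EHStack.runAll();            // prints nothing; the cleanup was disarmed
  return 0;
}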
DominatingValue::saved_type SavedNewPtr = - DominatingValue::save(CGF, RValue::get(NewPtr.getPointer())); + DominatingValue::save(CGF, RValue::get(NewPtr, CGF)); DominatingValue::saved_type SavedAllocSize = DominatingValue::save(CGF, RValue::get(AllocSize)); @@ -1620,14 +1618,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { // In these cases, discard the computed alignment and use the // formal alignment of the allocated type. if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl) - allocation = allocation.withAlignment(allocAlign); + allocation.setAlignment(allocAlign); // Set up allocatorArgs for the call to operator delete if it's not // the reserved global operator. if (E->getOperatorDelete() && !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType()); - allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType()); + allocatorArgs.add(RValue::get(allocation, *this), arg->getType()); } } else { @@ -1715,8 +1713,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull"); contBB = createBasicBlock("new.cont"); - llvm::Value *isNull = - Builder.CreateIsNull(allocation.getPointer(), "new.isnull"); + llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull"); Builder.CreateCondBr(isNull, contBB, notNullBB); EmitBlock(notNullBB); } @@ -1762,12 +1759,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { SkippedChecks.set(SanitizerKind::Null, nullCheck); EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(), - result.getPointer(), allocType, result.getAlignment(), - SkippedChecks, numElements); + result, allocType, result.getAlignment(), SkippedChecks, + numElements); EmitNewInitializer(*this, E, allocType, elementTy, result, numElements, allocSizeWithoutCookie); - llvm::Value *resultPtr = result.getPointer(); + llvm::Value *resultPtr = result.emitRawPointer(*this); if (E->isArray()) { // NewPtr is a pointer to the base element type. If we're // allocating an array of arrays, we'll need to cast back to the @@ -1911,7 +1908,8 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF, CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType, Dtor); else - CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType); + CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF), + ElementType); } /// Emit the code for deleting a single object. @@ -1927,8 +1925,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF, // dynamic type, the static type shall be a base class of the dynamic type // of the object to be deleted and the static type shall have a virtual // destructor or the behavior is undefined. - CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, - DE->getExprLoc(), Ptr.getPointer(), + CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr, ElementType); const FunctionDecl *OperatorDelete = DE->getOperatorDelete(); @@ -1977,9 +1974,8 @@ static bool EmitObjectDelete(CodeGenFunction &CGF, // Make sure that we call delete even if the dtor throws. // This doesn't have to a conditional cleanup because we're going // to pop it off in a second. 
- CGF.EHStack.pushCleanup(NormalAndEHCleanup, - Ptr.getPointer(), - OperatorDelete, ElementType); + CGF.EHStack.pushCleanup( + NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType); if (Dtor) CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, @@ -2066,7 +2062,7 @@ static void EmitArrayDelete(CodeGenFunction &CGF, CharUnits elementAlign = deletedPtr.getAlignment().alignmentOfArrayElement(elementSize); - llvm::Value *arrayBegin = deletedPtr.getPointer(); + llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF); llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP( deletedPtr.getElementType(), arrayBegin, numElements, "delete.end"); @@ -2097,7 +2093,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull"); llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end"); - llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull"); + llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull"); Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull); EmitBlock(DeleteNotNull); @@ -2132,10 +2128,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { GEP.push_back(Zero); } - Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(), - Ptr.getPointer(), GEP, "del.first"), - ConvertTypeForMem(DeleteTy), Ptr.getAlignment(), - Ptr.isKnownNonNull()); + Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy), + Ptr.getAlignment(), "del.first"); } assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType()); @@ -2193,7 +2187,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, // destruction and the static type of the operand is neither the constructor // or destructor’s class nor one of its bases, the behavior is undefined. CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(), - ThisPtr.getPointer(), SrcRecordTy); + ThisPtr, SrcRecordTy); // C++ [expr.typeid]p2: // If the glvalue expression is obtained by applying the unary * operator to @@ -2209,7 +2203,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, CGF.createBasicBlock("typeid.bad_typeid"); llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end"); - llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer()); + llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr); CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock); CGF.EmitBlock(BadTypeidBlock); @@ -2295,8 +2289,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr, // construction or destruction and the static type of the operand is not a // pointer to or object of the constructor or destructor’s own class or one // of its bases, the dynamic_cast results in undefined behavior. 
- EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), - SrcRecordTy); + EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy); if (DCE->isAlwaysNull()) { if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) { @@ -2331,7 +2324,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr, CastNull = createBasicBlock("dynamic_cast.null"); CastNotNull = createBasicBlock("dynamic_cast.notnull"); - llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer()); + llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index 53e191b0c2940..c924660c5a91c 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -706,7 +706,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) { continue; // Don't emit anonymous bitfields. - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; // Get the initializer. A struct can include fields without initializers, @@ -800,8 +800,8 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, // Add a vtable pointer, if we need one and it hasn't already been added. if (Layout.hasOwnVFPtr()) { llvm::Constant *VTableAddressPoint = - CGM.getCXXABI().getVTableAddressPointForConstExpr( - BaseSubobject(CD, Offset), VTableClass); + CGM.getCXXABI().getVTableAddressPoint(BaseSubobject(CD, Offset), + VTableClass); if (!AppendBytes(Offset, VTableAddressPoint)) return false; } @@ -840,7 +840,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, continue; // Don't emit anonymous bitfields or zero-sized fields. - if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext())) + if (Field->isUnnamedBitField() || Field->isZeroSize(CGM.getContext())) continue; // Emit the value of the initializer. diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index bae8c7a266c2e..4bbc051b2c2a2 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2408,7 +2408,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // performed and the object is not of the derived type. if (CGF.sanitizePerformTypeCheck()) CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), - Derived.getPointer(), DestTy->getPointeeType()); + Derived, DestTy->getPointeeType()); if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived, @@ -2416,13 +2416,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { CodeGenFunction::CFITCK_DerivedCast, CE->getBeginLoc()); - return Derived.getPointer(); + return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType()); } case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { // The EmitPointerWithAlignment path does this fine; just discard // the alignment. 
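// Source-level illustration (not from this patch) of why the dynamic_cast.null
// and delete.notnull blocks above exist: standard C++ requires dynamic_cast of
// a null pointer to yield null without a runtime lookup, and delete of a null
// pointer to be a no-op, so CodeGen branches around the runtime/dtor calls.
#include <cassert>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

int main() {
  Base *Null = nullptr;
  // CodeGen tests "is null" before calling the dynamic_cast runtime,
  // so this never reaches __dynamic_cast.
  Derived *D = dynamic_cast<Derived *>(Null);
  assert(D == nullptr);

  // Likewise the delete.notnull branch skips the destructor and the call to
  // operator delete entirely for a null operand.
  delete Null;
  return 0;
}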
- return CGF.EmitPointerWithAlignment(CE).getPointer(); + return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE), + CE->getType()->getPointeeType()); } case CK_Dynamic: { @@ -2432,7 +2433,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { } case CK_ArrayToPointerDecay: - return CGF.EmitArrayToPointerDecay(E).getPointer(); + return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E), + CE->getType()->getPointeeType()); case CK_FunctionToPointerDecay: return EmitLValue(E).getPointer(CGF); @@ -5811,3 +5813,16 @@ CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr, return GEPVal; } + +Address CodeGenFunction::EmitCheckedInBoundsGEP( + Address Addr, ArrayRef IdxList, llvm::Type *elementType, + bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align, + const Twine &Name) { + if (!SanOpts.has(SanitizerKind::PointerOverflow)) + return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name); + + return RawAddress( + EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this), + IdxList, SignedIndices, IsSubtraction, Loc, Name), + elementType, Align); +} diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h index 2b8073aef973f..506b364f5b2ec 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.h +++ b/clang/lib/CodeGen/CGHLSLRuntime.h @@ -73,6 +73,7 @@ class CGHLSLRuntime { //===----------------------------------------------------------------------===// GENERATE_HLSL_INTRINSIC_FUNCTION(All, all) + GENERATE_HLSL_INTRINSIC_FUNCTION(Any, any) GENERATE_HLSL_INTRINSIC_FUNCTION(ThreadId, thread_id) //===----------------------------------------------------------------------===// diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp index 75c1d7fbea840..8fade0fac21e9 100644 --- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp +++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp @@ -366,7 +366,7 @@ template struct GenFuncBase { llvm::Value *SizeInBytes = CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts); llvm::Value *DstArrayEnd = CGF.Builder.CreateInBoundsGEP( - CGF.Int8Ty, DstAddr.getPointer(), SizeInBytes); + CGF.Int8Ty, DstAddr.emitRawPointer(CGF), SizeInBytes); llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock(); // Create the header block and insert the phi instructions. @@ -376,7 +376,7 @@ template struct GenFuncBase { for (unsigned I = 0; I < N; ++I) { PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur"); - PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB); + PHIs[I]->addIncoming(StartAddrs[I].emitRawPointer(CGF), PreheaderBB); } // Create the exit and loop body blocks. @@ -410,7 +410,7 @@ template struct GenFuncBase { // Instrs to update the destination and source addresses. // Update phi instructions. NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize); - PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB); + PHIs[I]->addIncoming(NewAddrs[I].emitRawPointer(CGF), LoopBB); } // Insert an unconditional branch to the header block. 
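// A hypothetical analogue (not Clang code) of the new Address-based
// EmitCheckedInBoundsGEP overload added above: the checked form is emitted
// only when the pointer-overflow sanitizer is requested, otherwise it falls
// back to a plain in-bounds GEP.  The helper below models that
// "check only when instrumentation is enabled" shape.
#include <cassert>
#include <cstddef>
#include <stdexcept>

namespace sketch {

struct Options { bool PointerOverflowChecks = false; }; // stand-in for SanOpts

// Index into Base[0..Size] either with or without a bounds check,
// depending on the requested instrumentation.
inline int *checkedInBoundsGEP(const Options &Opts, int *Base, size_t Size,
                               size_t Idx) {
  if (!Opts.PointerOverflowChecks)
    return Base + Idx;    // plain in-bounds GEP
  if (Idx > Size)         // checked variant diagnoses out-of-object arithmetic
    throw std::out_of_range("pointer arithmetic past end of object");
  return Base + Idx;
}

} // namespace sketch

int main() {
  int Buf[4] = {0, 1, 2, 3};
  sketch::Options Plain, Sanitized{true};
  assert(*sketch::checkedInBoundsGEP(Plain, Buf, 4, 2) == 2);
  assert(*sketch::checkedInBoundsGEP(Sanitized, Buf, 4, 3) == 3);
  return 0;
}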
@@ -488,7 +488,7 @@ template struct GenFuncBase { for (unsigned I = 0; I < N; ++I) { Alignments[I] = Addrs[I].getAlignment(); - Ptrs[I] = Addrs[I].getPointer(); + Ptrs[I] = Addrs[I].emitRawPointer(CallerCGF); } if (llvm::Function *F = diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp index df4f90e51c112..61d8f11c078aa 100644 --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -94,8 +94,8 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) { // and cast value to correct type Address Temporary = CreateMemTemp(SubExpr->getType()); EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true); - llvm::Value *BitCast = - Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT)); + llvm::Value *BitCast = Builder.CreateBitCast( + Temporary.emitRawPointer(*this), ConvertType(ArgQT)); Args.add(RValue::get(BitCast), ArgQT); // Create char array to store type encoding @@ -204,11 +204,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin(); const ParmVarDecl *argDecl = *PI++; QualType ArgQT = argDecl->getType().getUnqualifiedType(); - Args.add(RValue::get(Objects.getPointer()), ArgQT); + Args.add(RValue::get(Objects, *this), ArgQT); if (DLE) { argDecl = *PI++; ArgQT = argDecl->getType().getUnqualifiedType(); - Args.add(RValue::get(Keys.getPointer()), ArgQT); + Args.add(RValue::get(Keys, *this), ArgQT); } argDecl = *PI; ArgQT = argDecl->getType().getUnqualifiedType(); @@ -828,7 +828,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar, // sizeof (Type of Ivar), isAtomic, false); CallArgList args; - llvm::Value *dest = CGF.ReturnValue.getPointer(); + llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF); args.add(RValue::get(dest), Context.VoidPtrTy); args.add(RValue::get(src), Context.VoidPtrTy); @@ -1148,8 +1148,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, callCStructCopyConstructor(Dst, Src); } else { ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); - emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar, - AtomicHelperFn); + emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this), + ivar, AtomicHelperFn); } return; } @@ -1164,7 +1164,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, } else { ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); - emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), + emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this), ivar, AtomicHelperFn); } return; @@ -1288,7 +1288,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, case TEK_Scalar: { llvm::Value *value; if (propType->isReferenceType()) { - value = LV.getAddress(*this).getPointer(); + value = LV.getAddress(*this).emitRawPointer(*this); } else { // We want to load and autoreleaseReturnValue ARC __weak ivars. if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { @@ -1790,11 +1790,10 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ static const unsigned NumItems = 16; // Fetch the countByEnumeratingWithState:objects:count: selector. 
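// A hypothetical source-level equivalent (not generated code) of the loop the
// GenFuncBase hunks above emit: one "addr.cur" PHI per operand, a byte-size
// end pointer for the destination, and both cursors advanced in lockstep.
#include <cassert>
#include <cstddef>
#include <cstring>

namespace sketch {

// Copy NumElts elements of EltSize bytes each, advancing both cursors in
// lockstep the way the generated loop advances its PHI nodes.
inline void copyElementsInLockstep(void *Dst, const void *Src,
                                   std::size_t EltSize, std::size_t NumElts) {
  unsigned char *DstCur = static_cast<unsigned char *>(Dst);
  const unsigned char *SrcCur = static_cast<const unsigned char *>(Src);
  unsigned char *DstEnd = DstCur + EltSize * NumElts; // "dstarray.end"
  while (DstCur != DstEnd) {
    std::memcpy(DstCur, SrcCur, EltSize); // per-element copy in the loop body
    DstCur += EltSize;                    // update both "addr.cur" cursors
    SrcCur += EltSize;
  }
}

} // namespace sketch

int main() {
  int Src[4] = {1, 2, 3, 4}, Dst[4] = {0, 0, 0, 0};
  sketch::copyElementsInLockstep(Dst, Src, sizeof(int), 4);
  assert(Dst[0] == 1 && Dst[3] == 4);
  return 0;
}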
- IdentifierInfo *II[] = { - &CGM.getContext().Idents.get("countByEnumeratingWithState"), - &CGM.getContext().Idents.get("objects"), - &CGM.getContext().Idents.get("count") - }; + const IdentifierInfo *II[] = { + &CGM.getContext().Idents.get("countByEnumeratingWithState"), + &CGM.getContext().Idents.get("objects"), + &CGM.getContext().Idents.get("count")}; Selector FastEnumSel = CGM.getContext().Selectors.getSelector(std::size(II), &II[0]); @@ -1822,16 +1821,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ CallArgList Args; // The first argument is a temporary of the enumeration-state type. - Args.add(RValue::get(StatePtr.getPointer()), - getContext().getPointerType(StateTy)); + Args.add(RValue::get(StatePtr, *this), getContext().getPointerType(StateTy)); // The second argument is a temporary array with space for NumItems // pointers. We'll actually be loading elements from the array // pointer written into the control state; this buffer is so that // collections that *aren't* backed by arrays can still queue up // batches of elements. - Args.add(RValue::get(ItemsPtr.getPointer()), - getContext().getPointerType(ItemsTy)); + Args.add(RValue::get(ItemsPtr, *this), getContext().getPointerType(ItemsTy)); // The third argument is the capacity of that temporary array. llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); @@ -2199,7 +2196,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, if (!fn) fn = getARCIntrinsic(IntID, CGF.CGM); - return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); + return CGF.EmitNounwindRuntimeCall(fn, addr.emitRawPointer(CGF)); } /// Perform an operation having the following signature: @@ -2217,9 +2214,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, llvm::Type *origType = value->getType(); llvm::Value *args[] = { - CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), - CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) - }; + CGF.Builder.CreateBitCast(addr.emitRawPointer(CGF), CGF.Int8PtrPtrTy), + CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)}; llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); if (ignored) return nullptr; @@ -2238,9 +2234,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, fn = getARCIntrinsic(IntID, CGF.CGM); llvm::Value *args[] = { - CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), - CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) - }; + CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), CGF.Int8PtrPtrTy), + CGF.Builder.CreateBitCast(src.emitRawPointer(CGF), CGF.Int8PtrPtrTy)}; CGF.EmitNounwindRuntimeCall(fn, args); } @@ -2491,9 +2486,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM); llvm::Value *args[] = { - Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), - Builder.CreateBitCast(value, Int8PtrTy) - }; + Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy), + Builder.CreateBitCast(value, Int8PtrTy)}; EmitNounwindRuntimeCall(fn, args); if (ignored) return nullptr; @@ -2644,7 +2638,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) { if (!fn) fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM); - EmitNounwindRuntimeCall(fn, addr.getPointer()); + EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this)); } /// void \@objc_moveWeak(i8** %dest, i8** %src) @@ -2726,7 +2720,7 @@ llvm::Value 
*CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { CGObjCRuntime &Runtime = CGM.getObjCRuntime(); llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); // [NSAutoreleasePool alloc] - IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); + const IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); Selector AllocSel = getContext().Selectors.getSelector(0, &II); CallArgList Args; RValue AllocRV = @@ -2773,7 +2767,7 @@ llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value, /// Produce the code to do a primitive release. /// [tmp drain]; void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { - IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); + const IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); Selector DrainSel = getContext().Selectors.getSelector(0, &II); CallArgList Args; CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), @@ -3721,8 +3715,8 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction( if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty))) return HelperFn; - IdentifierInfo *II - = &CGM.getContext().Idents.get("__assign_helper_atomic_property_"); + const IdentifierInfo *II = + &CGM.getContext().Idents.get("__assign_helper_atomic_property_"); QualType ReturnTy = C.VoidTy; QualType DestTy = C.getPointerType(Ty); @@ -3819,7 +3813,7 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) return HelperFn; - IdentifierInfo *II = + const IdentifierInfo *II = &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); QualType ReturnTy = C.VoidTy; @@ -3913,10 +3907,10 @@ llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( llvm::Value * CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { // Get selectors for retain/autorelease. 
- IdentifierInfo *CopyID = &getContext().Idents.get("copy"); + const IdentifierInfo *CopyID = &getContext().Idents.get("copy"); Selector CopySelector = getContext().Selectors.getNullarySelector(CopyID); - IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); + const IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); Selector AutoreleaseSelector = getContext().Selectors.getNullarySelector(AutoreleaseID); diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index a36b0cdddaf0a..4e7f777ba1d91 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -706,7 +706,8 @@ class CGObjCGCC : public CGObjCGNU { llvm::Value *cmd, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *lookupArgs[] = { - EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd}; + EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy), + cmd}; return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs); } @@ -761,8 +762,8 @@ class CGObjCGNUstep : public CGObjCGNU { llvm::FunctionCallee LookupFn = SlotLookupFn; // Store the receiver on the stack so that we can reload it later - Address ReceiverPtr = - CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign()); + RawAddress ReceiverPtr = + CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign()); Builder.CreateStore(Receiver, ReceiverPtr); llvm::Value *self; @@ -778,9 +779,9 @@ class CGObjCGNUstep : public CGObjCGNU { LookupFn2->addParamAttr(0, llvm::Attribute::NoCapture); llvm::Value *args[] = { - EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy), - EnforceType(Builder, cmd, SelectorTy), - EnforceType(Builder, self, IdTy) }; + EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy), + EnforceType(Builder, cmd, SelectorTy), + EnforceType(Builder, self, IdTy)}; llvm::CallBase *slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args); slot->setOnlyReadsMemory(); slot->setMetadata(msgSendMDKind, node); @@ -800,7 +801,7 @@ class CGObjCGNUstep : public CGObjCGNU { llvm::Value *cmd, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; - llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd}; + llvm::Value *lookupArgs[] = {ObjCSuper.emitRawPointer(CGF), cmd}; llvm::CallInst *slot = CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs); @@ -1221,10 +1222,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep { llvm::Value *cmd, MessageSendInfo &MSI) override { // Don't access the slot unless we're trying to cache the result. 
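// A hypothetical string-level model (not Clang's Selector machinery) of the
// selectors built in the CGObjC* hunks above from arrays of
// const IdentifierInfo*: a selector with N keyword pieces spells as
// "piece0:piece1:...:" while a nullary selector such as "copy" or "drain"
// (getNullarySelector / getSelector(0, ...)) is just the bare name.
#include <cassert>
#include <string>
#include <vector>

namespace sketch {

// Stand-in for Selectors.getSelector(NumArgs, IdentifierPieces).
inline std::string getSelectorName(size_t NumArgs,
                                   const std::vector<std::string> &Pieces) {
  if (NumArgs == 0)
    return Pieces.at(0);        // nullary selector: no trailing colon
  std::string Name;
  for (size_t I = 0; I < NumArgs; ++I)
    Name += Pieces.at(I) + ":"; // one colon per keyword argument
  return Name;
}

} // namespace sketch

int main() {
  assert(sketch::getSelectorName(0, {"copy"}) == "copy");
  assert(sketch::getSelectorName(
             3, {"countByEnumeratingWithState", "objects", "count"}) ==
         "countByEnumeratingWithState:objects:count:");
  return 0;
}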
CGBuilderTy &Builder = CGF.Builder; - llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder, - ObjCSuper.getPointer(), - PtrToObjCSuperTy), - cmd}; + llvm::Value *lookupArgs[] = { + CGObjCGNU::EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), + PtrToObjCSuperTy), + cmd}; return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs); } @@ -2186,7 +2187,8 @@ class CGObjCObjFW: public CGObjCGNU { llvm::Value *cmd, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *lookupArgs[] = { - EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd, + EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy), + cmd, }; if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) @@ -4201,15 +4203,15 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF, llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF, Address AddrWeakObj) { CGBuilderTy &B = CGF.Builder; - return B.CreateCall(WeakReadFn, - EnforceType(B, AddrWeakObj.getPointer(), PtrToIdTy)); + return B.CreateCall( + WeakReadFn, EnforceType(B, AddrWeakObj.emitRawPointer(CGF), PtrToIdTy)); } void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF, llvm::Value *src, Address dst) { CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); - llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy); + llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy); B.CreateCall(WeakAssignFn, {src, dstVal}); } @@ -4218,7 +4220,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF, bool threadlocal) { CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); - llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy); + llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy); // FIXME. Add threadloca assign API assert(!threadlocal && "EmitObjCGlobalAssign - Threal Local API NYI"); B.CreateCall(GlobalAssignFn, {src, dstVal}); @@ -4229,7 +4231,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *ivarOffset) { CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); - llvm::Value *dstVal = EnforceType(B, dst.getPointer(), IdTy); + llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), IdTy); B.CreateCall(IvarAssignFn, {src, dstVal, ivarOffset}); } @@ -4237,7 +4239,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF, llvm::Value *src, Address dst) { CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); - llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy); + llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy); B.CreateCall(StrongCastAssignFn, {src, dstVal}); } @@ -4246,8 +4248,8 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF, Address SrcPtr, llvm::Value *Size) { CGBuilderTy &B = CGF.Builder; - llvm::Value *DestPtrVal = EnforceType(B, DestPtr.getPointer(), PtrTy); - llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.getPointer(), PtrTy); + llvm::Value *DestPtrVal = EnforceType(B, DestPtr.emitRawPointer(CGF), PtrTy); + llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.emitRawPointer(CGF), PtrTy); B.CreateCall(MemMoveFn, {DestPtrVal, SrcPtrVal, Size}); } diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index ed8d7b9a065d7..042cd5d46da4b 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -1310,7 +1310,7 @@ class CGObjCMac : public CGObjCCommonMac { /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. 
llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel); - Address EmitSelectorAddr(Selector Sel); + ConstantAddress EmitSelectorAddr(Selector Sel); public: CGObjCMac(CodeGen::CodeGenModule &cgm); @@ -1538,7 +1538,7 @@ class CGObjCNonFragileABIMac : public CGObjCCommonMac { /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel); - Address EmitSelectorAddr(Selector Sel); + ConstantAddress EmitSelectorAddr(Selector Sel); /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C /// interface. The return value has type EHTypePtrTy. @@ -1555,12 +1555,12 @@ class CGObjCNonFragileABIMac : public CGObjCCommonMac { // Shamelessly stolen from Analysis/CFRefCount.cpp Selector GetNullarySelector(const char* name) const { - IdentifierInfo* II = &CGM.getContext().Idents.get(name); + const IdentifierInfo *II = &CGM.getContext().Idents.get(name); return CGM.getContext().Selectors.getSelector(0, &II); } Selector GetUnarySelector(const char* name) const { - IdentifierInfo* II = &CGM.getContext().Idents.get(name); + const IdentifierInfo *II = &CGM.getContext().Idents.get(name); return CGM.getContext().Selectors.getSelector(1, &II); } @@ -2064,9 +2064,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, const ObjCMethodDecl *Method) { // Create and init a super structure; this is a (receiver, class) // pair we will pass to objc_msgSendSuper. - Address ObjCSuper = - CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(), - "objc_super"); + RawAddress ObjCSuper = CGF.CreateTempAlloca( + ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super"); llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); CGF.Builder.CreateStore(ReceiverAsObject, @@ -4259,7 +4258,7 @@ namespace { CGF.EmitBlock(FinallyCallExit); CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(), - ExceptionData.getPointer()); + ExceptionData.emitRawPointer(CGF)); CGF.EmitBlock(FinallyNoCallExit); @@ -4425,7 +4424,9 @@ void FragileHazards::emitHazardsInNewBlocks() { } static void addIfPresent(llvm::DenseSet &S, Address V) { - if (V.isValid()) S.insert(V.getPointer()); + if (V.isValid()) + if (llvm::Value *Ptr = V.getBasePointer()) + S.insert(Ptr); } void FragileHazards::collectLocals() { @@ -4628,13 +4629,13 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // - Call objc_exception_try_enter to push ExceptionData on top of // the EH stack. CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), - ExceptionData.getPointer()); + ExceptionData.emitRawPointer(CGF)); // - Call setjmp on the exception data buffer. llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0); llvm::Value *GEPIndexes[] = { Zero, Zero, Zero }; llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP( - ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes, + ObjCTypes.ExceptionDataTy, ExceptionData.emitRawPointer(CGF), GEPIndexes, "setjmp_buffer"); llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall( ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result"); @@ -4673,9 +4674,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, } else { // Retrieve the exception object. We may emit multiple blocks but // nothing can cross this so the value is already in SSA form. 
- llvm::CallInst *Caught = - CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), - ExceptionData.getPointer(), "caught"); + llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall( + ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF), + "caught"); // Push the exception to rethrow onto the EH value stack for the // benefit of any @throws in the handlers. @@ -4698,7 +4699,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // Enter a new exception try block (in case a @catch block // throws an exception). CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), - ExceptionData.getPointer()); + ExceptionData.emitRawPointer(CGF)); llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(), @@ -4829,9 +4830,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // Extract the new exception and save it to the // propagating-exception slot. assert(PropagatingExnVar.isValid()); - llvm::CallInst *NewCaught = - CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), - ExceptionData.getPointer(), "caught"); + llvm::CallInst *NewCaught = CGF.EmitNounwindRuntimeCall( + ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF), + "caught"); CGF.Builder.CreateStore(NewCaught, PropagatingExnVar); // Don't pop the catch handler; the throw already did. @@ -4861,9 +4862,8 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // Otherwise, just look in the buffer for the exception to throw. } else { - llvm::CallInst *Caught = - CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), - ExceptionData.getPointer()); + llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall( + ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF)); PropagatingExn = Caught; } @@ -4906,7 +4906,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj) { llvm::Type* DestTy = AddrWeakObj.getElementType(); llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast( - AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy); + AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy); llvm::Value *read_weak = CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(), AddrWeakObjVal, "weakread"); @@ -4928,8 +4928,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dstVal }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(), args, "weakassign"); @@ -4950,8 +4950,8 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal}; if (!threadlocal) CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(), @@ -4977,8 +4977,8 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); 
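// A minimal portable setjmp/longjmp sketch (no ObjC runtime involved) of the
// control flow the fragile-ABI @try lowering above emits: arm an exception
// buffer (objc_exception_try_enter), call setjmp on it, treat a zero result as
// the normal path and a non-zero result as "an exception was raised", then
// fetch it (objc_exception_extract).
#include <csetjmp>
#include <cstdio>

static std::jmp_buf ExceptionBuffer; // stand-in for the exception data slot

static void mayRaise(bool Raise) {
  if (Raise)
    std::longjmp(ExceptionBuffer, 1); // stand-in for objc_exception_throw
  std::puts("body ran to completion");
}

int main() {
  // "try_enter": arm the buffer, then branch on the setjmp result exactly
  // like the emitted setjmp_result compare in the hunks above.
  if (setjmp(ExceptionBuffer) == 0) {
    mayRaise(true); // the @try body
  } else {
    std::puts("caught: control returned through the exception buffer");
  }
  return 0;
}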
- llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal, ivarOffset}; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args); } @@ -4997,8 +4997,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal}; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(), args, "strongassign"); @@ -5007,7 +5007,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *size) { - llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size }; + llvm::Value *args[] = {DestPtr.emitRawPointer(CGF), + SrcPtr.emitRawPointer(CGF), size}; CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args); } @@ -5243,7 +5244,7 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) { return CGF.Builder.CreateLoad(EmitSelectorAddr(Sel)); } -Address CGObjCMac::EmitSelectorAddr(Selector Sel) { +ConstantAddress CGObjCMac::EmitSelectorAddr(Selector Sel) { CharUnits Align = CGM.getPointerAlign(); llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; @@ -5254,7 +5255,7 @@ Address CGObjCMac::EmitSelectorAddr(Selector Sel) { Entry->setExternallyInitialized(true); } - return Address(Entry, ObjCTypes.SelectorPtrTy, Align); + return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align); } llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) { @@ -6267,11 +6268,10 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) { VTableDispatchMethods.insert(GetUnarySelector("addObject")); // "countByEnumeratingWithState:objects:count" - IdentifierInfo *KeyIdents[] = { - &CGM.getContext().Idents.get("countByEnumeratingWithState"), - &CGM.getContext().Idents.get("objects"), - &CGM.getContext().Idents.get("count") - }; + const IdentifierInfo *KeyIdents[] = { + &CGM.getContext().Idents.get("countByEnumeratingWithState"), + &CGM.getContext().Idents.get("objects"), + &CGM.getContext().Idents.get("count")}; VTableDispatchMethods.insert( CGM.getContext().Selectors.getSelector(3, KeyIdents)); } @@ -7323,7 +7323,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF, ObjCTypes.MessageRefTy, CGF.getPointerAlign()); // Update the message ref argument. - args[1].setRValue(RValue::get(mref.getPointer())); + args[1].setRValue(RValue::get(mref, CGF)); // Load the function to call from the message ref table. Address calleeAddr = CGF.Builder.CreateStructGEP(mref, 0); @@ -7552,9 +7552,8 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, // ... // Create and init a super structure; this is a (receiver, class) // pair we will pass to objc_msgSendSuper. 
- Address ObjCSuper = - CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(), - "objc_super"); + RawAddress ObjCSuper = CGF.CreateTempAlloca( + ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super"); llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); @@ -7594,7 +7593,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF, return LI; } -Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) { +ConstantAddress CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) { llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; CharUnits Align = CGM.getPointerAlign(); if (!Entry) { @@ -7610,7 +7609,7 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) { CGM.addCompilerUsedGlobal(Entry); } - return Address(Entry, ObjCTypes.SelectorPtrTy, Align); + return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align); } /// EmitObjCIvarAssign - Code gen for assigning to a __strong object. @@ -7629,8 +7628,8 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal, ivarOffset}; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args); } @@ -7650,8 +7649,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign( src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal}; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(), args, "weakassign"); @@ -7660,7 +7659,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign( void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable( CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr, llvm::Value *Size) { - llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size }; + llvm::Value *args[] = {DestPtr.emitRawPointer(CGF), + SrcPtr.emitRawPointer(CGF), Size}; CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args); } @@ -7672,7 +7672,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead( Address AddrWeakObj) { llvm::Type *DestTy = AddrWeakObj.getElementType(); llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast( - AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy); + AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy); llvm::Value *read_weak = CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(), AddrWeakObjVal, "weakread"); @@ -7694,8 +7694,8 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal}; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(), args, "weakassign"); @@ -7716,8 +7716,8 @@ void 
CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); - llvm::Value *dstVal = - CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy); + llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), + ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = {src, dstVal}; if (!threadlocal) CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(), diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp index 424564f975999..01d0f35da1964 100644 --- a/clang/lib/CodeGen/CGObjCRuntime.cpp +++ b/clang/lib/CodeGen/CGObjCRuntime.cpp @@ -67,7 +67,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF, V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr"); if (!Ivar->isBitField()) { - LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy); + LValue LV = CGF.MakeNaturalAlignRawAddrLValue(V, IvarTy); return LV; } @@ -233,7 +233,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF, llvm::Instruction *CPICandidate = Handler.Block->getFirstNonPHI(); if (auto *CPI = dyn_cast_or_null(CPICandidate)) { CGF.CurrentFuncletPad = CPI; - CPI->setOperand(2, CGF.getExceptionSlot().getPointer()); + CPI->setOperand(2, CGF.getExceptionSlot().emitRawPointer(CGF)); CGF.EHStack.pushCleanup(NormalCleanup, CPI); } } @@ -405,7 +405,7 @@ bool CGObjCRuntime::canMessageReceiverBeNull(CodeGenFunction &CGF, auto self = curMethod->getSelfDecl(); if (self->getType().isConstQualified()) { if (auto LI = dyn_cast(receiver->stripPointerCasts())) { - llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).getPointer(); + llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).emitRawPointer(CGF); if (selfAddr == LI->getPointerOperand()) { return false; } diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index e299a2d5e4718..28271160649af 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -622,7 +622,7 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF, auto *GV = new llvm::GlobalVariable( CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Init, Name); - LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty); + LValue LV = CGF.MakeNaturalAlignRawAddrLValue(GV, Ty); RValue InitRVal; switch (CGF.getEvaluationKind(Ty)) { case TEK_Scalar: @@ -668,8 +668,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr, llvm::Value *SrcBegin = nullptr; if (DRD) - SrcBegin = SrcAddr.getPointer(); - llvm::Value *DestBegin = DestAddr.getPointer(); + SrcBegin = SrcAddr.emitRawPointer(CGF); + llvm::Value *DestBegin = DestAddr.emitRawPointer(CGF); // Cast from pointer to array type to pointer to single element. 
llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements); @@ -912,7 +912,7 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, Address OriginalBaseAddress, llvm::Value *Addr) { - Address Tmp = Address::invalid(); + RawAddress Tmp = RawAddress::invalid(); Address TopTmp = Address::invalid(); Address MostTopTmp = Address::invalid(); BaseTy = BaseTy.getNonReferenceType(); @@ -971,10 +971,10 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address SharedAddr = SharedAddresses[N].first.getAddress(CGF); llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff( SharedAddr.getElementType(), BaseLValue.getPointer(CGF), - SharedAddr.getPointer()); + SharedAddr.emitRawPointer(CGF)); llvm::Value *PrivatePointer = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( - PrivateAddr.getPointer(), SharedAddr.getType()); + PrivateAddr.emitRawPointer(CGF), SharedAddr.getType()); llvm::Value *Ptr = CGF.Builder.CreateGEP( SharedAddr.getElementType(), PrivatePointer, Adjustment); return castToBase(CGF, OrigVD->getType(), @@ -1557,7 +1557,7 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc( return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName); } -Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { +ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); }; auto LinkageForVariable = [&VD, this]() { @@ -1579,8 +1579,8 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { LinkageForVariable); if (!addr) - return Address::invalid(); - return Address(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD)); + return ConstantAddress::invalid(); + return ConstantAddress(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD)); } llvm::Constant * @@ -1604,7 +1604,7 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF, llvm::Type *VarTy = VDAddr.getElementType(); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), - CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy), + CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy), CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)), getOrCreateThreadPrivateCache(VD)}; return Address( @@ -1627,7 +1627,8 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit( // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor) // to register constructor/destructor for variable. llvm::Value *Args[] = { - OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy), + OMPLoc, + CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.VoidPtrTy), Ctor, CopyCtor, Dtor}; CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction( @@ -1900,13 +1901,13 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, // OutlinedFn(>id, &zero_bound, CapturedStruct); Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc); - Address ZeroAddrBound = + RawAddress ZeroAddrBound = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, /*Name=*/".bound.zero.addr"); CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound); llvm::SmallVector OutlinedFnArgs; // ThreadId for serialized parallels is 0. 
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer()); + OutlinedFnArgs.push_back(ThreadIDAddr.emitRawPointer(CGF)); OutlinedFnArgs.push_back(ZeroAddrBound.getPointer()); OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end()); @@ -2272,7 +2273,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF, emitUpdateLocation(CGF, Loc), // ident_t * getThreadID(CGF, Loc), // i32 BufSize, // size_t - CL.getPointer(), // void * + CL.emitRawPointer(CGF), // void * CpyFn, // void (*) (void *, void *) DidItVal // i32 did_it }; @@ -2591,10 +2592,10 @@ static void emitForStaticInitCall( ThreadId, CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1, M2)), // Schedule type - Values.IL.getPointer(), // &isLastIter - Values.LB.getPointer(), // &LB - Values.UB.getPointer(), // &UB - Values.ST.getPointer(), // &Stride + Values.IL.emitRawPointer(CGF), // &isLastIter + Values.LB.emitRawPointer(CGF), // &LB + Values.UB.emitRawPointer(CGF), // &UB + Values.ST.emitRawPointer(CGF), // &Stride CGF.Builder.getIntN(Values.IVSize, 1), // Incr Chunk // Chunk }; @@ -2655,11 +2656,12 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF, // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid); llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, - isOpenMPDistributeDirective(DKind) + isOpenMPDistributeDirective(DKind) || + (DKind == OMPD_target_teams_loop) ? OMP_IDENT_WORK_DISTRIBUTE - : isOpenMPLoopDirective(DKind) - ? OMP_IDENT_WORK_LOOP - : OMP_IDENT_WORK_SECTIONS), + : isOpenMPLoopDirective(DKind) + ? OMP_IDENT_WORK_LOOP + : OMP_IDENT_WORK_SECTIONS), getThreadID(CGF, Loc)}; auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc); if (isOpenMPDistributeDirective(DKind) && @@ -2697,12 +2699,11 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF, // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, // kmp_int[32|64] *p_stride); llvm::Value *Args[] = { - emitUpdateLocation(CGF, Loc), - getThreadID(CGF, Loc), - IL.getPointer(), // &isLastIter - LB.getPointer(), // &Lower - UB.getPointer(), // &Upper - ST.getPointer() // &Stride + emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), + IL.emitRawPointer(CGF), // &isLastIter + LB.emitRawPointer(CGF), // &Lower + UB.emitRawPointer(CGF), // &Upper + ST.emitRawPointer(CGF) // &Stride }; llvm::Value *Call = CGF.EmitRuntimeCall( OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args); @@ -3047,7 +3048,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc, CGF.Builder .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF), CGF.VoidPtrTy, CGF.Int8Ty) - .getPointer()}; + .emitRawPointer(CGF)}; SmallVector CallArgs(std::begin(CommonArgs), std::end(CommonArgs)); if (isOpenMPTaskLoopDirective(Kind)) { @@ -3574,7 +3575,8 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) { CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false); Address UpAddrAddress = UpAddrLVal.getAddress(CGF); llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32( - UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1); + UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF), + /*Idx0=*/1); llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy); llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy); SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr); @@ -3888,8 +3890,9 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *Size; std::tie(Addr, Size) = getPointerAndSize(CGF, E); llvm::Value *Idx = 
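// A hypothetical, simplified static partitioning (deliberately not libomp's
// exact algorithm) showing what the four out-parameters mean that
// emitForStaticInitCall above passes by address (&isLastIter, &LB, &UB,
// &Stride): the runtime fills in this thread's slice of the iteration space.
#include <cassert>
#include <cstdint>

namespace sketch {

// Split [0, TripCount) roughly evenly across NumThreads and fill the
// out-params the way a __kmpc_for_static_init-style entry point would.
inline void forStaticInit(int64_t TripCount, int32_t NumThreads, int32_t Tid,
                          int32_t *IsLastIter, int64_t *LB, int64_t *UB,
                          int64_t *Stride) {
  int64_t Chunk = (TripCount + NumThreads - 1) / NumThreads; // ceil division
  *LB = Tid * Chunk;
  *UB = *LB + Chunk - 1;
  if (*UB >= TripCount - 1)
    *UB = TripCount - 1;            // clamp the final chunk
  *IsLastIter = (*UB == TripCount - 1);
  *Stride = Chunk;                  // distance to this thread's next chunk
}

} // namespace sketch

int main() {
  int32_t Last = 0;
  int64_t LB = 0, UB = 0, ST = 0;
  sketch::forStaticInit(/*TripCount=*/10, /*NumThreads=*/4, /*Tid=*/3,
                        &Last, &LB, &UB, &ST);
  assert(LB == 9 && UB == 9 && Last == 1 && ST == 3);
  return 0;
}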
CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); - LValue Base = CGF.MakeAddrLValue( - CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy); + LValue Base = + CGF.MakeAddrLValue(CGF.Builder.CreateGEP(CGF, AffinitiesArray, Idx), + KmpTaskAffinityInfoTy); // affs[i].base_addr = &; LValue BaseAddrLVal = CGF.EmitLValueForField( Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr)); @@ -3910,7 +3913,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *LocRef = emitUpdateLocation(CGF, Loc); llvm::Value *GTid = getThreadID(CGF, Loc); llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( - AffinitiesArray.getPointer(), CGM.VoidPtrTy); + AffinitiesArray.emitRawPointer(CGF), CGM.VoidPtrTy); // FIXME: Emit the function and ignore its result for now unless the // runtime function is properly implemented. (void)CGF.EmitRuntimeCall( @@ -3921,8 +3924,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( NewTask, KmpTaskTWithPrivatesPtrTy); - LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy, - KmpTaskTWithPrivatesQTy); + LValue Base = CGF.MakeNaturalAlignRawAddrLValue(NewTaskNewTaskTTy, + KmpTaskTWithPrivatesQTy); LValue TDBase = CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin()); // Fill the data in the resulting kmp_task_t record. @@ -4047,7 +4050,7 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, CGF.ConvertTypeForMem(KmpDependInfoPtrTy)), KmpDependInfoPtrTy->castAs()); Address DepObjAddr = CGF.Builder.CreateGEP( - Base.getAddress(CGF), + CGF, Base.getAddress(CGF), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); LValue NumDepsBase = CGF.MakeAddrLValue( DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); @@ -4097,7 +4100,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy, LValue &PosLVal = *Pos.get(); llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); Base = CGF.MakeAddrLValue( - CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy); + CGF.Builder.CreateGEP(CGF, DependenciesArray, Idx), KmpDependInfoTy); } // deps[i].base_addr = &; LValue BaseAddrLVal = CGF.EmitLValueForField( @@ -4195,7 +4198,7 @@ void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF, ElSize, CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false)); llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc()); - Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos); + Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos); CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size); // Increase pos. @@ -4430,7 +4433,7 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal, Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy), CGF.ConvertTypeForMem(KmpDependInfoTy)); llvm::Value *DepObjAddr = CGF.Builder.CreateGEP( - Addr.getElementType(), Addr.getPointer(), + Addr.getElementType(), Addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true)); DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr, CGF.VoidPtrTy); @@ -4460,8 +4463,8 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, Address Begin = Base.getAddress(CGF); // Cast from pointer to array type to pointer to single element. 
- llvm::Value *End = CGF.Builder.CreateGEP( - Begin.getElementType(), Begin.getPointer(), NumDeps); + llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(), + Begin.emitRawPointer(CGF), NumDeps); // The basic structure here is a while-do loop. llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body"); llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done"); @@ -4469,7 +4472,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, CGF.EmitBlock(BodyBB); llvm::PHINode *ElementPHI = CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast"); - ElementPHI->addIncoming(Begin.getPointer(), EntryBB); + ElementPHI->addIncoming(Begin.emitRawPointer(CGF), EntryBB); Begin = Begin.withPointer(ElementPHI, KnownNonNull); Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo()); @@ -4483,12 +4486,12 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, FlagsLVal); // Shift the address forward by one element. - Address ElementNext = - CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext"); - ElementPHI->addIncoming(ElementNext.getPointer(), - CGF.Builder.GetInsertBlock()); + llvm::Value *ElementNext = + CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext") + .emitRawPointer(CGF); + ElementPHI->addIncoming(ElementNext, CGF.Builder.GetInsertBlock()); llvm::Value *IsEmpty = - CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty"); + CGF.Builder.CreateICmpEQ(ElementNext, End, "omp.isempty"); CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); // Done. CGF.EmitBlock(DoneBB, /*IsFinished=*/true); @@ -4531,7 +4534,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, DepTaskArgs[1] = ThreadID; DepTaskArgs[2] = NewTask; DepTaskArgs[3] = NumOfElements; - DepTaskArgs[4] = DependenciesArray.getPointer(); + DepTaskArgs[4] = DependenciesArray.emitRawPointer(CGF); DepTaskArgs[5] = CGF.Builder.getInt32(0); DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); } @@ -4563,7 +4566,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, DepWaitTaskArgs[0] = UpLoc; DepWaitTaskArgs[1] = ThreadID; DepWaitTaskArgs[2] = NumOfElements; - DepWaitTaskArgs[3] = DependenciesArray.getPointer(); + DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF); DepWaitTaskArgs[4] = CGF.Builder.getInt32(0); DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); DepWaitTaskArgs[6] = @@ -4725,8 +4728,8 @@ static void EmitOMPAggregateReduction( const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe(); llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr); - llvm::Value *RHSBegin = RHSAddr.getPointer(); - llvm::Value *LHSBegin = LHSAddr.getPointer(); + llvm::Value *RHSBegin = RHSAddr.emitRawPointer(CGF); + llvm::Value *LHSBegin = LHSAddr.emitRawPointer(CGF); // Cast from pointer to array type to pointer to single element. 
llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements); @@ -4990,7 +4993,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc, QualType ReductionArrayTy = C.getConstantArrayType( C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal, /*IndexTypeQuals=*/0); - Address ReductionList = + RawAddress ReductionList = CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); const auto *IPriv = Privates.begin(); unsigned Idx = 0; @@ -5462,7 +5465,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit( C.getConstantArrayType(RDType, ArraySize, nullptr, ArraySizeModifier::Normal, /*IndexTypeQuals=*/0); // kmp_task_red_input_t .rd_input.[Size]; - Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input."); + RawAddress TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input."); ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs, Data.ReductionCopies, Data.ReductionOps); for (unsigned Cnt = 0; Cnt < Size; ++Cnt) { @@ -5473,7 +5476,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit( TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs, /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc, ".rd_input.gep."); - LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType); + LValue ElemLVal = CGF.MakeNaturalAlignRawAddrLValue(GEP, RDType); // ElemLVal.reduce_shar = &Shareds[Cnt]; LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD); RCG.emitSharedOrigLValue(CGF, Cnt); @@ -5631,7 +5634,7 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc, DepWaitTaskArgs[0] = UpLoc; DepWaitTaskArgs[1] = ThreadID; DepWaitTaskArgs[2] = NumOfElements; - DepWaitTaskArgs[3] = DependenciesArray.getPointer(); + DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF); DepWaitTaskArgs[4] = CGF.Builder.getInt32(0); DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); DepWaitTaskArgs[6] = @@ -5854,7 +5857,7 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF, AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy, AllocatorTraitsLVal.getBaseInfo(), AllocatorTraitsLVal.getTBAAInfo()); - llvm::Value *Traits = Addr.getPointer(); + llvm::Value *Traits = Addr.emitRawPointer(CGF); llvm::Value *AllocatorVal = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( @@ -7314,17 +7317,19 @@ class MappableExprsHandler { CGF.EmitOMPSharedLValue(MC.getAssociatedExpression()) .getAddress(CGF); } - Size = CGF.Builder.CreatePtrDiff( - CGF.Int8Ty, ComponentLB.getPointer(), LB.getPointer()); + llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF); + llvm::Value *LBPtr = LB.emitRawPointer(CGF); + Size = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr, + LBPtr); break; } } assert(Size && "Failed to determine structure size"); CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); - CombinedInfo.BasePointers.push_back(BP.getPointer()); + CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF)); CombinedInfo.DevicePtrDecls.push_back(nullptr); CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None); - CombinedInfo.Pointers.push_back(LB.getPointer()); + CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF)); CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( Size, CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.Types.push_back(Flags); @@ -7334,13 +7339,14 @@ class MappableExprsHandler { LB = CGF.Builder.CreateConstGEP(ComponentLB, 1); } CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); - 
CombinedInfo.BasePointers.push_back(BP.getPointer()); + CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF)); CombinedInfo.DevicePtrDecls.push_back(nullptr); CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None); - CombinedInfo.Pointers.push_back(LB.getPointer()); + CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF)); + llvm::Value *LBPtr = LB.emitRawPointer(CGF); Size = CGF.Builder.CreatePtrDiff( - CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(), - LB.getPointer()); + CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF), + LBPtr); CombinedInfo.Sizes.push_back( CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.Types.push_back(Flags); @@ -7358,20 +7364,21 @@ class MappableExprsHandler { (Next == CE && MapType != OMPC_MAP_unknown)) { if (!IsMappingWholeStruct) { CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); - CombinedInfo.BasePointers.push_back(BP.getPointer()); + CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF)); CombinedInfo.DevicePtrDecls.push_back(nullptr); CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None); - CombinedInfo.Pointers.push_back(LB.getPointer()); + CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF)); CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( Size, CGF.Int64Ty, /*isSigned=*/true)); CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1); } else { StructBaseCombinedInfo.Exprs.emplace_back(MapDecl, MapExpr); - StructBaseCombinedInfo.BasePointers.push_back(BP.getPointer()); + StructBaseCombinedInfo.BasePointers.push_back( + BP.emitRawPointer(CGF)); StructBaseCombinedInfo.DevicePtrDecls.push_back(nullptr); StructBaseCombinedInfo.DevicePointers.push_back(DeviceInfoTy::None); - StructBaseCombinedInfo.Pointers.push_back(LB.getPointer()); + StructBaseCombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF)); StructBaseCombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast( Size, CGF.Int64Ty, /*isSigned=*/true)); StructBaseCombinedInfo.NonContigInfo.Dims.push_back( @@ -8213,11 +8220,11 @@ class MappableExprsHandler { } CombinedInfo.Exprs.push_back(VD); // Base is the base of the struct - CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer()); + CombinedInfo.BasePointers.push_back(PartialStruct.Base.emitRawPointer(CGF)); CombinedInfo.DevicePtrDecls.push_back(nullptr); CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None); // Pointer is the address of the lowest element - llvm::Value *LB = LBAddr.getPointer(); + llvm::Value *LB = LBAddr.emitRawPointer(CGF); const CXXMethodDecl *MD = CGF.CurFuncDecl ? dyn_cast(CGF.CurFuncDecl) : nullptr; const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr; @@ -8231,7 +8238,7 @@ class MappableExprsHandler { // if the this[:1] expression had appeared in a map clause with a map-type // of tofrom. 
// Emit this[:1] - CombinedInfo.Pointers.push_back(PartialStruct.Base.getPointer()); + CombinedInfo.Pointers.push_back(PartialStruct.Base.emitRawPointer(CGF)); QualType Ty = MD->getFunctionObjectParameterType(); llvm::Value *Size = CGF.Builder.CreateIntCast(CGF.getTypeSize(Ty), CGF.Int64Ty, @@ -8240,7 +8247,7 @@ class MappableExprsHandler { } else { CombinedInfo.Pointers.push_back(LB); // Size is (addr of {highest+1} element) - (addr of lowest element) - llvm::Value *HB = HBAddr.getPointer(); + llvm::Value *HB = HBAddr.emitRawPointer(CGF); llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32( HBAddr.getElementType(), HB, /*Idx0=*/1); llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy); @@ -8749,7 +8756,7 @@ class MappableExprsHandler { Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue( CV, ElementType, CGF.getContext().getDeclAlign(VD), AlignmentSource::Decl)); - CombinedInfo.Pointers.push_back(PtrAddr.getPointer()); + CombinedInfo.Pointers.push_back(PtrAddr.emitRawPointer(CGF)); } else { CombinedInfo.Pointers.push_back(CV); } @@ -8881,7 +8888,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) { OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind(); switch (D.getDirectiveKind()) { case OMPD_target: - // For now, just treat 'target teams loop' as if it's distributed. + // For now, treat 'target' with nested 'teams loop' as if it's + // distributed (target teams distribute). if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop) return NestedDir; if (DKind == OMPD_teams) { @@ -9365,7 +9373,8 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall( SizeEmitter) { OpenMPDirectiveKind Kind = D.getDirectiveKind(); const OMPExecutableDirective *TD = &D; - // Get nested teams distribute kind directive, if any. + // Get nested teams distribute kind directive, if any. For now, treat + // 'target_teams_loop' as if it's really a target_teams_distribute. 
if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) && Kind != OMPD_target_teams_loop) TD = getNestedDistributeDirective(CGM.getContext(), D); @@ -9560,10 +9569,11 @@ static void emitTargetCallKernelLaunch( bool HasNoWait = D.hasClausesOfKind(); unsigned NumTargetItems = InputInfo.NumberOfTargetItems; - llvm::Value *BasePointersArray = InputInfo.BasePointersArray.getPointer(); - llvm::Value *PointersArray = InputInfo.PointersArray.getPointer(); - llvm::Value *SizesArray = InputInfo.SizesArray.getPointer(); - llvm::Value *MappersArray = InputInfo.MappersArray.getPointer(); + llvm::Value *BasePointersArray = + InputInfo.BasePointersArray.emitRawPointer(CGF); + llvm::Value *PointersArray = InputInfo.PointersArray.emitRawPointer(CGF); + llvm::Value *SizesArray = InputInfo.SizesArray.emitRawPointer(CGF); + llvm::Value *MappersArray = InputInfo.MappersArray.emitRawPointer(CGF); auto &&EmitTargetCallFallbackCB = [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS, @@ -10311,15 +10321,16 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall( // Source location for the ident struct llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc()); - llvm::Value *OffloadingArgs[] = {RTLoc, - DeviceID, - PointerNum, - InputInfo.BasePointersArray.getPointer(), - InputInfo.PointersArray.getPointer(), - InputInfo.SizesArray.getPointer(), - MapTypesArray, - MapNamesArray, - InputInfo.MappersArray.getPointer()}; + llvm::Value *OffloadingArgs[] = { + RTLoc, + DeviceID, + PointerNum, + InputInfo.BasePointersArray.emitRawPointer(CGF), + InputInfo.PointersArray.emitRawPointer(CGF), + InputInfo.SizesArray.emitRawPointer(CGF), + MapTypesArray, + MapNamesArray, + InputInfo.MappersArray.emitRawPointer(CGF)}; // Select the right runtime function call for each standalone // directive. 
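A minimal standalone sketch of the pattern driving most of the mechanical changes in these hunks, where Addr.getPointer() becomes Addr.emitRawPointer(CGF). The motivation stated here is an assumption made for illustration (the real clang::CodeGen::Address carries more state): an Address may describe a pointer that still needs IR emitted to produce it, so obtaining the raw llvm::Value* requires the emitting CodeGenFunction, while getBasePointer() only returns the stored base. SketchBuilder and SketchAddress below are invented names, not clang classes.

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct SketchBuilder {                  // stands in for CGF and its IR builder
  std::vector<std::string> Emitted;     // "IR" emitted so far
  std::string createGEP(const std::string &Base, int64_t Offset) {
    std::string Name = "%t" + std::to_string(Emitted.size());
    Emitted.push_back(Name + " = gep " + Base + ", " + std::to_string(Offset));
    return Name;
  }
};

class SketchAddress {
  std::string Base;                     // stands in for llvm::Value *
  int64_t PendingOffset;                // adjustment not yet realized as IR
public:
  SketchAddress(std::string B, int64_t Off)
      : Base(std::move(B)), PendingOffset(Off) {}
  const std::string &getBasePointer() const { return Base; }
  std::string emitRawPointer(SketchBuilder &CGF) const {
    // May have to emit an instruction; this is why the builder is needed.
    return PendingOffset ? CGF.createGEP(Base, PendingOffset) : Base;
  }
};

By contrast, the RawAddress values introduced in these hunks (for example the CreateMemTemp results) cover the plain case where the pointer is already a materialized value, which is why those call sites can keep using getPointer().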
@@ -11130,7 +11141,7 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF, getThreadID(CGF, D.getBeginLoc()), llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()), CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( - CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(), + CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).emitRawPointer(CGF), CGM.VoidPtrTy)}; llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction( @@ -11164,7 +11175,8 @@ static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM, /*Volatile=*/false, Int64Ty); } llvm::Value *Args[] = { - ULoc, ThreadID, CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()}; + ULoc, ThreadID, + CGF.Builder.CreateConstArrayGEP(CntAddr, 0).emitRawPointer(CGF)}; llvm::FunctionCallee RTLFn; llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); OMPDoacrossKind ODK; @@ -11334,7 +11346,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF, Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID( CGF, SourceLocation::getFromRawEncoding(LocEncoding)); Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( - Addr.getPointer(), CGF.VoidPtrTy); + Addr.emitRawPointer(CGF), CGF.VoidPtrTy); llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr); Args[2] = AllocVal; CGF.EmitRuntimeCall(RTLFn, Args); @@ -11692,15 +11704,17 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LLIVTy, getName({UniqueDeclName, "iv"})); cast(LastIV)->setAlignment( IVLVal.getAlignment().getAsAlign()); - LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType()); + LValue LastIVLVal = + CGF.MakeNaturalAlignRawAddrLValue(LastIV, IVLVal.getType()); // Last value of the lastprivate conditional. // decltype(priv_a) last_a; llvm::GlobalVariable *Last = OMPBuilder.getOrCreateInternalVariable( CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName); - Last->setAlignment(LVal.getAlignment().getAsAlign()); - LValue LastLVal = CGF.MakeAddrLValue( - Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType()); + cast(Last)->setAlignment( + LVal.getAlignment().getAsAlign()); + LValue LastLVal = + CGF.MakeRawAddrLValue(Last, LVal.getType(), LVal.getAlignment()); // Global loop counter. Required to handle inner parallel-for regions. // iv @@ -11873,9 +11887,8 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate( // The variable was not updated in the region - exit. if (!GV) return; - LValue LPLVal = CGF.MakeAddrLValue( - Address(GV, GV->getValueType(), PrivLVal.getAlignment()), - PrivLVal.getType().getNonReferenceType()); + LValue LPLVal = CGF.MakeRawAddrLValue( + GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment()); llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc); CGF.EmitStoreOfScalar(Res, PrivLVal); } diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h index c3206427b143e..522ae3d35d22d 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.h +++ b/clang/lib/CodeGen/CGOpenMPRuntime.h @@ -1068,13 +1068,12 @@ class CGOpenMPRuntime { /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, - const VarDecl *VD, - Address VDAddr, + const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. 
- virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); + virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index 299ee1460b3db..eb716520e5ff5 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -646,7 +646,6 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx, case OMPD_target: case OMPD_target_teams: return hasNestedSPMDDirective(Ctx, D); - case OMPD_target_teams_loop: case OMPD_target_parallel_loop: case OMPD_target_parallel: case OMPD_target_parallel_for: @@ -658,6 +657,12 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx, return true; case OMPD_target_teams_distribute: return false; + case OMPD_target_teams_loop: + // Whether this is true or not depends on how the directive will + // eventually be emitted. + if (auto *TTLD = dyn_cast(&D)) + return TTLD->canBeParallelFor(); + return false; case OMPD_parallel: case OMPD_for: case OMPD_parallel_for: @@ -1096,7 +1101,8 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF, llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo(); llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( VoidPtr, VarPtrTy, VD->getName() + "_on_stack"); - LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy); + LValue VarAddr = + CGF.MakeNaturalAlignPointeeRawAddrLValue(CastedVoidPtr, VarTy); Rec.second.PrivateAddr = VarAddr.getAddress(CGF); Rec.second.GlobalizedVal = VoidPtr; @@ -1206,8 +1212,8 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF, bool IsBareKernel = D.getSingleClause(); - Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, - /*Name=*/".zero.addr"); + RawAddress ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, + /*Name=*/".zero.addr"); CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr); llvm::SmallVector OutlinedFnArgs; // We don't emit any thread id function call in bare kernel, but because the @@ -1215,7 +1221,7 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF, if (IsBareKernel) OutlinedFnArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy)); else - OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer()); + OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).emitRawPointer(CGF)); OutlinedFnArgs.push_back(ZeroAddr.getPointer()); OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end()); emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs); @@ -1289,7 +1295,7 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF, llvm::ConstantInt::get(CGF.Int32Ty, -1), FnPtr, ID, - Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(), + Bld.CreateBitOrPointerCast(CapturedVarsAddrs.emitRawPointer(CGF), CGF.VoidPtrPtrTy), llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())}; CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( @@ -1503,17 +1509,18 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr, CGF.EmitBlock(PreCondBB); llvm::PHINode *PhiSrc = Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2); - PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB); + PhiSrc->addIncoming(Ptr.emitRawPointer(CGF), CurrentBB); llvm::PHINode *PhiDest = Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2); - 
PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB); + PhiDest->addIncoming(ElemPtr.emitRawPointer(CGF), CurrentBB); Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment()); ElemPtr = Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment()); + llvm::Value *PtrEndRaw = PtrEnd.emitRawPointer(CGF); + llvm::Value *PtrRaw = Ptr.emitRawPointer(CGF); llvm::Value *PtrDiff = Bld.CreatePtrDiff( - CGF.Int8Ty, PtrEnd.getPointer(), - Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(), - CGF.VoidPtrTy)); + CGF.Int8Ty, PtrEndRaw, + Bld.CreatePointerBitCastOrAddrSpaceCast(PtrRaw, CGF.VoidPtrTy)); Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)), ThenBB, ExitBB); CGF.EmitBlock(ThenBB); @@ -1528,8 +1535,8 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr, TBAAAccessInfo()); Address LocalPtr = Bld.CreateConstGEP(Ptr, 1); Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1); - PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB); - PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB); + PhiSrc->addIncoming(LocalPtr.emitRawPointer(CGF), ThenBB); + PhiDest->addIncoming(LocalElemPtr.emitRawPointer(CGF), ThenBB); CGF.EmitBranch(PreCondBB); CGF.EmitBlock(ExitBB); } else { @@ -1676,10 +1683,10 @@ static void emitReductionListCopy( // scope and that of functions it invokes (i.e., reduce_function). // RemoteReduceData[i] = (void*)&RemoteElem if (UpdateDestListPtr) { - CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( - DestElementAddr.getPointer(), CGF.VoidPtrTy), - DestElementPtrAddr, /*Volatile=*/false, - C.VoidPtrTy); + CGF.EmitStoreOfScalar( + Bld.CreatePointerBitCastOrAddrSpaceCast( + DestElementAddr.emitRawPointer(CGF), CGF.VoidPtrTy), + DestElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy); } ++Idx; @@ -1830,7 +1837,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, // elemptr = ((CopyType*)(elemptrptr)) + I Address ElemPtr(ElemPtrPtr, CopyType, Align); if (NumIters > 1) - ElemPtr = Bld.CreateGEP(ElemPtr, Cnt); + ElemPtr = Bld.CreateGEP(CGF, ElemPtr, Cnt); // Get pointer to location in transfer medium. 
// MediumPtr = &medium[warp_id] @@ -1894,7 +1901,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); Address TargetElemPtr(TargetElemPtrVal, CopyType, Align); if (NumIters > 1) - TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt); + TargetElemPtr = Bld.CreateGEP(CGF, TargetElemPtr, Cnt); // *TargetElemPtr = SrcMediumVal; llvm::Value *SrcMediumValue = @@ -2105,9 +2112,9 @@ static llvm::Function *emitShuffleAndReduceFunction( CGF.EmitBlock(ThenBB); // reduce_function(LocalReduceList, RemoteReduceList) llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( - LocalReduceList.getPointer(), CGF.VoidPtrTy); + LocalReduceList.emitRawPointer(CGF), CGF.VoidPtrTy); llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( - RemoteReduceList.getPointer(), CGF.VoidPtrTy); + RemoteReduceList.emitRawPointer(CGF), CGF.VoidPtrTy); CGM.getOpenMPRuntime().emitOutlinedFunctionCall( CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr}); Bld.CreateBr(MergeBB); @@ -2218,9 +2225,9 @@ static llvm::Value *emitListToGlobalCopyFunction( llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); LValue GlobLVal = CGF.EmitLValueForField( - CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); + CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD); Address GlobAddr = GlobLVal.getAddress(CGF); - GlobLVal.setAddress(Address(GlobAddr.getPointer(), + GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF), CGF.ConvertTypeForMem(Private->getType()), GlobAddr.getAlignment())); switch (CGF.getEvaluationKind(Private->getType())) { @@ -2304,7 +2311,7 @@ static llvm::Value *emitListToGlobalReduceFunction( // 1. Build a list of reduction variables. // void *RedList[] = {[0], ..., [-1]}; - Address ReductionList = + RawAddress ReductionList = CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); auto IPriv = Privates.begin(); llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), @@ -2319,10 +2326,10 @@ static llvm::Value *emitListToGlobalReduceFunction( llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); LValue GlobLVal = CGF.EmitLValueForField( - CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); + CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD); Address GlobAddr = GlobLVal.getAddress(CGF); - CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false, - C.VoidPtrTy); + CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem, + /*Volatile=*/false, C.VoidPtrTy); if ((*IPriv)->getType()->isVariablyModifiedType()) { // Store array size. 
++Idx; @@ -2425,9 +2432,9 @@ static llvm::Value *emitGlobalToListCopyFunction( llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); LValue GlobLVal = CGF.EmitLValueForField( - CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); + CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD); Address GlobAddr = GlobLVal.getAddress(CGF); - GlobLVal.setAddress(Address(GlobAddr.getPointer(), + GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF), CGF.ConvertTypeForMem(Private->getType()), GlobAddr.getAlignment())); switch (CGF.getEvaluationKind(Private->getType())) { @@ -2526,10 +2533,10 @@ static llvm::Value *emitGlobalToListReduceFunction( llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); LValue GlobLVal = CGF.EmitLValueForField( - CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); + CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD); Address GlobAddr = GlobLVal.getAddress(CGF); - CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false, - C.VoidPtrTy); + CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem, + /*Volatile=*/false, C.VoidPtrTy); if ((*IPriv)->getType()->isVariablyModifiedType()) { // Store array size. ++Idx; @@ -2545,7 +2552,7 @@ static llvm::Value *emitGlobalToListReduceFunction( } // Call reduce_function(ReduceList, GlobalReduceList) - llvm::Value *GlobalReduceList = ReductionList.getPointer(); + llvm::Value *GlobalReduceList = ReductionList.emitRawPointer(CGF); Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); @@ -2876,7 +2883,7 @@ void CGOpenMPRuntimeGPU::emitReduction( } llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( - ReductionList.getPointer(), CGF.VoidPtrTy); + ReductionList.emitRawPointer(CGF), CGF.VoidPtrTy); llvm::Function *ReductionFn = emitReductionFunction( CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy), Privates, LHSExprs, RHSExprs, ReductionOps); @@ -3106,15 +3113,15 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper( // Get the array of arguments. SmallVector Args; - Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer()); - Args.emplace_back(ZeroAddr.getPointer()); + Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).emitRawPointer(CGF)); + Args.emplace_back(ZeroAddr.emitRawPointer(CGF)); CGBuilderTy &Bld = CGF.Builder; auto CI = CS.capture_begin(); // Use global memory for data sharing. // Handle passing of global args to workers. 
- Address GlobalArgs = + RawAddress GlobalArgs = CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args"); llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer(); llvm::Value *DataSharingArgs[] = {GlobalArgsPtr}; @@ -3400,7 +3407,7 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas( VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType().getCanonicalType()) .getAddress(CGF); - CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal); + CGF.EmitStoreOfScalar(VDAddr.emitRawPointer(CGF), VarLVal); } } } @@ -3459,7 +3466,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective( case CudaArch::SM_20: case CudaArch::SM_21: case CudaArch::SM_30: - case CudaArch::SM_32: + case CudaArch::SM_32_: case CudaArch::SM_35: case CudaArch::SM_37: case CudaArch::SM_50: diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp index 634a55fec5182..868b1ab98e048 100644 --- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -41,10 +41,11 @@ namespace { /// contains enough information to determine where the runs break. Microsoft /// and Itanium follow different rules and use different codepaths. /// * It is desired that, when possible, bitfields use the appropriate iN type -/// when lowered to llvm types. For example unsigned x : 24 gets lowered to +/// when lowered to llvm types. For example unsigned x : 24 gets lowered to /// i24. This isn't always possible because i24 has storage size of 32 bit -/// and if it is possible to use that extra byte of padding we must use -/// [i8 x 3] instead of i24. The function clipTailPadding does this. +/// and if it is possible to use that extra byte of padding we must use [i8 x +/// 3] instead of i24. This is computed when accumulating bitfields in +/// accumulateBitfields. /// C++ examples that require clipping: /// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3 /// struct A { int a : 24; ~A(); }; // a must be clipped because: @@ -62,11 +63,7 @@ namespace { /// that the tail padding is not used in the complete class.) However, /// because LLVM reads from the complete type it can generate incorrect code /// if we do not clip the tail padding off of the bitfield in the complete -/// layout. This introduces a somewhat awkward extra unnecessary clip stage. -/// The location of the clip is stored internally as a sentinel of type -/// SCISSOR. If LLVM were updated to read base types (which it probably -/// should because locations of things such as VBases are bogus in the llvm -/// type anyway) then we could eliminate the SCISSOR. +/// layout. /// * Itanium allows nearly empty primary virtual bases. These bases don't get /// get their own storage because they're laid out as part of another base /// or at the beginning of the structure. Determining if a VBase actually @@ -200,9 +197,7 @@ struct CGRecordLowering { const CXXRecordDecl *Query) const; void calculateZeroInit(); CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const; - /// Lowers bitfield storage types to I8 arrays for bitfields with tail - /// padding that is or can potentially be used. - void clipTailPadding(); + void checkBitfieldClipping() const; /// Determines if we need a packed llvm struct. void determinePacked(bool NVBaseType); /// Inserts padding everywhere it's needed. 
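A standalone sketch (not CGRecordLowering code) of the size test behind the BestClipped flag added in the hunks that follow. It mirrors the invariants asserted there: a clipped access unit satisfies getSize(getIntNType(bits)) > AccessSize and is stored as an [N x i8] byte array, while an unclipped unit keeps the natural iN type with getSize(Type) == AccessSize. The power-of-two allocation rule below is an assumed simplification standing in for the DataLayout query.

#include <cstdint>
#include <iostream>

// Assumed model: iN is allocated at the next power-of-two byte size,
// e.g. i24 (3 bytes of data) allocates 4 bytes.
static uint64_t allocSizeOfIntN(uint64_t Bytes) {
  uint64_t P = 1;
  while (P < Bytes)
    P *= 2;
  return P;
}

// Must the access unit be an [N x i8] byte array rather than iN?
static bool mustUseByteArray(uint64_t AccessSizeBytes) {
  return allocSizeOfIntN(AccessSizeBytes) != AccessSizeBytes;
}

int main() {
  std::cout << mustUseByteArray(3) << '\n'; // 1: e.g. 'unsigned a : 24' followed by a char
  std::cout << mustUseByteArray(4) << '\n'; // 0: a 4-byte unit can stay i32
}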
@@ -305,7 +300,7 @@ void CGRecordLowering::lower(bool NVBaseType) { } llvm::stable_sort(Members); Members.push_back(StorageInfo(Size, getIntNType(8))); - clipTailPadding(); + checkBitfieldClipping(); determinePacked(NVBaseType); insertPadding(); Members.pop_back(); @@ -531,6 +526,7 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType, // available padding characters. RecordDecl::field_iterator BestEnd = Begin; CharUnits BestEndOffset; + bool BestClipped; // Whether the representation must be in a byte array. for (;;) { // AtAlignedBoundary is true iff Field is the (potential) start of a new @@ -593,10 +589,9 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType, // this is the best seen so far. BestEnd = Field; BestEndOffset = BeginOffset + AccessSize; - if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses) - // Fine-grained access, so no merging of spans. - InstallBest = true; - else if (!BitSizeSinceBegin) + // Assume clipped until proven not below. + BestClipped = true; + if (!BitSizeSinceBegin) // A zero-sized initial span -- this will install nothing and reset // for another. InstallBest = true; @@ -624,6 +619,12 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType, // The access unit is not at a naturally aligned offset within the // structure. InstallBest = true; + + if (InstallBest && BestEnd == Field) + // We're installing the first span, whose clipping was presumed + // above. Compute it correctly. + if (getSize(Type) == AccessSize) + BestClipped = false; } if (!InstallBest) { @@ -656,11 +657,15 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType, // access unit. BestEndOffset = BeginOffset + TypeSize; BestEnd = Field; + BestClipped = false; } if (Barrier) // The next field is a barrier that we cannot merge across. InstallBest = true; + else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses) + // Fine-grained access, so no merging of spans. + InstallBest = true; else // Otherwise, we're not installing. Update the bit size // of the current span to go all the way to LimitOffset, which is @@ -679,7 +684,17 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType, // Add the storage member for the access unit to the record. The // bitfields get the offset of their storage but come afterward and // remain there after a stable sort. - llvm::Type *Type = getIntNType(Context.toBits(AccessSize)); + llvm::Type *Type; + if (BestClipped) { + assert(getSize(getIntNType(Context.toBits(AccessSize))) > + AccessSize && + "Clipped access need not be clipped"); + Type = getByteArrayType(AccessSize); + } else { + Type = getIntNType(Context.toBits(AccessSize)); + assert(getSize(Type) == AccessSize && + "Unclipped access must be clipped"); + } Members.push_back(StorageInfo(BeginOffset, Type)); for (; Begin != BestEnd; ++Begin) if (!Begin->isZeroLengthBitField(Context)) @@ -934,32 +949,21 @@ void CGRecordLowering::calculateZeroInit() { } } -void CGRecordLowering::clipTailPadding() { - std::vector::iterator Prior = Members.begin(); - CharUnits Tail = getSize(Prior->Data); - for (std::vector::iterator Member = Prior + 1, - MemberEnd = Members.end(); - Member != MemberEnd; ++Member) { +// Verify accumulateBitfields computed the correct storage representations. +void CGRecordLowering::checkBitfieldClipping() const { +#ifndef NDEBUG + auto Tail = CharUnits::Zero(); + for (const auto &M : Members) { // Only members with data and the scissor can cut into tail padding. 
- if (!Member->Data && Member->Kind != MemberInfo::Scissor) + if (!M.Data && M.Kind != MemberInfo::Scissor) continue; - if (Member->Offset < Tail) { - assert(Prior->Kind == MemberInfo::Field && - "Only storage fields have tail padding!"); - if (!Prior->FD || Prior->FD->isBitField()) - Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo( - cast(Prior->Data)->getIntegerBitWidth(), 8))); - else { - assert(Prior->FD->hasAttr() && - "should not have reused this field's tail padding"); - Prior->Data = getByteArrayType( - Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width); - } - } - if (Member->Data) - Prior = Member; - Tail = Prior->Offset + getSize(Prior->Data); + + assert(M.Offset >= Tail && "Bitfield access unit is not clipped"); + Tail = M.Offset; + if (M.Data) + Tail += getSize(M.Data); } +#endif } void CGRecordLowering::determinePacked(bool NVBaseType) { diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index cb5a004e4f4a6..576fe2f7a2d46 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -2294,7 +2294,7 @@ std::pair CodeGenFunction::EmitAsmInputLValue( Address Addr = InputValue.getAddress(*this); ConstraintStr += '*'; - return {Addr.getPointer(), Addr.getElementType()}; + return {InputValue.getPointer(*this), Addr.getElementType()}; } std::pair @@ -2701,7 +2701,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { ArgTypes.push_back(DestAddr.getType()); ArgElemTypes.push_back(DestAddr.getElementType()); - Args.push_back(DestAddr.getPointer()); + Args.push_back(DestAddr.emitRawPointer(*this)); Constraints += "=*"; Constraints += OutputConstraint; ReadOnly = ReadNone = false; @@ -3076,8 +3076,8 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); // Initialize variable-length arrays. 
- LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), - Ctx.getTagDeclType(RD)); + LValue Base = MakeNaturalAlignRawAddrLValue( + CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD)); for (auto *FD : RD->fields()) { if (FD->hasCapturedVLAType()) { auto *ExprArg = diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp index ce3004473b903..c294b6e69ace3 100644 --- a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -24,6 +24,7 @@ #include "clang/AST/StmtVisitor.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PrettyStackTrace.h" +#include "clang/Basic/SourceManager.h" #include "llvm/ADT/SmallSet.h" #include "llvm/BinaryFormat/Dwarf.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" @@ -34,11 +35,14 @@ #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Metadata.h" #include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/Debug.h" #include using namespace clang; using namespace CodeGen; using namespace llvm::omp; +#define TTL_CODEGEN_TYPE "target-teams-loop-codegen" + static const VarDecl *getBaseDecl(const Expr *Ref); namespace { @@ -350,7 +354,8 @@ void CodeGenFunction::GenerateOpenMPCapturedVars( LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType()); llvm::Value *SrcAddrVal = EmitScalarConversion( - DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()), + DstAddr.emitRawPointer(*this), + Ctx.getPointerType(Ctx.getUIntPtrType()), Ctx.getPointerType(CurField->getType()), CurCap->getLocation()); LValue SrcLV = MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType()); @@ -364,7 +369,8 @@ void CodeGenFunction::GenerateOpenMPCapturedVars( CapturedVars.push_back(CV); } else { assert(CurCap->capturesVariable() && "Expected capture by reference."); - CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer()); + CapturedVars.push_back( + EmitLValue(*I).getAddress(*this).emitRawPointer(*this)); } } } @@ -375,8 +381,9 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc, ASTContext &Ctx = CGF.getContext(); llvm::Value *CastedPtr = CGF.EmitScalarConversion( - AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(), + AddrLV.getAddress(CGF).emitRawPointer(CGF), Ctx.getUIntPtrType(), Ctx.getPointerType(DstType), Loc); + // FIXME: should the pointee type (DstType) be passed? Address TmpAddr = CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF); return TmpAddr; @@ -702,8 +709,8 @@ void CodeGenFunction::EmitOMPAggregateAssign( llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr); SrcAddr = SrcAddr.withElementType(DestAddr.getElementType()); - llvm::Value *SrcBegin = SrcAddr.getPointer(); - llvm::Value *DestBegin = DestAddr.getPointer(); + llvm::Value *SrcBegin = SrcAddr.emitRawPointer(*this); + llvm::Value *DestBegin = DestAddr.emitRawPointer(*this); // Cast from pointer to array type to pointer to single element. llvm::Value *DestEnd = Builder.CreateInBoundsGEP(DestAddr.getElementType(), DestBegin, NumElements); @@ -1007,10 +1014,10 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) { CopyBegin = createBasicBlock("copyin.not.master"); CopyEnd = createBasicBlock("copyin.not.master.end"); // TODO: Avoid ptrtoint conversion. 
- auto *MasterAddrInt = - Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy); - auto *PrivateAddrInt = - Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy); + auto *MasterAddrInt = Builder.CreatePtrToInt( + MasterAddr.emitRawPointer(*this), CGM.IntPtrTy); + auto *PrivateAddrInt = Builder.CreatePtrToInt( + PrivateAddr.emitRawPointer(*this), CGM.IntPtrTy); Builder.CreateCondBr( Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin, CopyEnd); @@ -1429,9 +1436,12 @@ void CodeGenFunction::EmitOMPReductionClauseFinal( *this, D.getBeginLoc(), isOpenMPWorksharingDirective(D.getDirectiveKind())); } + bool TeamsLoopCanBeParallel = false; + if (auto *TTLD = dyn_cast(&D)) + TeamsLoopCanBeParallel = TTLD->canBeParallelFor(); bool WithNowait = D.getSingleClause() || isOpenMPParallelDirective(D.getDirectiveKind()) || - ReductionKind == OMPD_simd; + TeamsLoopCanBeParallel || ReductionKind == OMPD_simd; bool SimpleReduction = ReductionKind == OMPD_simd; // Emit nowait reduction if nowait clause is present or directive is a // parallel directive (it always has implicit barrier). @@ -1666,7 +1676,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( llvm::Type *VarTy = VDAddr.getElementType(); llvm::Value *Data = - CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy); + CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy); llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)); std::string Suffix = getNameWithSeparators({"cache", ""}); llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix); @@ -2045,7 +2055,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) { ->getParam(0) ->getType() .getNonReferenceType(); - Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr"); + RawAddress CountAddr = CreateMemTemp(LogicalTy, ".count.addr"); emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()}); llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count"); @@ -2061,7 +2071,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) { LValue LCVal = EmitLValue(LoopVarRef); Address LoopVarAddress = LCVal.getAddress(*this); emitCapturedStmtCall(*this, LoopVarClosure, - {LoopVarAddress.getPointer(), IndVar}); + {LoopVarAddress.emitRawPointer(*this), IndVar}); RunCleanupsScope BodyScope(*this); EmitStmt(BodyStmt); @@ -4795,7 +4805,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( ParamTypes.push_back(PrivatesPtr->getType()); for (const Expr *E : Data.PrivateVars) { const auto *VD = cast(cast(E)->getDecl()); - Address PrivatePtr = CGF.CreateMemTemp( + RawAddress PrivatePtr = CGF.CreateMemTemp( CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); PrivatePtrs.emplace_back(VD, PrivatePtr); CallArgs.push_back(PrivatePtr.getPointer()); @@ -4803,7 +4813,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( } for (const Expr *E : Data.FirstprivateVars) { const auto *VD = cast(cast(E)->getDecl()); - Address PrivatePtr = + RawAddress PrivatePtr = CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), ".firstpriv.ptr.addr"); PrivatePtrs.emplace_back(VD, PrivatePtr); @@ -4813,7 +4823,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( } for (const Expr *E : Data.LastprivateVars) { const auto *VD = cast(cast(E)->getDecl()); - Address PrivatePtr = + RawAddress PrivatePtr = CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), ".lastpriv.ptr.addr"); PrivatePtrs.emplace_back(VD, PrivatePtr); @@ -4826,7 +4836,7 @@ void 
CodeGenFunction::EmitOMPTaskBasedDirective( Ty = CGF.getContext().getPointerType(Ty); if (isAllocatableDecl(VD)) Ty = CGF.getContext().getPointerType(Ty); - Address PrivatePtr = CGF.CreateMemTemp( + RawAddress PrivatePtr = CGF.CreateMemTemp( CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); auto Result = UntiedLocalVars.insert( std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid()))); @@ -4859,7 +4869,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( if (auto *DI = CGF.getDebugInfo()) if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo()) (void)DI->EmitDeclareOfAutoVariable( - Pair.first, Pair.second.getPointer(), CGF.Builder, + Pair.first, Pair.second.getBasePointer(), CGF.Builder, /*UsePointerValue*/ true); } // Adjust mapping for internal locals by mapping actual memory instead of @@ -4912,14 +4922,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( RedCG, Cnt); Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); - Replacement = - Address(CGF.EmitScalarConversion( - Replacement.getPointer(), CGF.getContext().VoidPtrTy, - CGF.getContext().getPointerType( - Data.ReductionCopies[Cnt]->getType()), - Data.ReductionCopies[Cnt]->getExprLoc()), - CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()), - Replacement.getAlignment()); + Replacement = Address( + CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF), + CGF.getContext().VoidPtrTy, + CGF.getContext().getPointerType( + Data.ReductionCopies[Cnt]->getType()), + Data.ReductionCopies[Cnt]->getExprLoc()), + CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()), + Replacement.getAlignment()); Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement); } @@ -4970,7 +4980,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective( CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); Replacement = Address( CGF.EmitScalarConversion( - Replacement.getPointer(), CGF.getContext().VoidPtrTy, + Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy, CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), InRedPrivs[Cnt]->getExprLoc()), CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()), @@ -5089,7 +5099,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective( // If there is no user-defined mapper, the mapper array will be nullptr. In // this case, we don't need to privatize it. 
if (!isa_and_nonnull( - InputInfo.MappersArray.getPointer())) { + InputInfo.MappersArray.emitRawPointer(*this))) { MVD = createImplicitFirstprivateForType( getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); TargetScope.addPrivate(MVD, InputInfo.MappersArray); @@ -5115,7 +5125,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective( ParamTypes.push_back(PrivatesPtr->getType()); for (const Expr *E : Data.FirstprivateVars) { const auto *VD = cast(cast(E)->getDecl()); - Address PrivatePtr = + RawAddress PrivatePtr = CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), ".firstpriv.ptr.addr"); PrivatePtrs.emplace_back(VD, PrivatePtr); @@ -5194,14 +5204,14 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S, RedCG, Cnt); Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); - Replacement = - Address(CGF.EmitScalarConversion( - Replacement.getPointer(), CGF.getContext().VoidPtrTy, - CGF.getContext().getPointerType( - Data.ReductionCopies[Cnt]->getType()), - Data.ReductionCopies[Cnt]->getExprLoc()), - CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()), - Replacement.getAlignment()); + Replacement = Address( + CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF), + CGF.getContext().VoidPtrTy, + CGF.getContext().getPointerType( + Data.ReductionCopies[Cnt]->getType()), + Data.ReductionCopies[Cnt]->getExprLoc()), + CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()), + Replacement.getAlignment()); Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement); } @@ -5247,7 +5257,7 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S, CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); Replacement = Address( CGF.EmitScalarConversion( - Replacement.getPointer(), CGF.getContext().VoidPtrTy, + Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy, CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), InRedPrivs[Cnt]->getExprLoc()), CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()), @@ -5394,7 +5404,7 @@ void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( *this, Dependencies, DC->getBeginLoc()); - EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); + EmitStoreOfScalar(DepAddr.emitRawPointer(*this), DOLVal); return; } if (const auto *DC = S.getSingleClause()) { @@ -6471,21 +6481,21 @@ static void emitOMPAtomicCompareExpr( D->getType()->hasSignedIntegerRepresentation()); llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{ - XAddr.getPointer(), XAddr.getElementType(), + XAddr.emitRawPointer(CGF), XAddr.getElementType(), X->getType()->hasSignedIntegerRepresentation(), X->getType().isVolatileQualified()}; llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal; if (V) { LValue LV = CGF.EmitLValue(V); Address Addr = LV.getAddress(CGF); - VOpVal = {Addr.getPointer(), Addr.getElementType(), + VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(), V->getType()->hasSignedIntegerRepresentation(), V->getType().isVolatileQualified()}; } if (R) { LValue LV = CGF.EmitLValue(R); Address Addr = LV.getAddress(CGF); - ROpVal = {Addr.getPointer(), Addr.getElementType(), + ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(), R->getType()->hasSignedIntegerRepresentation(), R->getType().isVolatileQualified()}; } 
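A hypothetical user-level example (an illustration, not taken from the patch or its tests) of the construct that emitOMPAtomicCompareExpr above lowers: an atomic conditional update computing a minimum. The addresses of x, and of v/r when a capture form is used, are what the llvm::OpenMPIRBuilder::AtomicOpValue operands above are built from, now via emitRawPointer(CGF).

// Requires -fopenmp and OpenMP 5.1 'atomic compare' support.
void atomicMin(int &x, int e) {
#pragma omp atomic compare
  x = x > e ? e : x;
}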
@@ -7029,7 +7039,7 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) { std::tie(NumDependences, DependenciesArray) = CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences, S.getBeginLoc()); - DependenceList = DependenciesArray.getPointer(); + DependenceList = DependenciesArray.emitRawPointer(*this); } Data.HasNowaitClause = S.hasClausesOfKind(); @@ -7925,11 +7935,9 @@ void CodeGenFunction::EmitOMPParallelGenericLoopDirective( void CodeGenFunction::EmitOMPTeamsGenericLoopDirective( const OMPTeamsGenericLoopDirective &S) { // To be consistent with current behavior of 'target teams loop', emit - // 'teams loop' as if its constituent constructs are 'distribute, - // 'parallel, and 'for'. + // 'teams loop' as if its constituent constructs are 'teams' and 'distribute'. auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { - CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, - S.getDistInc()); + CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); }; // Emit teams region as a standalone region. @@ -7943,15 +7951,33 @@ void CodeGenFunction::EmitOMPTeamsGenericLoopDirective( CodeGenDistribute); CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); }; - emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); + emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); emitPostUpdateForReductionClause(*this, S, [](CodeGenFunction &) { return nullptr; }); } -static void -emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF, - const OMPTargetTeamsGenericLoopDirective &S, - PrePostActionTy &Action) { +#ifndef NDEBUG +static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF, + std::string StatusMsg, + const OMPExecutableDirective &D) { + bool IsDevice = CGF.CGM.getLangOpts().OpenMPIsTargetDevice; + if (IsDevice) + StatusMsg += ": DEVICE"; + else + StatusMsg += ": HOST"; + SourceLocation L = D.getBeginLoc(); + auto &SM = CGF.getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(L); + const char *FileName = PLoc.isValid() ? PLoc.getFilename() : nullptr; + unsigned LineNo = + PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L); + llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n"; +} +#endif + +static void emitTargetTeamsGenericLoopRegionAsParallel( + CodeGenFunction &CGF, PrePostActionTy &Action, + const OMPTargetTeamsGenericLoopDirective &S) { Action.Enter(CGF); // Emit 'teams loop' as if its constituent constructs are 'distribute, // 'parallel, and 'for'. @@ -7971,19 +7997,50 @@ emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF, CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); }; - + DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE, + emitTargetTeamsLoopCodegenStatus( + CGF, TTL_CODEGEN_TYPE " as parallel for", S)); emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, CodeGenTeams); emitPostUpdateForReductionClause(CGF, S, [](CodeGenFunction &) { return nullptr; }); } -/// Emit combined directive 'target teams loop' as if its constituent -/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'. +static void emitTargetTeamsGenericLoopRegionAsDistribute( + CodeGenFunction &CGF, PrePostActionTy &Action, + const OMPTargetTeamsGenericLoopDirective &S) { + Action.Enter(CGF); + // Emit 'teams loop' as if its constituent construct is 'distribute'. 
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { + CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); + }; + + // Emit teams region as a standalone region. + auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, + PrePostActionTy &Action) { + Action.Enter(CGF); + CodeGenFunction::OMPPrivateScope PrivateScope(CGF); + CGF.EmitOMPReductionClauseInit(S, PrivateScope); + (void)PrivateScope.Privatize(); + CGF.CGM.getOpenMPRuntime().emitInlinedDirective( + CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); + CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); + }; + DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE, + emitTargetTeamsLoopCodegenStatus( + CGF, TTL_CODEGEN_TYPE " as distribute", S)); + emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); + emitPostUpdateForReductionClause(CGF, S, + [](CodeGenFunction &) { return nullptr; }); +} + void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective( const OMPTargetTeamsGenericLoopDirective &S) { auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { - emitTargetTeamsGenericLoopRegion(CGF, S, Action); + if (S.canBeParallelFor()) + emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S); + else + emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S); }; emitCommonOMPTargetDirective(*this, S, CodeGen); } @@ -7993,7 +8050,10 @@ void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction( const OMPTargetTeamsGenericLoopDirective &S) { // Emit SPMD target parallel loop region as a standalone region. auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { - emitTargetTeamsGenericLoopRegion(CGF, S, Action); + if (S.canBeParallelFor()) + emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S); + else + emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S); }; llvm::Function *Fn; llvm::Constant *Addr; diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp index 8dee3f74b44b4..862369ae009f4 100644 --- a/clang/lib/CodeGen/CGVTables.cpp +++ b/clang/lib/CodeGen/CGVTables.cpp @@ -201,14 +201,13 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn, // Find the first store of "this", which will be to the alloca associated // with "this". 
- Address ThisPtr = - Address(&*AI, ConvertTypeForMem(MD->getFunctionObjectParameterType()), - CGM.getClassPointerAlignment(MD->getParent())); + Address ThisPtr = makeNaturalAddressForPointer( + &*AI, MD->getFunctionObjectParameterType(), + CGM.getClassPointerAlignment(MD->getParent())); llvm::BasicBlock *EntryBB = &Fn->front(); llvm::BasicBlock::iterator ThisStore = llvm::find_if(*EntryBB, [&](llvm::Instruction &I) { - return isa(I) && - I.getOperand(0) == ThisPtr.getPointer(); + return isa(I) && I.getOperand(0) == &*AI; }); assert(ThisStore != EntryBB->end() && "Store of this should be in entry block?"); diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h index 1e6f67250583d..cc9ad10ae5969 100644 --- a/clang/lib/CodeGen/CGValue.h +++ b/clang/lib/CodeGen/CGValue.h @@ -14,12 +14,13 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H #define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H +#include "Address.h" +#include "CodeGenTBAA.h" +#include "EHScopeStack.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Type.h" -#include "llvm/IR/Value.h" #include "llvm/IR/Type.h" -#include "Address.h" -#include "CodeGenTBAA.h" +#include "llvm/IR/Value.h" namespace llvm { class Constant; @@ -28,57 +29,64 @@ namespace llvm { namespace clang { namespace CodeGen { - class AggValueSlot; - class CodeGenFunction; - struct CGBitFieldInfo; +class AggValueSlot; +class CGBuilderTy; +class CodeGenFunction; +struct CGBitFieldInfo; /// RValue - This trivial value class is used to represent the result of an /// expression that is evaluated. It can be one of three things: either a /// simple LLVM SSA value, a pair of SSA values for complex numbers, or the /// address of an aggregate value in memory. class RValue { - enum Flavor { Scalar, Complex, Aggregate }; + friend struct DominatingValue; - // The shift to make to an aggregate's alignment to make it look - // like a pointer. - enum { AggAlignShift = 4 }; + enum FlavorEnum { Scalar, Complex, Aggregate }; - // Stores first value and flavor. - llvm::PointerIntPair V1; - // Stores second value and volatility. - llvm::PointerIntPair V2; - // Stores element type for aggregate values. - llvm::Type *ElementType; + union { + // Stores first and second value. + struct { + llvm::Value *first; + llvm::Value *second; + } Vals; + + // Stores aggregate address. + Address AggregateAddr; + }; + + unsigned IsVolatile : 1; + unsigned Flavor : 2; public: - bool isScalar() const { return V1.getInt() == Scalar; } - bool isComplex() const { return V1.getInt() == Complex; } - bool isAggregate() const { return V1.getInt() == Aggregate; } + RValue() : Vals{nullptr, nullptr}, Flavor(Scalar) {} + + bool isScalar() const { return Flavor == Scalar; } + bool isComplex() const { return Flavor == Complex; } + bool isAggregate() const { return Flavor == Aggregate; } - bool isVolatileQualified() const { return V2.getInt(); } + bool isVolatileQualified() const { return IsVolatile; } /// getScalarVal() - Return the Value* of this scalar value. llvm::Value *getScalarVal() const { assert(isScalar() && "Not a scalar!"); - return V1.getPointer(); + return Vals.first; } /// getComplexVal - Return the real/imag components of this complex value. /// std::pair getComplexVal() const { - return std::make_pair(V1.getPointer(), V2.getPointer()); + return std::make_pair(Vals.first, Vals.second); } /// getAggregateAddr() - Return the Value* of the address of the aggregate. 
Address getAggregateAddress() const { assert(isAggregate() && "Not an aggregate!"); - auto align = reinterpret_cast(V2.getPointer()) >> AggAlignShift; - return Address( - V1.getPointer(), ElementType, CharUnits::fromQuantity(align)); + return AggregateAddr; } - llvm::Value *getAggregatePointer() const { - assert(isAggregate() && "Not an aggregate!"); - return V1.getPointer(); + + llvm::Value *getAggregatePointer(QualType PointeeType, + CodeGenFunction &CGF) const { + return getAggregateAddress().getBasePointer(); } static RValue getIgnored() { @@ -88,17 +96,19 @@ class RValue { static RValue get(llvm::Value *V) { RValue ER; - ER.V1.setPointer(V); - ER.V1.setInt(Scalar); - ER.V2.setInt(false); + ER.Vals.first = V; + ER.Flavor = Scalar; + ER.IsVolatile = false; return ER; } + static RValue get(Address Addr, CodeGenFunction &CGF) { + return RValue::get(Addr.emitRawPointer(CGF)); + } static RValue getComplex(llvm::Value *V1, llvm::Value *V2) { RValue ER; - ER.V1.setPointer(V1); - ER.V2.setPointer(V2); - ER.V1.setInt(Complex); - ER.V2.setInt(false); + ER.Vals = {V1, V2}; + ER.Flavor = Complex; + ER.IsVolatile = false; return ER; } static RValue getComplex(const std::pair &C) { @@ -107,15 +117,15 @@ class RValue { // FIXME: Aggregate rvalues need to retain information about whether they are // volatile or not. Remove default to find all places that probably get this // wrong. + + /// Convert an Address to an RValue. If the Address is not + /// signed, create an RValue using the unsigned address. Otherwise, resign the + /// address using the provided type. static RValue getAggregate(Address addr, bool isVolatile = false) { RValue ER; - ER.V1.setPointer(addr.getPointer()); - ER.V1.setInt(Aggregate); - ER.ElementType = addr.getElementType(); - - auto align = static_cast(addr.getAlignment().getQuantity()); - ER.V2.setPointer(reinterpret_cast(align << AggAlignShift)); - ER.V2.setInt(isVolatile); + ER.AggregateAddr = addr; + ER.Flavor = Aggregate; + ER.IsVolatile = isVolatile; return ER; } }; @@ -178,8 +188,10 @@ class LValue { MatrixElt // This is a matrix element, use getVector* } LVType; - llvm::Value *V; - llvm::Type *ElementType; + union { + Address Addr = Address::invalid(); + llvm::Value *V; + }; union { // Index into a vector subscript: V[i] @@ -197,10 +209,6 @@ class LValue { // 'const' is unused here Qualifiers Quals; - // The alignment to use when accessing this lvalue. (For vector elements, - // this is the alignment of the whole vector.) - unsigned Alignment; - // objective-c's ivar bool Ivar:1; @@ -234,23 +242,19 @@ class LValue { Expr *BaseIvarExp; private: - void Initialize(QualType Type, Qualifiers Quals, CharUnits Alignment, + void Initialize(QualType Type, Qualifiers Quals, Address Addr, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) { - assert((!Alignment.isZero() || Type->isIncompleteType()) && - "initializing l-value with zero alignment!"); - if (isGlobalReg()) - assert(ElementType == nullptr && "Global reg does not store elem type"); - else - assert(ElementType != nullptr && "Must have elem type"); - this->Type = Type; this->Quals = Quals; const unsigned MaxAlign = 1U << 31; - this->Alignment = Alignment.getQuantity() <= MaxAlign - ? 
Alignment.getQuantity() - : MaxAlign; - assert(this->Alignment == Alignment.getQuantity() && - "Alignment exceeds allowed max!"); + CharUnits Alignment = Addr.getAlignment(); + assert((isGlobalReg() || !Alignment.isZero() || Type->isIncompleteType()) && + "initializing l-value with zero alignment!"); + if (Alignment.getQuantity() > MaxAlign) { + assert(false && "Alignment exceeds allowed max!"); + Alignment = CharUnits::fromQuantity(MaxAlign); + } + this->Addr = Addr; this->BaseInfo = BaseInfo; this->TBAAInfo = TBAAInfo; @@ -259,9 +263,20 @@ class LValue { this->ImpreciseLifetime = false; this->Nontemporal = false; this->ThreadLocalRef = false; + this->IsKnownNonNull = false; this->BaseIvarExp = nullptr; } + void initializeSimpleLValue(Address Addr, QualType Type, + LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, + ASTContext &Context) { + Qualifiers QS = Type.getQualifiers(); + QS.setObjCGCAttr(Context.getObjCGCAttrKind(Type)); + LVType = Simple; + Initialize(Type, QS, Addr, BaseInfo, TBAAInfo); + assert(Addr.getBasePointer()->getType()->isPointerTy()); + } + public: bool isSimple() const { return LVType == Simple; } bool isVectorElt() const { return LVType == VectorElt; } @@ -328,8 +343,8 @@ class LValue { LangAS getAddressSpace() const { return Quals.getAddressSpace(); } - CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); } - void setAlignment(CharUnits A) { Alignment = A.getQuantity(); } + CharUnits getAlignment() const { return Addr.getAlignment(); } + void setAlignment(CharUnits A) { Addr.setAlignment(A); } LValueBaseInfo getBaseInfo() const { return BaseInfo; } void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; } @@ -345,28 +360,32 @@ class LValue { // simple lvalue llvm::Value *getPointer(CodeGenFunction &CGF) const { assert(isSimple()); - return V; + return Addr.getBasePointer(); } - Address getAddress(CodeGenFunction &CGF) const { - return Address(getPointer(CGF), ElementType, getAlignment(), - isKnownNonNull()); - } - void setAddress(Address address) { + llvm::Value *emitRawPointer(CodeGenFunction &CGF) const { assert(isSimple()); - V = address.getPointer(); - ElementType = address.getElementType(); - Alignment = address.getAlignment().getQuantity(); - IsKnownNonNull = address.isKnownNonNull(); + return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr; } + Address getAddress(CodeGenFunction &CGF) const { + // FIXME: remove parameter. + return Addr; + } + + void setAddress(Address address) { Addr = address; } + // vector elt lvalue Address getVectorAddress() const { - return Address(getVectorPointer(), ElementType, getAlignment(), - (KnownNonNull_t)isKnownNonNull()); + assert(isVectorElt()); + return Addr; + } + llvm::Value *getRawVectorPointer(CodeGenFunction &CGF) const { + assert(isVectorElt()); + return Addr.emitRawPointer(CGF); } llvm::Value *getVectorPointer() const { assert(isVectorElt()); - return V; + return Addr.getBasePointer(); } llvm::Value *getVectorIdx() const { assert(isVectorElt()); @@ -374,12 +393,12 @@ class LValue { } Address getMatrixAddress() const { - return Address(getMatrixPointer(), ElementType, getAlignment(), - (KnownNonNull_t)isKnownNonNull()); + assert(isMatrixElt()); + return Addr; } llvm::Value *getMatrixPointer() const { assert(isMatrixElt()); - return V; + return Addr.getBasePointer(); } llvm::Value *getMatrixIdx() const { assert(isMatrixElt()); @@ -388,12 +407,12 @@ class LValue { // extended vector elements. 
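A recurring pattern in this and the following hunks is that accessors which used to be plain field reads, `getPointer()` and friends, become `emitRawPointer(CGF)` or `getRaw*Pointer(CGF)` and require a CodeGenFunction: with `Address` now able to carry a deferred offset (and, in the pointer-authentication work this prepares for, a signed base pointer), producing a bare `llvm::Value *` may have to emit IR first. A rough, hypothetical illustration of why a builder is needed at that point:

```cpp
#include "llvm/IR/IRBuilder.h"

// Hypothetical, stripped-down stand-in for Address: a base pointer plus a
// deferred byte offset. Handing out a raw llvm::Value* has to materialize
// that offset, which is why a builder (in clang, the CodeGenFunction) shows
// up in the new signatures.
struct SketchAddress {
  llvm::Value *Base = nullptr;
  llvm::Value *Offset = nullptr; // null when nothing is pending

  llvm::Value *emitRawPointer(llvm::IRBuilder<> &B) const {
    if (!Offset)
      return Base; // nothing to emit, same as the old getPointer()
    // Apply the pending byte offset before exposing a raw pointer.
    return B.CreateGEP(B.getInt8Ty(), Base, Offset, "addr.raw");
  }
};
```

Call sites that know the pointer is a plain alloca keep the cheap path through the `RawAddress` overloads introduced later in this patch.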
Address getExtVectorAddress() const { - return Address(getExtVectorPointer(), ElementType, getAlignment(), - (KnownNonNull_t)isKnownNonNull()); + assert(isExtVectorElt()); + return Addr; } - llvm::Value *getExtVectorPointer() const { + llvm::Value *getRawExtVectorPointer(CodeGenFunction &CGF) const { assert(isExtVectorElt()); - return V; + return Addr.emitRawPointer(CGF); } llvm::Constant *getExtVectorElts() const { assert(isExtVectorElt()); @@ -402,10 +421,14 @@ class LValue { // bitfield lvalue Address getBitFieldAddress() const { - return Address(getBitFieldPointer(), ElementType, getAlignment(), - (KnownNonNull_t)isKnownNonNull()); + assert(isBitField()); + return Addr; + } + llvm::Value *getRawBitFieldPointer(CodeGenFunction &CGF) const { + assert(isBitField()); + return Addr.emitRawPointer(CGF); } - llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; } + const CGBitFieldInfo &getBitFieldInfo() const { assert(isBitField()); return *BitFieldInfo; @@ -414,18 +437,13 @@ class LValue { // global register lvalue llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; } - static LValue MakeAddr(Address address, QualType type, ASTContext &Context, + static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) { - Qualifiers qs = type.getQualifiers(); - qs.setObjCGCAttr(Context.getObjCGCAttrKind(type)); - LValue R; R.LVType = Simple; - assert(address.getPointer()->getType()->isPointerTy()); - R.V = address.getPointer(); - R.ElementType = address.getElementType(); - R.IsKnownNonNull = address.isKnownNonNull(); - R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo); + R.initializeSimpleLValue(Addr, type, BaseInfo, TBAAInfo, Context); + R.Addr = Addr; + assert(Addr.getType()->isPointerTy()); return R; } @@ -434,26 +452,18 @@ class LValue { TBAAAccessInfo TBAAInfo) { LValue R; R.LVType = VectorElt; - R.V = vecAddress.getPointer(); - R.ElementType = vecAddress.getElementType(); R.VectorIdx = Idx; - R.IsKnownNonNull = vecAddress.isKnownNonNull(); - R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), - BaseInfo, TBAAInfo); + R.Initialize(type, type.getQualifiers(), vecAddress, BaseInfo, TBAAInfo); return R; } - static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts, + static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) { LValue R; R.LVType = ExtVectorElt; - R.V = vecAddress.getPointer(); - R.ElementType = vecAddress.getElementType(); R.VectorElts = Elts; - R.IsKnownNonNull = vecAddress.isKnownNonNull(); - R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), - BaseInfo, TBAAInfo); + R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo); return R; } @@ -468,12 +478,8 @@ class LValue { TBAAAccessInfo TBAAInfo) { LValue R; R.LVType = BitField; - R.V = Addr.getPointer(); - R.ElementType = Addr.getElementType(); R.BitFieldInfo = &Info; - R.IsKnownNonNull = Addr.isKnownNonNull(); - R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo, - TBAAInfo); + R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo); return R; } @@ -481,11 +487,9 @@ class LValue { QualType type) { LValue R; R.LVType = GlobalReg; - R.V = V; - R.ElementType = nullptr; - R.IsKnownNonNull = true; - R.Initialize(type, type.getQualifiers(), alignment, + R.Initialize(type, type.getQualifiers(), Address::invalid(), LValueBaseInfo(AlignmentSource::Decl), 
TBAAAccessInfo()); + R.V = V; return R; } @@ -494,12 +498,8 @@ class LValue { TBAAAccessInfo TBAAInfo) { LValue R; R.LVType = MatrixElt; - R.V = matAddress.getPointer(); - R.ElementType = matAddress.getElementType(); R.VectorIdx = Idx; - R.IsKnownNonNull = matAddress.isKnownNonNull(); - R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(), - BaseInfo, TBAAInfo); + R.Initialize(type, type.getQualifiers(), matAddress, BaseInfo, TBAAInfo); return R; } @@ -643,17 +643,17 @@ class AggValueSlot { return NeedsGCBarriers_t(ObjCGCFlag); } - llvm::Value *getPointer() const { - return Addr.getPointer(); + llvm::Value *getPointer(QualType PointeeTy, CodeGenFunction &CGF) const; + + llvm::Value *emitRawPointer(CodeGenFunction &CGF) const { + return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr; } Address getAddress() const { return Addr; } - bool isIgnored() const { - return !Addr.isValid(); - } + bool isIgnored() const { return !Addr.isValid(); } CharUnits getAlignment() const { return Addr.getAlignment(); diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp index e6d1c006d6019..ef0c491da20d6 100644 --- a/clang/lib/CodeGen/CodeGenAction.cpp +++ b/clang/lib/CodeGen/CodeGenAction.cpp @@ -24,8 +24,11 @@ #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Driver/DriverDiagnostic.h" #include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendActions.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Frontend/MultiplexConsumer.h" #include "clang/Lex/Preprocessor.h" +#include "clang/Serialization/ASTWriter.h" #include "llvm/ADT/Hashing.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" @@ -1113,6 +1116,12 @@ CodeGenerator *CodeGenAction::getCodeGenerator() const { return BEConsumer->getCodeGenerator(); } +bool CodeGenAction::BeginSourceFileAction(CompilerInstance &CI) { + if (CI.getFrontendOpts().GenReducedBMI) + CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface); + return true; +} + static std::unique_ptr GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) { switch (Action) { @@ -1171,6 +1180,16 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { CI.getPreprocessor().addPPCallbacks(std::move(Callbacks)); } + if (CI.getFrontendOpts().GenReducedBMI && + !CI.getFrontendOpts().ModuleOutputPath.empty()) { + std::vector> Consumers(2); + Consumers[0] = std::make_unique( + CI.getPreprocessor(), CI.getModuleCache(), + CI.getFrontendOpts().ModuleOutputPath); + Consumers[1] = std::move(Result); + return std::make_unique(std::move(Consumers)); + } + return std::move(Result); } diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 6d4b6680ae8f6..4befefcad187c 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -93,8 +93,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext) CodeGenFunction::~CodeGenFunction() { assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup"); - assert(DeferredDeactivationCleanupStack.empty() && - "missed to deactivate a cleanup"); if (getLangOpts().OpenMP && CurFn) CGM.getOpenMPRuntime().functionFinished(*this); @@ -197,26 +195,35 @@ CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() { CGF.Builder.setDefaultConstrainedRounding(OldRounding); } -LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) { +static LValue 
MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, + bool ForPointeeType, + CodeGenFunction &CGF) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; - CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo); - Address Addr(V, ConvertTypeForMem(T), Alignment); - return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo); + CharUnits Alignment = + CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType); + Address Addr = Address(V, CGF.ConvertTypeForMem(T), Alignment); + return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); +} + +LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) { + return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this); } -/// Given a value of type T* that may not be to a complete object, -/// construct an l-value with the natural pointee alignment of T. LValue CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) { - LValueBaseInfo BaseInfo; - TBAAAccessInfo TBAAInfo; - CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, - /* forPointeeType= */ true); - Address Addr(V, ConvertTypeForMem(T), Align); - return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); + return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this); +} + +LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V, + QualType T) { + return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this); } +LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, + QualType T) { + return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this); +} llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) { return CGM.getTypes().ConvertTypeForMem(T); @@ -341,10 +348,6 @@ static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) { void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { assert(BreakContinueStack.empty() && "mismatched push/pop in break/continue stack!"); - assert(LifetimeExtendedCleanupStack.empty() && - "mismatched push/pop of cleanups in EHStack!"); - assert(DeferredDeactivationCleanupStack.empty() && - "mismatched activate/deactivate of cleanups!"); bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 && NumSimpleReturnExprs == NumReturnExprs @@ -534,7 +537,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { ReturnBlock.getBlock()->eraseFromParent(); } if (ReturnValue.isValid()) { - auto *RetAlloca = dyn_cast(ReturnValue.getPointer()); + auto *RetAlloca = + dyn_cast(ReturnValue.emitRawPointer(*this)); if (RetAlloca && RetAlloca->use_empty()) { RetAlloca->eraseFromParent(); ReturnValue = Address::invalid(); @@ -1006,7 +1010,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time. 
if (SanOpts.has(SanitizerKind::Thread)) { if (const auto *OMD = dyn_cast_or_null(D)) { - IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0); + const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0); if (OMD->getMethodFamily() == OMF_dealloc || OMD->getMethodFamily() == OMF_initialize || (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) { @@ -1369,13 +1373,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, auto AI = CurFn->arg_begin(); if (CurFnInfo->getReturnInfo().isSRetAfterThis()) ++AI; - ReturnValue = - Address(&*AI, ConvertType(RetTy), - CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull); + ReturnValue = makeNaturalAddressForPointer( + &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false, + nullptr, nullptr, KnownNonNull); if (!CurFnInfo->getReturnInfo().getIndirectByVal()) { - ReturnValuePointer = CreateDefaultAlignTempAlloca( - ReturnValue.getPointer()->getType(), "result.ptr"); - Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer); + ReturnValuePointer = + CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr"); + Builder.CreateStore(ReturnValue.emitRawPointer(*this), + ReturnValuePointer); } } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca && !hasScalarEvaluationKind(CurFnInfo->getReturnType())) { @@ -1436,8 +1441,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // or contains the address of the enclosing object). LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField); if (!LambdaThisCaptureField->getType()->isPointerType()) { - // If the enclosing object was captured by value, just use its address. - CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer(); + // If the enclosing object was captured by value, just use its + // address. Sign this pointer. + CXXThisValue = ThisFieldLValue.getPointer(*this); } else { // Load the lvalue pointed to by the field, since '*this' was captured // by reference. @@ -2299,8 +2305,9 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity()); Address begin = dest.withElementType(CGF.Int8Ty); - llvm::Value *end = Builder.CreateInBoundsGEP( - begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end"); + llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(), + begin.emitRawPointer(CGF), + sizeInChars, "vla.end"); llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock(); llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop"); @@ -2311,7 +2318,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, CGF.EmitBlock(loopBB); llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur"); - cur->addIncoming(begin.getPointer(), originBB); + cur->addIncoming(begin.emitRawPointer(CGF), originBB); CharUnits curAlign = dest.getAlignment().alignmentOfArrayElement(baseSize); @@ -2504,10 +2511,10 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, addr = addr.withElementType(baseType); } else { // Create the actual GEP. 
- addr = Address(Builder.CreateInBoundsGEP( - addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"), - ConvertTypeForMem(eltType), - addr.getAlignment()); + addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(), + addr.emitRawPointer(*this), + gepIndices, "array.begin"), + ConvertTypeForMem(eltType), addr.getAlignment()); } baseType = eltType; @@ -2856,7 +2863,7 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D, Address Addr) { assert(D->hasAttr() && "no annotate attribute"); - llvm::Value *V = Addr.getPointer(); + llvm::Value *V = Addr.emitRawPointer(*this); llvm::Type *VTy = V->getType(); auto *PTy = dyn_cast(VTy); unsigned AS = PTy ? PTy->getAddressSpace() : 0; @@ -2915,7 +2922,7 @@ Address CodeGenFunction::EmitFieldSYCLAnnotations(const FieldDecl *D, Address Addr) { const auto *SYCLAnnotAttr = D->getAttr(); assert(SYCLAnnotAttr && "no add_ir_annotations_member attribute"); - llvm::Value *V = Addr.getPointer(); + llvm::Value *V = Addr.emitRawPointer(*this); llvm::Type *VTy = V->getType(); auto *PTy = dyn_cast(VTy); unsigned AS = PTy ? PTy->getAddressSpace() : 0; @@ -2942,7 +2949,7 @@ Address CodeGenFunction::EmitIntelFPGAFieldAnnotations(const FieldDecl *D, Address CodeGenFunction::EmitIntelFPGAFieldAnnotations(SourceLocation Location, Address Addr, StringRef AnnotStr) { - llvm::Value *V = Addr.getPointer(); + llvm::Value *V = Addr.emitRawPointer(*this); llvm::Type *VTy = V->getType(); // llvm.ptr.annotation intrinsic accepts a pointer to integer of any width - // don't perform bitcasts if value is integer diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index a65f384b38425..652cbee655647 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -39,7 +39,6 @@ #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" -#include "llvm/IR/Instructions.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Utils/SanitizerStats.h" @@ -152,6 +151,9 @@ struct DominatingLLVMValue { /// Answer whether the given value needs extra work to be saved. static bool needsSaving(llvm::Value *value) { + if (!value) + return false; + // If it's not an instruction, we don't need to save. if (!isa(value)) return false; @@ -178,21 +180,28 @@ template <> struct DominatingValue
{ typedef Address type; struct saved_type { - DominatingLLVMValue::saved_type SavedValue; + DominatingLLVMValue::saved_type BasePtr; llvm::Type *ElementType; CharUnits Alignment; + DominatingLLVMValue::saved_type Offset; + llvm::PointerType *EffectiveType; }; static bool needsSaving(type value) { - return DominatingLLVMValue::needsSaving(value.getPointer()); + if (DominatingLLVMValue::needsSaving(value.getBasePointer()) || + DominatingLLVMValue::needsSaving(value.getOffset())) + return true; + return false; } static saved_type save(CodeGenFunction &CGF, type value) { - return { DominatingLLVMValue::save(CGF, value.getPointer()), - value.getElementType(), value.getAlignment() }; + return {DominatingLLVMValue::save(CGF, value.getBasePointer()), + value.getElementType(), value.getAlignment(), + DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()}; } static type restore(CodeGenFunction &CGF, saved_type value) { - return Address(DominatingLLVMValue::restore(CGF, value.SavedValue), - value.ElementType, value.Alignment); + return Address(DominatingLLVMValue::restore(CGF, value.BasePtr), + value.ElementType, value.Alignment, + DominatingLLVMValue::restore(CGF, value.Offset)); } }; @@ -202,14 +211,26 @@ template <> struct DominatingValue { class saved_type { enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral, AggregateAddress, ComplexAddress }; - - llvm::Value *Value; - llvm::Type *ElementType; + union { + struct { + DominatingLLVMValue::saved_type first, second; + } Vals; + DominatingValue
::saved_type AggregateAddr; + }; LLVM_PREFERRED_TYPE(Kind) unsigned K : 3; - unsigned Align : 29; - saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0) - : Value(v), ElementType(e), K(k), Align(a) {} + unsigned IsVolatile : 1; + + saved_type(DominatingLLVMValue::saved_type Val1, unsigned K) + : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {} + + saved_type(DominatingLLVMValue::saved_type Val1, + DominatingLLVMValue::saved_type Val2) + : Vals{Val1, Val2}, K(ComplexAddress) {} + + saved_type(DominatingValue
::saved_type AggregateAddr, + bool IsVolatile, unsigned K) + : AggregateAddr(AggregateAddr), K(K) {} public: static bool needsSaving(RValue value); @@ -649,51 +670,6 @@ class CodeGenFunction : public CodeGenTypeCache { EHScopeStack EHStack; llvm::SmallVector LifetimeExtendedCleanupStack; - - // A stack of cleanups which were added to EHStack but have to be deactivated - // later before being popped or emitted. These are usually deactivated on - // exiting a `CleanupDeactivationScope` scope. For instance, after a - // full-expr. - // - // These are specially useful for correctly emitting cleanups while - // encountering branches out of expression (through stmt-expr or coroutine - // suspensions). - struct DeferredDeactivateCleanup { - EHScopeStack::stable_iterator Cleanup; - llvm::Instruction *DominatingIP; - }; - llvm::SmallVector DeferredDeactivationCleanupStack; - - // Enters a new scope for capturing cleanups which are deferred to be - // deactivated, all of which will be deactivated once the scope is exited. - struct CleanupDeactivationScope { - CodeGenFunction &CGF; - size_t OldDeactivateCleanupStackSize; - bool Deactivated; - CleanupDeactivationScope(CodeGenFunction &CGF) - : CGF(CGF), OldDeactivateCleanupStackSize( - CGF.DeferredDeactivationCleanupStack.size()), - Deactivated(false) {} - - void ForceDeactivate() { - assert(!Deactivated && "Deactivating already deactivated scope"); - auto &Stack = CGF.DeferredDeactivationCleanupStack; - for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) { - CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup, - Stack[I - 1].DominatingIP); - Stack[I - 1].DominatingIP->eraseFromParent(); - } - Stack.resize(OldDeactivateCleanupStackSize); - Deactivated = true; - } - - ~CleanupDeactivationScope() { - if (Deactivated) - return; - ForceDeactivate(); - } - }; - llvm::SmallVector SEHTryEpilogueStack; llvm::Instruction *CurrentFuncletPad = nullptr; @@ -705,7 +681,7 @@ class CodeGenFunction : public CodeGenTypeCache { llvm::Value *Size; public: - CallLifetimeEnd(Address addr, llvm::Value *size) + CallLifetimeEnd(RawAddress addr, llvm::Value *size) : Addr(addr.getPointer()), Size(size) {} void Emit(CodeGenFunction &CGF, Flags flags) override { @@ -730,7 +706,7 @@ class CodeGenFunction : public CodeGenTypeCache { }; /// i32s containing the indexes of the cleanup destinations. - Address NormalCleanupDest = Address::invalid(); + RawAddress NormalCleanupDest = RawAddress::invalid(); unsigned NextCleanupDestIndex = 1; @@ -865,10 +841,10 @@ class CodeGenFunction : public CodeGenTypeCache { template void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) { if (!isInConditionalBranch()) - return pushCleanupAfterFullExprWithActiveFlag(Kind, Address::invalid(), - A...); + return pushCleanupAfterFullExprWithActiveFlag( + Kind, RawAddress::invalid(), A...); - Address ActiveFlag = createCleanupActiveFlag(); + RawAddress ActiveFlag = createCleanupActiveFlag(); assert(!DominatingValue
::needsSaving(ActiveFlag) && "cleanup active flag should never need saving"); @@ -881,7 +857,7 @@ class CodeGenFunction : public CodeGenTypeCache { template void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, - Address ActiveFlag, As... A) { + RawAddress ActiveFlag, As... A) { LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind, ActiveFlag.isValid()}; @@ -896,20 +872,7 @@ class CodeGenFunction : public CodeGenTypeCache { new (Buffer) LifetimeExtendedCleanupHeader(Header); new (Buffer + sizeof(Header)) T(A...); if (Header.IsConditional) - new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag); - } - - // Push a cleanup onto EHStack and deactivate it later. It is usually - // deactivated when exiting a `CleanupDeactivationScope` (for example: after a - // full expression). - template - void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) { - // Placeholder dominating IP for this cleanup. - llvm::Instruction *DominatingIP = - Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy)); - EHStack.pushCleanup(Kind, A...); - DeferredDeactivationCleanupStack.push_back( - {EHStack.stable_begin(), DominatingIP}); + new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag); } /// Set up the last cleanup that was pushed as a conditional @@ -918,8 +881,8 @@ class CodeGenFunction : public CodeGenTypeCache { initFullExprCleanupWithFlag(createCleanupActiveFlag()); } - void initFullExprCleanupWithFlag(Address ActiveFlag); - Address createCleanupActiveFlag(); + void initFullExprCleanupWithFlag(RawAddress ActiveFlag); + RawAddress createCleanupActiveFlag(); /// PushDestructorCleanup - Push a cleanup to call the /// complete-object destructor of an object of the given type at the @@ -963,7 +926,6 @@ class CodeGenFunction : public CodeGenTypeCache { class RunCleanupsScope { EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth; size_t LifetimeExtendedCleanupStackSize; - CleanupDeactivationScope DeactivateCleanups; bool OldDidCallStackSave; protected: bool PerformCleanup; @@ -978,7 +940,8 @@ class CodeGenFunction : public CodeGenTypeCache { public: /// Enter a new cleanup scope. explicit RunCleanupsScope(CodeGenFunction &CGF) - : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) { + : PerformCleanup(true), CGF(CGF) + { CleanupStackDepth = CGF.EHStack.stable_begin(); LifetimeExtendedCleanupStackSize = CGF.LifetimeExtendedCleanupStack.size(); @@ -1008,7 +971,6 @@ class CodeGenFunction : public CodeGenTypeCache { void ForceCleanup(std::initializer_list ValuesToReload = {}) { assert(PerformCleanup && "Already forced cleanup"); CGF.DidCallStackSave = OldDidCallStackSave; - DeactivateCleanups.ForceDeactivate(); CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, ValuesToReload); PerformCleanup = false; @@ -1108,7 +1070,7 @@ class CodeGenFunction : public CodeGenTypeCache { QualType VarTy = LocalVD->getType(); if (VarTy->isReferenceType()) { Address Temp = CGF.CreateMemTemp(VarTy); - CGF.Builder.CreateStore(TempAddr.getPointer(), Temp); + CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp); TempAddr = Temp; } SavedTempAddresses.try_emplace(LocalVD, TempAddr); @@ -1303,10 +1265,12 @@ class CodeGenFunction : public CodeGenTypeCache { /// one branch or the other of a conditional expression. 
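The CodeGenFunction.h hunks above also narrow several members from `Address` to `RawAddress`: cleanup active flags, `NormalCleanupDest`, and, further down, the temporary-alloca helpers. A reasonable mental model, assuming `RawAddress` is the "nothing pending" case of `Address` that converts to it implicitly, sketched here with made-up stand-in types:

```cpp
#include "llvm/IR/Value.h"

// Made-up stand-in types; the real Address/RawAddress (see the Address.h
// include added to CGValue.h above) also carry element type and alignment.
struct SketchRawAddress {
  llvm::Value *Ptr; // points directly at the storage, e.g. an alloca
};

struct SketchAddress {
  llvm::Value *Base;
  llvm::Value *PendingOffset; // may need a GEP before use
  bool MaybeSigned;           // may need authentication before use

  // A raw address is trivially an already-resolved address, so conversion in
  // this direction is free; the reverse direction is not provided.
  SketchAddress(SketchRawAddress R)
      : Base(R.Ptr), PendingOffset(nullptr), MaybeSigned(false) {}
};
```

Since those slots are always freshly created allocas, demanding the stronger type documents that no offset application or pointer authentication can ever be needed for them.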
bool isInConditionalBranch() const { return OutermostConditional != nullptr; } - void setBeforeOutermostConditional(llvm::Value *value, Address addr) { + void setBeforeOutermostConditional(llvm::Value *value, Address addr, + CodeGenFunction &CGF) { assert(isInConditionalBranch()); llvm::BasicBlock *block = OutermostConditional->getStartingBlock(); - auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back()); + auto store = + new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back()); store->setAlignment(addr.getAlignment().getAsAlign()); } @@ -1661,7 +1625,7 @@ class CodeGenFunction : public CodeGenTypeCache { /// If \p StepV is null, the default increment is 1. void maybeUpdateMCDCTestVectorBitmap(const Expr *E) { if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) { - PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr); + PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this); PGO.setCurrentStmt(E); } } @@ -1669,7 +1633,7 @@ class CodeGenFunction : public CodeGenTypeCache { /// Update the MCDC temp value with the condition's evaluated result. void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) { if (isMCDCCoverageEnabled()) { - PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val); + PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this); PGO.setCurrentStmt(E); } } @@ -1786,7 +1750,7 @@ class CodeGenFunction : public CodeGenTypeCache { : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue), OldCXXThisAlignment(CGF.CXXThisAlignment), SourceLocScope(E, CGF.CurSourceLocExprScope) { - CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer(); + CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer(); CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment(); } ~CXXDefaultInitExprScope() { @@ -2172,7 +2136,7 @@ class CodeGenFunction : public CodeGenTypeCache { llvm::Value *getExceptionFromSlot(); llvm::Value *getSelectorFromSlot(); - Address getNormalCleanupDestSlot(); + RawAddress getNormalCleanupDestSlot(); llvm::BasicBlock *getUnreachableBlock() { if (!UnreachableBlock) { @@ -2218,11 +2182,6 @@ class CodeGenFunction : public CodeGenTypeCache { Address addr, QualType type); void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); - void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, - Address addr, QualType type); - void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr, - QualType type, Destroyer *destroyer, - bool useEHCleanupForArray); void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); @@ -2666,10 +2625,40 @@ class CodeGenFunction : public CodeGenTypeCache { // Helpers //===--------------------------------------------------------------------===// + Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, + llvm::BasicBlock *LHSBlock, + llvm::BasicBlock *RHSBlock, + llvm::BasicBlock *MergeBlock, + QualType MergedType) { + Builder.SetInsertPoint(MergeBlock); + llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond"); + PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock); + PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock); + LHS.replaceBasePointer(PtrPhi); + LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment())); + return LHS; + } + + /// Construct an address with the natural alignment of T. 
If a pointer to T + /// is expected to be signed, the pointer passed to this function must have + /// been signed, and the returned Address will have the pointer authentication + /// information needed to authenticate the signed pointer. + Address makeNaturalAddressForPointer( + llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(), + bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr, + TBAAAccessInfo *TBAAInfo = nullptr, + KnownNonNull_t IsKnownNonNull = NotKnownNonNull) { + if (Alignment.isZero()) + Alignment = + CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType); + return Address(Ptr, ConvertTypeForMem(T), Alignment, nullptr, + IsKnownNonNull); + } + LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source = AlignmentSource::Type) { - return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source), - CGM.getTBAAAccessInfo(T)); + return MakeAddrLValue(Addr, T, LValueBaseInfo(Source), + CGM.getTBAAAccessInfo(T)); } LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, @@ -2679,6 +2668,14 @@ class CodeGenFunction : public CodeGenTypeCache { LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source = AlignmentSource::Type) { + return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T, + LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T)); + } + + /// Same as MakeAddrLValue above except that the pointer is known to be + /// unsigned. + LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, + AlignmentSource Source = AlignmentSource::Type) { Address Addr(V, ConvertTypeForMem(T), Alignment); return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T)); @@ -2691,9 +2688,18 @@ class CodeGenFunction : public CodeGenTypeCache { TBAAAccessInfo()); } + /// Given a value of type T* that may not be to a complete object, construct + /// an l-value with the natural pointee alignment of T. LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T); + LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T); + /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known + /// to be unsigned. + LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T); + + LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T); + Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo = nullptr, TBAAAccessInfo *PointeeTBAAInfo = nullptr); @@ -2714,33 +2720,6 @@ class CodeGenFunction : public CodeGenTypeCache { TBAAAccessInfo *TBAAInfo = nullptr); LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy); -private: - struct AllocaTracker { - void Add(llvm::AllocaInst *I) { Allocas.push_back(I); } - llvm::SmallVector Take() { return std::move(Allocas); } - - private: - llvm::SmallVector Allocas; - }; - AllocaTracker *Allocas = nullptr; - -public: - // Captures all the allocas created during the scope of its RAII object. - struct AllocaTrackerRAII { - AllocaTrackerRAII(CodeGenFunction &CGF) - : CGF(CGF), OldTracker(CGF.Allocas) { - CGF.Allocas = &Tracker; - } - ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; } - - llvm::SmallVector Take() { return Tracker.Take(); } - - private: - CodeGenFunction &CGF; - AllocaTracker *OldTracker; - AllocaTracker Tracker; - }; - /// CreateTempAlloca - This creates an alloca and inserts it into the entry /// block if \p ArraySize is nullptr, otherwise inserts it at the current /// insertion point of the builder. 
The caller is responsible for setting an @@ -2769,13 +2748,13 @@ class CodeGenFunction : public CodeGenTypeCache { /// more efficient if the caller knows that the address will not be exposed. llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp", llvm::Value *ArraySize = nullptr); - Address CreateTempAlloca(llvm::Type *Ty, CharUnits align, - const Twine &Name = "tmp", - llvm::Value *ArraySize = nullptr, - Address *Alloca = nullptr); - Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, - const Twine &Name = "tmp", - llvm::Value *ArraySize = nullptr); + RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align, + const Twine &Name = "tmp", + llvm::Value *ArraySize = nullptr, + RawAddress *Alloca = nullptr); + RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, + const Twine &Name = "tmp", + llvm::Value *ArraySize = nullptr); /// CreateDefaultAlignedTempAlloca - This creates an alloca with the /// default ABI alignment of the given LLVM type. @@ -2787,8 +2766,8 @@ class CodeGenFunction : public CodeGenTypeCache { /// not hand this address off to arbitrary IRGen routines, and especially /// do not pass it as an argument to a function that might expect a /// properly ABI-aligned value. - Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, - const Twine &Name = "tmp"); + RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, + const Twine &Name = "tmp"); /// CreateIRTemp - Create a temporary IR object of the given type, with /// appropriate alignment. This routine should only be used when an temporary @@ -2798,32 +2777,31 @@ class CodeGenFunction : public CodeGenTypeCache { /// /// That is, this is exactly equivalent to CreateMemTemp, but calling /// ConvertType instead of ConvertTypeForMem. - Address CreateIRTemp(QualType T, const Twine &Name = "tmp"); + RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp"); /// CreateMemTemp - Create a temporary memory object of the given type, with /// appropriate alignmen and cast it to the default address space. Returns /// the original alloca instruction by \p Alloca if it is not nullptr. - Address CreateMemTemp(QualType T, const Twine &Name = "tmp", - Address *Alloca = nullptr); - Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp", - Address *Alloca = nullptr); + RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp", + RawAddress *Alloca = nullptr); + RawAddress CreateMemTemp(QualType T, CharUnits Align, + const Twine &Name = "tmp", + RawAddress *Alloca = nullptr); /// CreateMemTemp - Create a temporary memory object of the given type, with /// appropriate alignmen without casting it to the default address space. - Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp"); - Address CreateMemTempWithoutCast(QualType T, CharUnits Align, - const Twine &Name = "tmp"); + RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp"); + RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align, + const Twine &Name = "tmp"); /// CreateAggTemp - Create a temporary memory object for the given /// aggregate type. 
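Before the `CreateAggTemp` declaration that follows, it is worth unpacking the `CreateTempAlloca` contract quoted above: a null `ArraySize` means the alloca is placed in the entry block, where it becomes a fixed stack slot, while a dynamic size forces it to be created at the current insertion point, after the size has been computed. A standalone illustration with plain IRBuilder, not the CGF helpers themselves:

```cpp
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

// Fixed-size temporaries go in the entry block so they are allocated once per
// frame; dynamically sized temporaries must be emitted where their size value
// is available.
llvm::AllocaInst *makeTemp(llvm::Function &F, llvm::IRBuilder<> &B,
                           llvm::Type *Ty, llvm::Value *ArraySize) {
  if (!ArraySize) {
    // Static temp: insert at the start of the entry block.
    llvm::IRBuilder<> EntryB(&F.getEntryBlock(), F.getEntryBlock().begin());
    return EntryB.CreateAlloca(Ty, nullptr, "tmp");
  }
  // Dynamic temp: ArraySize was just computed, so emit here.
  return B.CreateAlloca(Ty, ArraySize, "vla.tmp");
}
```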
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp", - Address *Alloca = nullptr) { - return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca), - T.getQualifiers(), - AggValueSlot::IsNotDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap); + RawAddress *Alloca = nullptr) { + return AggValueSlot::forAddr( + CreateMemTemp(T, Name, Alloca), T.getQualifiers(), + AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); } /// EvaluateExprAsBool - Perform the usual unary conversions on the specified @@ -3212,6 +3190,25 @@ class CodeGenFunction : public CodeGenTypeCache { /// calls to EmitTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; + void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, + QualType Type, SanitizerSet SkippedChecks = SanitizerSet(), + llvm::Value *ArraySize = nullptr) { + if (!sanitizePerformTypeCheck()) + return; + EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(), + SkippedChecks, ArraySize); + } + + void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr, + QualType Type, CharUnits Alignment = CharUnits::Zero(), + SanitizerSet SkippedChecks = SanitizerSet(), + llvm::Value *ArraySize = nullptr) { + if (!sanitizePerformTypeCheck()) + return; + EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment, + SkippedChecks, ArraySize); + } + /// Emit a check that \p V is the address of storage of the /// appropriate size and alignment for an object of type \p Type /// (or if ArraySize is provided, for an array of that bound). @@ -3312,17 +3309,17 @@ class CodeGenFunction : public CodeGenTypeCache { /// Address with original alloca instruction. Invalid if the variable was /// emitted as a global constant. - Address AllocaAddr; + RawAddress AllocaAddr; struct Invalid {}; AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()), - AllocaAddr(Address::invalid()) {} + AllocaAddr(RawAddress::invalid()) {} AutoVarEmission(const VarDecl &variable) : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr), IsEscapingByRef(false), IsConstantAggregate(false), - SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {} + SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {} bool wasEmittedAsGlobal() const { return !Addr.isValid(); } @@ -3345,7 +3342,7 @@ class CodeGenFunction : public CodeGenTypeCache { } /// Returns the address for the original alloca instruction. - Address getOriginalAllocatedAddress() const { return AllocaAddr; } + RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; } /// Returns the address of the object within this declaration. 
/// Note that this does not chase the forwarding pointer for @@ -3375,23 +3372,32 @@ class CodeGenFunction : public CodeGenTypeCache { llvm::GlobalValue::LinkageTypes Linkage); class ParamValue { - llvm::Value *Value; - llvm::Type *ElementType; - unsigned Alignment; - ParamValue(llvm::Value *V, llvm::Type *T, unsigned A) - : Value(V), ElementType(T), Alignment(A) {} + union { + Address Addr; + llvm::Value *Value; + }; + + bool IsIndirect; + + ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {} + ParamValue(Address A) : Addr(A), IsIndirect(true) {} + public: static ParamValue forDirect(llvm::Value *value) { - return ParamValue(value, nullptr, 0); + return ParamValue(value); } static ParamValue forIndirect(Address addr) { assert(!addr.getAlignment().isZero()); - return ParamValue(addr.getPointer(), addr.getElementType(), - addr.getAlignment().getQuantity()); + return ParamValue(addr); } - bool isIndirect() const { return Alignment != 0; } - llvm::Value *getAnyValue() const { return Value; } + bool isIndirect() const { return IsIndirect; } + llvm::Value *getAnyValue() const { + if (!isIndirect()) + return Value; + assert(!Addr.hasOffset() && "unexpected offset"); + return Addr.getBasePointer(); + } llvm::Value *getDirectValue() const { assert(!isIndirect()); @@ -3400,8 +3406,7 @@ class CodeGenFunction : public CodeGenTypeCache { Address getIndirectAddress() const { assert(isIndirect()); - return Address(Value, ElementType, CharUnits::fromQuantity(Alignment), - KnownNonNull); + return Addr; } }; @@ -4311,6 +4316,9 @@ class CodeGenFunction : public CodeGenTypeCache { const Twine &name = ""); llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name = ""); + llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee, + ArrayRef
args, + const Twine &name = ""); llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, const Twine &name = ""); @@ -4337,6 +4345,12 @@ class CodeGenFunction : public CodeGenTypeCache { CXXDtorType Type, const CXXRecordDecl *RD); + llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) { + return Addr.getBasePointer(); + } + + bool isPointerKnownNonNull(const Expr *E); + // Return the copy constructor name with the prefix "__copy_constructor_" // removed. static std::string getNonTrivialCopyConstructorStr(QualType QT, @@ -4945,6 +4959,11 @@ class CodeGenFunction : public CodeGenTypeCache { SourceLocation Loc, const Twine &Name = ""); + Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef IdxList, + llvm::Type *elementType, bool SignedIndices, + bool IsSubtraction, SourceLocation Loc, + CharUnits Align, const Twine &Name = ""); + /// Specifies which type of sanitizer check to apply when handling a /// particular builtin. enum BuiltinCheckKind { @@ -5007,6 +5026,10 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum); + void EmitNonNullArgCheck(Address Addr, QualType ArgType, + SourceLocation ArgLoc, AbstractCallee AC, + unsigned ParmNum); + /// EmitCallArg - Emit a single call argument. void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType); @@ -5234,7 +5257,7 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) { CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save"); CGF.Builder.CreateStore(value, alloca); - return saved_type(alloca.getPointer(), true); + return saved_type(alloca.emitRawPointer(CGF), true); } inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF, diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 5c4feb4994431..6c96217b444e0 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -51,6 +51,7 @@ #include "clang/CodeGen/ConstantInitBuilder.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Sema/Sema.h" +#include "clang/Sema/SemaSYCL.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSwitch.h" @@ -2840,7 +2841,7 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD, addUsedGlobal(F); if (auto *SA = D->getAttr()) if (!D->getAttr()) - F->addFnAttr("implicit-section-name", SA->getName()); + F->setSection(SA->getName()); llvm::AttrBuilder Attrs(F->getContext()); if (GetCPUAndFeaturesAttributes(GD, Attrs)) { @@ -4378,8 +4379,20 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) { // behavior may break ABI compatibility of the current unit. if (const Module *M = F->getOwningModule(); M && M->getTopLevelModule()->isNamedModule() && - getContext().getCurrentNamedModule() != M->getTopLevelModule()) - return false; + getContext().getCurrentNamedModule() != M->getTopLevelModule()) { + // There are practices to mark template member function as always-inline + // and mark the template as extern explicit instantiation but not give + // the definition for member function. So we have to emit the function + // from explicitly instantiation with always-inline. + // + // See https://github.com/llvm/llvm-project/issues/86893 for details. + // + // TODO: Maybe it is better to give it a warning if we call a non-inline + // function from other module units which is marked as always-inline. 
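The `shouldEmitFunction` change above carves out an exception to the rule that definitions owned by another named module are not re-emitted: an always-inline member of an explicitly instantiated template may have no standalone definition anywhere, so the importer has to emit it from the instantiation after all. Roughly the shape of the scenario described in the linked issue; the file names and identifiers below are illustrative, not taken from the issue:

```cpp
// m.cppm -- module interface (illustrative)
export module m;

export template <class T> struct Box {
  [[gnu::always_inline]] T get() const { return Value; }
  T Value;
};

// Promise that Box<int> is instantiated elsewhere; note that get() still has
// no out-of-line definition an importer could call.
extern template struct Box<int>;

// user.cpp -- importer (illustrative)
// import m;
// int use(Box<int> b) {
//   return b.get(); // always-inline: the body must be emitted in this TU too
// }
```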
+ if (!F->isTemplateInstantiation() || !F->hasAttr()) { + return false; + } + } if (F->hasAttr()) return false; @@ -6254,7 +6267,7 @@ CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D, // is only one translation unit and can so mark them internal. if (getLangOpts().SYCLIsDevice && !D->hasAttr() && !D->hasAttr() && - !Sema::isTypeDecoratedWithDeclAttribute( + !SemaSYCL::isTypeDecoratedWithDeclAttribute( D->getType())) return getLangOpts().GPURelocatableDeviceCode ? llvm::Function::LinkOnceODRLinkage @@ -7264,7 +7277,7 @@ static bool AllTrivialInitializers(CodeGenModule &CGM, void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { // We might need a .cxx_destruct even if we don't have any ivar initializers. if (needsDestructMethod(D)) { - IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct"); + const IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct"); Selector cxxSelector = getContext().Selectors.getSelector(0, &II); ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create( getContext(), D->getLocation(), D->getLocation(), cxxSelector, @@ -7284,7 +7297,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) { AllTrivialInitializers(*this, D)) return; - IdentifierInfo *II = &getContext().Idents.get(".cxx_construct"); + const IdentifierInfo *II = &getContext().Idents.get(".cxx_construct"); Selector cxxSelector = getContext().Selectors.getSelector(0, &II); // The constructor returns 'self'. ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create( @@ -7852,7 +7865,7 @@ void CodeGenModule::EmitStaticExternCAliases() { if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases()) return; for (auto &I : StaticExternCValues) { - IdentifierInfo *Name = I.first; + const IdentifierInfo *Name = I.first; llvm::GlobalValue *Val = I.second; // If Val is null, that implies there were multiple declarations that each @@ -7913,7 +7926,7 @@ void CodeGenFunction::EmitDeclMetadata() { for (auto &I : LocalDeclMap) { const Decl *D = I.first; - llvm::Value *Addr = I.second.getPointer(); + llvm::Value *Addr = I.second.emitRawPointer(*this); if (auto *Alloca = dyn_cast(Addr)) { llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D); Alloca->setMetadata( diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp index 2619edfeb7dc7..76704c4d7be4a 100644 --- a/clang/lib/CodeGen/CodeGenPGO.cpp +++ b/clang/lib/CodeGen/CodeGenPGO.cpp @@ -1239,7 +1239,8 @@ void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) { void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, const Expr *S, - Address MCDCCondBitmapAddr) { + Address MCDCCondBitmapAddr, + CodeGenFunction &CGF) { if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState) return; @@ -1262,7 +1263,7 @@ void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, Builder.getInt64(FunctionHash), Builder.getInt32(RegionMCDCState->BitmapBytes), Builder.getInt32(MCDCTestVectorBitmapOffset), - MCDCCondBitmapAddr.getPointer()}; + MCDCCondBitmapAddr.emitRawPointer(CGF)}; Builder.CreateCall( CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_tvbitmap_update), Args); } @@ -1283,7 +1284,8 @@ void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S, void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S, Address MCDCCondBitmapAddr, - llvm::Value *Val) { + llvm::Value *Val, + CodeGenFunction &CGF) { if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState) return; @@ -1312,7 +1314,7 @@ void 
CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S, llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy), Builder.getInt64(FunctionHash), Builder.getInt32(Branch.ID), - MCDCCondBitmapAddr.getPointer(), Val}; + MCDCCondBitmapAddr.emitRawPointer(CGF), Val}; Builder.CreateCall( CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_condbitmap_update), Args); diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h index 036fbf6815a49..9d66ffad6f435 100644 --- a/clang/lib/CodeGen/CodeGenPGO.h +++ b/clang/lib/CodeGen/CodeGenPGO.h @@ -113,12 +113,14 @@ class CodeGenPGO { void emitCounterSetOrIncrement(CGBuilderTy &Builder, const Stmt *S, llvm::Value *StepV); void emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, const Expr *S, - Address MCDCCondBitmapAddr); + Address MCDCCondBitmapAddr, + CodeGenFunction &CGF); void emitMCDCParameters(CGBuilderTy &Builder); void emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S, Address MCDCCondBitmapAddr); void emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S, - Address MCDCCondBitmapAddr, llvm::Value *Val); + Address MCDCCondBitmapAddr, llvm::Value *Val, + CodeGenFunction &CGF); /// Return the region count for the counter at the given index. uint64_t getRegionCount(const Stmt *S) { diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp index da689ee6a13d7..284421f494711 100644 --- a/clang/lib/CodeGen/CodeGenTBAA.cpp +++ b/clang/lib/CodeGen/CodeGenTBAA.cpp @@ -414,7 +414,7 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) { }); } for (FieldDecl *Field : RD->fields()) { - if (Field->isZeroSize(Context) || Field->isUnnamedBitfield()) + if (Field->isZeroSize(Context) || Field->isUnnamedBitField()) continue; QualType FieldQTy = Field->getType(); llvm::MDNode *TypeNode = isValidBaseType(FieldQTy) diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index 27ceb23b942c4..207b12806546c 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -308,10 +308,6 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI { CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase); - llvm::Constant * - getVTableAddressPointForConstExpr(BaseSubobject Base, - const CXXRecordDecl *VTableClass) override; - llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; @@ -647,7 +643,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( // Apply the adjustment and cast back to the original struct type // for consistency. - llvm::Value *This = ThisAddr.getPointer(); + llvm::Value *This = ThisAddr.emitRawPointer(CGF); This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj); ThisPtrForCall = This; @@ -853,7 +849,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( CGBuilderTy &Builder = CGF.Builder; // Apply the offset, which we assume is non-null. - return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr, + return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr, "memptr.offset"); } @@ -1249,7 +1245,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF, CGF.getPointerAlign()); // Apply the offset. 
- llvm::Value *CompletePtr = Ptr.getPointer(); + llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF); CompletePtr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset); @@ -1486,7 +1482,8 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastCall( computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity()); // Emit the call to __dynamic_cast. - llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint}; + llvm::Value *Args[] = {ThisAddr.emitRawPointer(CGF), SrcRTTI, DestRTTI, + OffsetHint}; llvm::Value *Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args); @@ -1575,7 +1572,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast( VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy)); llvm::Value *Success = CGF.Builder.CreateICmpEQ( VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl)); - llvm::Value *Result = ThisAddr.getPointer(); + llvm::Value *Result = ThisAddr.emitRawPointer(CGF); if (!Offset->isZero()) Result = CGF.Builder.CreateInBoundsGEP( CGF.CharTy, Result, @@ -1617,7 +1614,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF, PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top"); } // Finally, add the offset to the pointer. - return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(), + return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF), OffsetToTop); } @@ -1798,8 +1795,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF, else Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD); - CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy, - nullptr); + CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy), + ThisTy, VTT, VTTTy, nullptr); } void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, @@ -1958,11 +1955,6 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT( CGF.getPointerAlign()); } -llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( - BaseSubobject Base, const CXXRecordDecl *VTableClass) { - return getVTableAddressPoint(Base, VTableClass); -} - llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) { assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets"); @@ -2093,8 +2085,8 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall( ThisTy = D->getDestroyedType(); } - CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr, - QualType(), nullptr); + CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy, + nullptr, QualType(), nullptr); return nullptr; } @@ -2167,7 +2159,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF, int64_t VirtualAdjustment, bool IsReturnAdjustment) { if (!NonVirtualAdjustment && !VirtualAdjustment) - return InitialPtr.getPointer(); + return InitialPtr.emitRawPointer(CGF); Address V = InitialPtr.withElementType(CGF.Int8Ty); @@ -2200,10 +2192,10 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF, CGF.getPointerAlign()); } // Adjust our pointer. 
- ResultPtr = CGF.Builder.CreateInBoundsGEP( - V.getElementType(), V.getPointer(), Offset); + ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(), + V.emitRawPointer(CGF), Offset); } else { - ResultPtr = V.getPointer(); + ResultPtr = V.emitRawPointer(CGF); } // In a derived-to-base conversion, the non-virtual adjustment is @@ -2289,7 +2281,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false); llvm::FunctionCallee F = CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie"); - CGF.Builder.CreateCall(F, NumElementsPtr.getPointer()); + CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF)); } // Finally, compute a pointer to the actual data buffer by skipping @@ -2320,7 +2312,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, CGF.SizeTy, llvm::PointerType::getUnqual(CGF.getLLVMContext()), false); llvm::FunctionCallee F = CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie"); - return CGF.Builder.CreateCall(F, numElementsPtr.getPointer()); + return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF)); } CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) { @@ -2632,7 +2624,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF, // Call __cxa_guard_release. This cannot throw. CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), - guardAddr.getPointer()); + guardAddr.emitRawPointer(CGF)); } else if (D.isLocalVarDecl()) { // For local variables, store 1 into the first byte of the guard variable // after the object initialization completes so that initialization is @@ -3125,10 +3117,10 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, LValue LV; if (VD->getType()->isReferenceType()) - LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType); + LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType); else - LV = CGF.MakeAddrLValue(CallVal, LValType, - CGF.getContext().getDeclAlign(VD)); + LV = CGF.MakeRawAddrLValue(CallVal, LValType, + CGF.getContext().getDeclAlign(VD)); // FIXME: need setObjCGCLValueClass? return LV; } @@ -4618,7 +4610,7 @@ static void InitCatchParam(CodeGenFunction &CGF, CGF.Builder.CreateStore(Casted, ExnPtrTmp); // Bind the reference to the temporary. 
- AdjustedExn = ExnPtrTmp.getPointer(); + AdjustedExn = ExnPtrTmp.emitRawPointer(CGF); } } diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp index ddbac3ff49671..ca00324949f09 100644 --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -327,10 +327,6 @@ class MicrosoftCXXABI : public CGCXXABI { CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; - llvm::Constant * - getVTableAddressPointForConstExpr(BaseSubobject Base, - const CXXRecordDecl *VTableClass) override; - llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; @@ -937,7 +933,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF, } CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); - CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer()); + CPI->setArgOperand(2, var.getObjectAddress(CGF).emitRawPointer(CGF)); CGF.EHStack.pushCleanup(NormalCleanup, CPI); CGF.EmitAutoVarCleanups(var); } @@ -974,7 +970,7 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value, llvm::Value *Offset = GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase); llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP( - Value.getElementType(), Value.getPointer(), Offset); + Value.getElementType(), Value.emitRawPointer(CGF), Offset); CharUnits VBaseAlign = CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase); return std::make_tuple(Address(Ptr, CGF.Int8Ty, VBaseAlign), Offset, @@ -1011,7 +1007,7 @@ llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF, llvm::Type *StdTypeInfoPtrTy) { std::tie(ThisPtr, std::ignore, std::ignore) = performBaseAdjustment(CGF, ThisPtr, SrcRecordTy); - llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer()); + llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.emitRawPointer(CGF)); return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy); } @@ -1034,7 +1030,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastCall( llvm::Value *Offset; std::tie(This, Offset, std::ignore) = performBaseAdjustment(CGF, This, SrcRecordTy); - llvm::Value *ThisPtr = This.getPointer(); + llvm::Value *ThisPtr = This.emitRawPointer(CGF); Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty); // PVOID __RTDynamicCast( @@ -1066,7 +1062,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF, llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false), "__RTCastToVoid"); - llvm::Value *Args[] = {Value.getPointer()}; + llvm::Value *Args[] = {Value.emitRawPointer(CGF)}; return CGF.EmitRuntimeCall(Function, Args); } @@ -1494,7 +1490,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall( llvm::Value *VBaseOffset = GetVirtualBaseClassOffset(CGF, Result, Derived, VBase); llvm::Value *VBasePtr = CGF.Builder.CreateInBoundsGEP( - Result.getElementType(), Result.getPointer(), VBaseOffset); + Result.getElementType(), Result.emitRawPointer(CGF), VBaseOffset); CharUnits VBaseAlign = CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase); Result = Address(VBasePtr, CGF.Int8Ty, VBaseAlign); @@ -1661,7 +1657,8 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF, llvm::Value *Implicit = getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating); // = nullptr - CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, + 
CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy), + ThisTy, /*ImplicitParam=*/Implicit, /*ImplicitParamTy=*/QualType(), nullptr); if (BaseDtorEndBB) { @@ -1792,13 +1789,6 @@ MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base, return VFTablesMap[ID]; } -llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr( - BaseSubobject Base, const CXXRecordDecl *VTableClass) { - llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass); - assert(VFTable && "Couldn't find a vftable for the given base?"); - return VFTable; -} - llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) { // getAddrOfVTable may return 0 if asked to get an address of a vtable which @@ -2014,8 +2004,9 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall( } This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true); - RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, - ImplicitParam, Context.IntTy, CE); + RValue RV = + CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy, + ImplicitParam, Context.IntTy, CE); return RV.getScalarVal(); } @@ -2213,13 +2204,13 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This, const ThisAdjustment &TA) { if (TA.isEmpty()) - return This.getPointer(); + return This.emitRawPointer(CGF); This = This.withElementType(CGF.Int8Ty); llvm::Value *V; if (TA.Virtual.isEmpty()) { - V = This.getPointer(); + V = This.emitRawPointer(CGF); } else { assert(TA.Virtual.Microsoft.VtordispOffset < 0); // Adjust the this argument based on the vtordisp value. @@ -2228,7 +2219,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF, CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset)); VtorDispPtr = VtorDispPtr.withElementType(CGF.Int32Ty); llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp"); - V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(), + V = CGF.Builder.CreateGEP(This.getElementType(), This.emitRawPointer(CGF), CGF.Builder.CreateNeg(VtorDisp)); // Unfortunately, having applied the vtordisp means that we no @@ -2265,11 +2256,11 @@ llvm::Value * MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret, const ReturnAdjustment &RA) { if (RA.isEmpty()) - return Ret.getPointer(); + return Ret.emitRawPointer(CGF); Ret = Ret.withElementType(CGF.Int8Ty); - llvm::Value *V = Ret.getPointer(); + llvm::Value *V = Ret.emitRawPointer(CGF); if (RA.Virtual.Microsoft.VBIndex) { assert(RA.Virtual.Microsoft.VBIndex > 0); int32_t IntSize = CGF.getIntSize().getQuantity(); @@ -2584,7 +2575,7 @@ struct ResetGuardBit final : EHScopeStack::Cleanup { struct CallInitThreadAbort final : EHScopeStack::Cleanup { llvm::Value *Guard; - CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {} + CallInitThreadAbort(RawAddress Guard) : Guard(Guard.getPointer()) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Calling _Init_thread_abort will reset the guard's state. @@ -3124,8 +3115,8 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF, llvm::Value **VBPtrOut) { CGBuilderTy &Builder = CGF.Builder; // Load the vbtable pointer from the vbptr in the instance. 
- llvm::Value *VBPtr = Builder.CreateInBoundsGEP(CGM.Int8Ty, This.getPointer(), - VBPtrOffset, "vbptr"); + llvm::Value *VBPtr = Builder.CreateInBoundsGEP( + CGM.Int8Ty, This.emitRawPointer(CGF), VBPtrOffset, "vbptr"); if (VBPtrOut) *VBPtrOut = VBPtr; @@ -3204,7 +3195,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase( Builder.CreateBr(SkipAdjustBB); CGF.EmitBlock(SkipAdjustBB); llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base"); - Phi->addIncoming(Base.getPointer(), OriginalBB); + Phi->addIncoming(Base.emitRawPointer(CGF), OriginalBB); Phi->addIncoming(AdjustedBase, VBaseAdjustBB); return Phi; } @@ -3239,7 +3230,7 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress( Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset, VBPtrOffset); } else { - Addr = Base.getPointer(); + Addr = Base.emitRawPointer(CGF); } // Apply the offset, which we assume is non-null. @@ -3527,7 +3518,7 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer( ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This, VirtualBaseAdjustmentOffset, VBPtrOffset); } else { - ThisPtrForCall = This.getPointer(); + ThisPtrForCall = This.emitRawPointer(CGF); } if (NonVirtualBaseAdjustment) @@ -4446,10 +4437,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { llvm::GlobalVariable *TI = getThrowInfo(ThrowType); // Call into the runtime to throw the exception. - llvm::Value *Args[] = { - AI.getPointer(), - TI - }; + llvm::Value *Args[] = {AI.emitRawPointer(CGF), TI}; CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args); } diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h index 6893b50a3cfe9..b1dfe5bf8f274 100644 --- a/clang/lib/CodeGen/TargetInfo.h +++ b/clang/lib/CodeGen/TargetInfo.h @@ -295,6 +295,11 @@ class TargetCodeGenInfo { /// Get the AST address space for alloca. virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; } + Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, + LangAS SrcAddr, LangAS DestAddr, + llvm::Type *DestTy, + bool IsNonNull = false) const; + /// Perform address space cast of an expression of pointer type. /// \param V is the LLVM value to be casted to another address space. /// \param SrcAddr is the language address space of \p V. diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp index 946d70dbe84ef..a814be9e20d8c 100644 --- a/clang/lib/CodeGen/Targets/NVPTX.cpp +++ b/clang/lib/CodeGen/Targets/NVPTX.cpp @@ -85,7 +85,7 @@ class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { LValue Src) { llvm::Value *Handle = nullptr; llvm::Constant *C = - llvm::dyn_cast(Src.getAddress(CGF).getPointer()); + llvm::dyn_cast(Src.getAddress(CGF).emitRawPointer(CGF)); // Lookup `addrspacecast` through the constant pointer if any. if (auto *ASC = llvm::dyn_cast_or_null(C)) C = llvm::cast(ASC->getPointerOperand()); diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp index 3eadb19bd2058..174fddabbbdb6 100644 --- a/clang/lib/CodeGen/Targets/PPC.cpp +++ b/clang/lib/CodeGen/Targets/PPC.cpp @@ -513,9 +513,10 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 
4 : 8); llvm::Value *RegOffset = Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); - RegAddr = Address( - Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset), - DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); + RegAddr = Address(Builder.CreateInBoundsGEP( + CGF.Int8Ty, RegAddr.emitRawPointer(CGF), RegOffset), + DirectTy, + RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); // Increase the used-register count. NumRegs = @@ -551,7 +552,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, // Round up address of argument to alignment CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); if (Align > OverflowAreaAlign) { - llvm::Value *Ptr = OverflowArea.getPointer(); + llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF); OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), OverflowArea.getElementType(), Align); } @@ -560,7 +561,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, // Increase the overflow area. OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); - Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); + Builder.CreateStore(OverflowArea.emitRawPointer(CGF), OverflowAreaAddr); CGF.EmitBranch(Cont); } diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp index a337a52a94eca..9025a633f328e 100644 --- a/clang/lib/CodeGen/Targets/Sparc.cpp +++ b/clang/lib/CodeGen/Targets/Sparc.cpp @@ -326,7 +326,7 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, // Update VAList. Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); - Builder.CreateStore(NextPtr.getPointer(), VAListAddr); + Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr); return ArgAddr.withElementType(ArgTy); } diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp index 6eb0c6ef2f7d6..deaafc85a3157 100644 --- a/clang/lib/CodeGen/Targets/SystemZ.cpp +++ b/clang/lib/CodeGen/Targets/SystemZ.cpp @@ -306,7 +306,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, // Update overflow_arg_area_ptr pointer llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP( - OverflowArgArea.getElementType(), OverflowArgArea.getPointer(), + OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF), PaddedSizeV, "overflow_arg_area"); CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); @@ -382,10 +382,9 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, Address MemAddr = RawMemAddr.withElementType(DirectTy); // Update overflow_arg_area_ptr pointer - llvm::Value *NewOverflowArgArea = - CGF.Builder.CreateGEP(OverflowArgArea.getElementType(), - OverflowArgArea.getPointer(), PaddedSizeV, - "overflow_arg_area"); + llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP( + OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF), + PaddedSizeV, "overflow_arg_area"); CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); CGF.EmitBranch(ContBlock); diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp index abad6920c7872..2c8668520298a 100644 --- a/clang/lib/CodeGen/Targets/X86.cpp +++ b/clang/lib/CodeGen/Targets/X86.cpp @@ -2127,7 +2127,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, bool BitField = i->isBitField(); // Ignore padding bit-fields. 
- if (BitField && i->isUnnamedBitfield()) + if (BitField && i->isUnnamedBitField()) continue; // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than @@ -2168,7 +2168,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, // structure to be passed in memory even if unaligned, and // therefore they can straddle an eightbyte. if (BitField) { - assert(!i->isUnnamedBitfield()); + assert(!i->isUnnamedBitField()); uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); uint64_t Size = i->getBitWidthValue(getContext()); diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp index aeb48f851e169..88edb781a947b 100644 --- a/clang/lib/CodeGen/Targets/XCore.cpp +++ b/clang/lib/CodeGen/Targets/XCore.cpp @@ -180,7 +180,7 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, // Increment the VAList. if (!ArgSize.isZero()) { Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize); - Builder.CreateStore(APN.getPointer(), VAListAddr); + Builder.CreateStore(APN.emitRawPointer(CGF), VAListAddr); } return Val; diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 8206c609e5805..f9df996df2e57 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -8049,6 +8049,14 @@ Action *Driver::ConstructPhaseAction( if (Args.hasArg(options::OPT_extract_api)) return C.MakeAction(Input, types::TY_API_INFO); + // With 'fexperimental-modules-reduced-bmi', we don't want to run the + // precompile phase unless the user specified '--precompile'. In the case + // the '--precompile' flag is enabled, we will try to emit the reduced BMI + // as a by product in GenerateModuleInterfaceAction. + if (Args.hasArg(options::OPT_modules_reduced_bmi) && + !Args.getLastArg(options::OPT__precompile)) + return Input; + types::ID OutputTy = getPrecompiledType(Input->getType()); assert(OutputTy != types::TY_INVALID && "Cannot precompile this input type!"); @@ -9475,8 +9483,10 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA, // If we're emitting a module output with the specified option // `-fmodule-output`. if (!AtTopLevel && isa(JA) && - JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput) + JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput) { + assert(!C.getArgs().hasArg(options::OPT_modules_reduced_bmi)); return GetModuleOutputPath(C, JA, BaseInput); + } // Output to a temporary file? 
if ((!AtTopLevel && !isSaveTempsEnabled() && diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp index ee148d8b462bc..2575c300c28cd 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.cpp +++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp @@ -670,6 +670,10 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D, options::OPT_mno_wavefrontsize64, false)) Features.push_back("+wavefrontsize64"); + if (Args.hasFlag(options::OPT_mamdgpu_precise_memory_op, + options::OPT_mno_amdgpu_precise_memory_op, false)) + Features.push_back("+precise-memory"); + handleTargetFeaturesGroup(D, Triple, Args, Features, options::OPT_m_amdgpu_Features_Group); } diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp index b1dd7c4372d47..96b3cc3bb8ffb 100644 --- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp +++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp @@ -68,8 +68,10 @@ static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A, << A->getSpelling() << Mcpu; } - if (llvm::RISCV::hasFastUnalignedAccess(Mcpu)) - Features.push_back("+fast-unaligned-access"); + if (llvm::RISCV::hasFastUnalignedAccess(Mcpu)) { + Features.push_back("+unaligned-scalar-mem"); + Features.push_back("+unaligned-vector-mem"); + } } void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple, @@ -168,12 +170,16 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple, } // Android requires fast unaligned access on RISCV64. - if (Triple.isAndroid()) - Features.push_back("+fast-unaligned-access"); + if (Triple.isAndroid()) { + Features.push_back("+unaligned-scalar-mem"); + Features.push_back("+unaligned-vector-mem"); + } // -mstrict-align is default, unless -mno-strict-align is specified. AddTargetFeature(Args, Features, options::OPT_mno_strict_align, - options::OPT_mstrict_align, "fast-unaligned-access"); + options::OPT_mstrict_align, "unaligned-scalar-mem"); + AddTargetFeature(Args, Features, options::OPT_mno_strict_align, + options::OPT_mstrict_align, "unaligned-vector-mem"); // Now add any that the user explicitly requested on the command line, // which may override the defaults. diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index fdabffdcf4f50..08d58127c3890 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -357,11 +357,14 @@ static bool addExceptionArgs(const ArgList &Args, types::ID InputType, bool EH = Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, false); - bool EHa = Args.hasFlag(options::OPT_fasync_exceptions, - options::OPT_fno_async_exceptions, false); - if (EHa) { - CmdArgs.push_back("-fasync-exceptions"); - EH = true; + // Async exceptions are Windows MSVC only. + if (Triple.isWindowsMSVCEnvironment()) { + bool EHa = Args.hasFlag(options::OPT_fasync_exceptions, + options::OPT_fno_async_exceptions, false); + if (EHa) { + CmdArgs.push_back("-fasync-exceptions"); + EH = true; + } } // Obj-C exceptions are enabled by default, regardless of -fexceptions. This @@ -4191,6 +4194,24 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D, // module fragment. 
CmdArgs.push_back("-fskip-odr-check-in-gmf"); + if (Args.hasArg(options::OPT_modules_reduced_bmi) && + (Input.getType() == driver::types::TY_CXXModule || + Input.getType() == driver::types::TY_PP_CXXModule)) { + CmdArgs.push_back("-fexperimental-modules-reduced-bmi"); + + if (Args.hasArg(options::OPT_fmodule_output_EQ)) + Args.AddLastArg(CmdArgs, options::OPT_fmodule_output_EQ); + else + CmdArgs.push_back(Args.MakeArgString( + "-fmodule-output=" + + getCXX20NamedModuleOutputPath(Args, Input.getBaseInput()))); + } + + // Noop if we see '-fexperimental-modules-reduced-bmi' with other translation + // units than module units. This is more user friendly to allow end uers to + // enable this feature without asking for help from build systems. + Args.ClaimAllArgs(options::OPT_modules_reduced_bmi); + // We need to include the case the input file is a module file here. // Since the default compilation model for C++ module interface unit will // create temporary module file and compile the temporary module file @@ -8900,7 +8921,8 @@ struct EHFlags { /// The 'a' modifier is unimplemented and fundamentally hard in LLVM IR. /// - c: Assume that extern "C" functions are implicitly nounwind. /// The default is /EHs-c-, meaning cleanups are disabled. -static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) { +static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args, + bool isWindowsMSVC) { EHFlags EH; std::vector EHArgs = @@ -8910,8 +8932,15 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) { switch (EHVal[I]) { case 'a': EH.Asynch = maybeConsumeDash(EHVal, I); - if (EH.Asynch) + if (EH.Asynch) { + // Async exceptions are Windows MSVC only. + if (!isWindowsMSVC) { + EH.Asynch = false; + D.Diag(clang::diag::warn_drv_unused_argument) << "/EHa" << EHVal; + continue; + } EH.Synch = false; + } continue; case 'c': EH.NoUnwindC = maybeConsumeDash(EHVal, I); @@ -8975,7 +9004,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType, const Driver &D = getToolChain().getDriver(); - EHFlags EH = parseClangCLEHFlags(D, Args); + bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment(); + EHFlags EH = parseClangCLEHFlags(D, Args, IsWindowsMSVC); if (!isNVPTX && (EH.Synch || EH.Asynch)) { if (types::isCXX(InputType)) CmdArgs.push_back("-fcxx-exceptions"); diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 5c88237c878c1..44f1e8167d287 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -114,6 +114,7 @@ static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args, case llvm::Triple::csky: case llvm::Triple::loongarch32: case llvm::Triple::loongarch64: + case llvm::Triple::m68k: return !clang::driver::tools::areOptimizationsEnabled(Args); default: break; diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index 2c83f70eb7887..b46bac24503ce 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -264,7 +264,7 @@ static void addVSDefines(const ToolChain &TC, const ArgList &Args, CmdArgs.push_back(Args.MakeArgString("-D_MSC_FULL_VER=" + Twine(ver))); CmdArgs.push_back(Args.MakeArgString("-D_WIN32")); - llvm::Triple triple = TC.getTriple(); + const llvm::Triple &triple = TC.getTriple(); if (triple.isAArch64()) { CmdArgs.push_back("-D_M_ARM64=1"); } else if (triple.isX86() && triple.isArch32Bit()) { @@ -589,7 +589,7 @@ static void 
addFloatingPointOptions(const Driver &D, const ArgList &Args, if (!HonorINFs && !HonorNaNs && AssociativeMath && ReciprocalMath && ApproxFunc && !SignedZeros && - (FPContract == "fast" || FPContract == "")) { + (FPContract == "fast" || FPContract.empty())) { CmdArgs.push_back("-ffast-math"); return; } @@ -679,7 +679,10 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(Args.MakeArgString(TripleStr)); if (isa(JA)) { - CmdArgs.push_back("-E"); + CmdArgs.push_back("-E"); + if (Args.getLastArg(options::OPT_dM)) { + CmdArgs.push_back("-dM"); + } } else if (isa(JA) || isa(JA)) { if (JA.getType() == types::TY_Nothing) { CmdArgs.push_back("-fsyntax-only"); @@ -783,6 +786,10 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA, } } + // Pass the path to compiler resource files. + CmdArgs.push_back("-resource-dir"); + CmdArgs.push_back(D.ResourceDir.c_str()); + // Offloading related options addOffloadOptions(C, Inputs, JA, Args, CmdArgs); diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp index c151debdc5c07..dda4c9d517e5f 100644 --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -249,8 +249,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) // Android ARM uses max-page-size=4096 to reduce VMA usage. ExtraOpts.push_back("-z"); ExtraOpts.push_back("max-page-size=4096"); - } else if (Triple.isAArch64()) { + } else if (Triple.isAArch64() || Triple.getArch() == llvm::Triple::x86_64) { // Android AArch64 uses max-page-size=16384 to support 4k/16k page sizes. + // Android emulates a 16k page size for app testing on x86_64 machines. ExtraOpts.push_back("-z"); ExtraOpts.push_back("max-page-size=16384"); } diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp index 700bce35c8683..ad0e2c3c620c3 100644 --- a/clang/lib/Format/ContinuationIndenter.cpp +++ b/clang/lib/Format/ContinuationIndenter.cpp @@ -684,7 +684,13 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, // arguments to function calls. We do this by ensuring that either all // arguments (including any lambdas) go on the same line as the function // call, or we break before the first argument. - auto PrevNonComment = Current.getPreviousNonComment(); + const auto *Prev = Current.Previous; + if (!Prev) + return false; + // For example, `/*Newline=*/false`. 
+ if (Prev->is(TT_BlockComment) && Current.SpacesRequiredBefore == 0) + return false; + const auto *PrevNonComment = Current.getPreviousNonComment(); if (!PrevNonComment || PrevNonComment->isNot(tok::l_paren)) return false; if (Current.isOneOf(tok::comment, tok::l_paren, TT_LambdaLSquare)) diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index 89e6c19b0af45..ccb2c9190e2ef 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -3891,7 +3891,11 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) { FileName.ends_with_insensitive(".protodevel")) { return FormatStyle::LK_Proto; } - if (FileName.ends_with_insensitive(".textpb") || + // txtpb is the canonical extension, and textproto is the legacy canonical + // extension + // https://protobuf.dev/reference/protobuf/textformat-spec/#text-format-files + if (FileName.ends_with_insensitive(".txtpb") || + FileName.ends_with_insensitive(".textpb") || FileName.ends_with_insensitive(".pb.txt") || FileName.ends_with_insensitive(".textproto") || FileName.ends_with_insensitive(".asciipb")) { diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h index 48b6a9092a8c0..f651e6228c206 100644 --- a/clang/lib/Format/FormatToken.h +++ b/clang/lib/Format/FormatToken.h @@ -35,6 +35,8 @@ namespace format { TYPE(BinaryOperator) \ TYPE(BitFieldColon) \ TYPE(BlockComment) \ + /* l_brace of a block that is not the body of a (e.g. loop) statement. */ \ + TYPE(BlockLBrace) \ TYPE(BracedListLBrace) \ /* The colon at the end of a case label. */ \ TYPE(CaseLabelColon) \ diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 628f70417866c..a679683077ac9 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -2912,6 +2912,8 @@ class AnnotatingParser { return TT_UnaryOperator; if (PrevToken->is(TT_TypeName)) return TT_PointerOrReference; + if (PrevToken->isOneOf(tok::kw_new, tok::kw_delete) && Tok.is(tok::ampamp)) + return TT_BinaryOperator; const FormatToken *NextToken = Tok.getNextNonComment(); @@ -5595,12 +5597,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, return true; if (Left.IsUnterminatedLiteral) return true; - // FIXME: Breaking after newlines seems useful in general. Turn this into an - // option and recognize more cases like endl etc, and break independent of - // what comes after operator lessless. 
- if (Right.is(tok::lessless) && Right.Next && - Right.Next->is(tok::string_literal) && Left.is(tok::string_literal) && - Left.TokenText.ends_with("\\n\"")) { + if (Right.is(tok::lessless) && Right.Next && Left.is(tok::string_literal) && + Right.Next->is(tok::string_literal)) { return true; } if (Right.is(TT_RequiresClause)) { diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp index fb31980ab9f49..4ae54e56331bd 100644 --- a/clang/lib/Format/UnwrappedLineFormatter.cpp +++ b/clang/lib/Format/UnwrappedLineFormatter.cpp @@ -796,8 +796,12 @@ class LineJoiner { } } - if (const auto *LastNonComment = Line.getLastNonComment(); - LastNonComment && LastNonComment->is(tok::l_brace)) { + if (Line.endsWith(tok::l_brace)) { + if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never && + Line.First->is(TT_BlockLBrace)) { + return 0; + } + if (IsSplitBlock && Line.First == Line.Last && I > AnnotatedLines.begin() && (I[-1]->endsWith(tok::kw_else) || IsCtrlStmt(*I[-1]))) { diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index c1f7e2874beb2..603268f771ac5 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -395,9 +395,10 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace, ParseDefault(); continue; } - if (!InRequiresExpression && FormatTok->isNot(TT_MacroBlockBegin) && - tryToParseBracedList()) { - continue; + if (!InRequiresExpression && FormatTok->isNot(TT_MacroBlockBegin)) { + if (tryToParseBracedList()) + continue; + FormatTok->setFinalizedType(TT_BlockLBrace); } parseBlock(); ++StatementCount; diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp index 642b14d8b09d9..04eb104132671 100644 --- a/clang/lib/Frontend/FrontendActions.cpp +++ b/clang/lib/Frontend/FrontendActions.cpp @@ -281,6 +281,13 @@ GenerateModuleInterfaceAction::CreateASTConsumer(CompilerInstance &CI, if (Consumers.empty()) return nullptr; + if (CI.getFrontendOpts().GenReducedBMI && + !CI.getFrontendOpts().ModuleOutputPath.empty()) { + Consumers.push_back(std::make_unique( + CI.getPreprocessor(), CI.getModuleCache(), + CI.getFrontendOpts().ModuleOutputPath)); + } + return std::make_unique(std::move(Consumers)); } diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp index 207077139aa30..d2e8fb2e8d6f2 100644 --- a/clang/lib/Frontend/InitPreprocessor.cpp +++ b/clang/lib/Frontend/InitPreprocessor.cpp @@ -780,6 +780,9 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts, Builder.defineMacro("__cpp_named_character_escapes", "202207L"); Builder.defineMacro("__cpp_placeholder_variables", "202306L"); + // C++26 features supported in earlier language modes. 
+ Builder.defineMacro("__cpp_deleted_function", "202403L"); + if (LangOpts.Char8) Builder.defineMacro("__cpp_char8_t", "202207L"); Builder.defineMacro("__cpp_impl_destroying_delete", "201806L"); diff --git a/clang/lib/Frontend/MultiplexConsumer.cpp b/clang/lib/Frontend/MultiplexConsumer.cpp index 737877329c9ce..744ea70cc24de 100644 --- a/clang/lib/Frontend/MultiplexConsumer.cpp +++ b/clang/lib/Frontend/MultiplexConsumer.cpp @@ -20,6 +20,9 @@ using namespace clang; namespace clang { +class NamespaceDecl; +class TranslationUnitDecl; + MultiplexASTDeserializationListener::MultiplexASTDeserializationListener( const std::vector& L) : Listeners(L) { @@ -115,6 +118,11 @@ class MultiplexASTMutationListener : public ASTMutationListener { void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override; void AddedAttributeToRecord(const Attr *Attr, const RecordDecl *Record) override; + void EnteringModulePurview() override; + void AddedManglingNumber(const Decl *D, unsigned) override; + void AddedStaticLocalNumbers(const Decl *D, unsigned) override; + void AddedAnonymousNamespace(const TranslationUnitDecl *, + NamespaceDecl *AnonNamespace) override; private: std::vector Listeners; @@ -238,6 +246,27 @@ void MultiplexASTMutationListener::AddedAttributeToRecord( L->AddedAttributeToRecord(Attr, Record); } +void MultiplexASTMutationListener::EnteringModulePurview() { + for (auto *L : Listeners) + L->EnteringModulePurview(); +} + +void MultiplexASTMutationListener::AddedManglingNumber(const Decl *D, + unsigned Number) { + for (auto *L : Listeners) + L->AddedManglingNumber(D, Number); +} +void MultiplexASTMutationListener::AddedStaticLocalNumbers(const Decl *D, + unsigned Number) { + for (auto *L : Listeners) + L->AddedStaticLocalNumbers(D, Number); +} +void MultiplexASTMutationListener::AddedAnonymousNamespace( + const TranslationUnitDecl *TU, NamespaceDecl *AnonNamespace) { + for (auto *L : Listeners) + L->AddedAnonymousNamespace(TU, AnonNamespace); +} + } // end namespace clang MultiplexConsumer::MultiplexConsumer( diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp index 1f40db785981d..6ae955a2380b7 100644 --- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp +++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp @@ -592,7 +592,7 @@ namespace { } bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const { - IdentifierInfo* II = &Context->Idents.get("load"); + const IdentifierInfo *II = &Context->Idents.get("load"); Selector LoadSel = Context->Selectors.getSelector(0, &II); return OD->getClassMethod(LoadSel) != nullptr; } diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index 97104ccd8db59..e6ae4e19e81db 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -437,14 +437,14 @@ foreach( f ${generated_files} ) endforeach( f ) function(add_header_target target_name file_list) - add_custom_target(${target_name} DEPENDS ${file_list}) + add_library(${target_name} INTERFACE ${file_list}) set_target_properties(${target_name} PROPERTIES FOLDER "Misc" RUNTIME_OUTPUT_DIRECTORY "${output_dir}") endfunction() # The catch-all clang-resource-headers target -add_custom_target("clang-resource-headers" ALL DEPENDS ${out_files}) +add_library(clang-resource-headers INTERFACE ${out_files}) set_target_properties("clang-resource-headers" PROPERTIES FOLDER "Misc" RUNTIME_OUTPUT_DIRECTORY "${output_dir}") @@ -501,6 +501,10 @@ add_header_target("windows-resource-headers" ${windows_only_files}) 
add_header_target("utility-resource-headers" ${utility_files}) get_clang_resource_dir(header_install_dir SUBDIR include) +target_include_directories(clang-resource-headers INTERFACE + $ + $) +set_property(GLOBAL APPEND PROPERTY CLANG_EXPORTS clang-resource-headers) ############################################################# # Install rules for the catch-all clang-resource-headers target diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp index 9c7349ef94669..108786a12b855 100644 --- a/clang/lib/Index/USRGeneration.cpp +++ b/clang/lib/Index/USRGeneration.cpp @@ -267,10 +267,13 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) { Out << '>'; } + QualType CanonicalType = D->getType().getCanonicalType(); // Mangle in type information for the arguments. - for (auto *PD : D->parameters()) { - Out << '#'; - VisitType(PD->getType()); + if (const auto *FPT = CanonicalType->getAs()) { + for (QualType PT : FPT->param_types()) { + Out << '#'; + VisitType(PT); + } } if (D->isVariadic()) Out << '.'; diff --git a/clang/lib/InstallAPI/DylibVerifier.cpp b/clang/lib/InstallAPI/DylibVerifier.cpp index 4fa2d4e9292c7..84d9b5892e88d 100644 --- a/clang/lib/InstallAPI/DylibVerifier.cpp +++ b/clang/lib/InstallAPI/DylibVerifier.cpp @@ -176,7 +176,13 @@ void DylibVerifier::addSymbol(const Record *R, SymbolContext &SymCtx, bool DylibVerifier::shouldIgnoreObsolete(const Record *R, SymbolContext &SymCtx, const Record *DR) { - return SymCtx.FA->Avail.isObsoleted(); + if (!SymCtx.FA->Avail.isObsoleted()) + return false; + + if (Zippered) + DeferredZipperedSymbols[SymCtx.SymbolName].emplace_back(ZipperedDeclSource{ + SymCtx.FA, &Ctx.Diag->getSourceManager(), Ctx.Target}); + return true; } bool DylibVerifier::shouldIgnoreReexport(const Record *R, @@ -195,6 +201,28 @@ bool DylibVerifier::shouldIgnoreReexport(const Record *R, return false; } +bool DylibVerifier::shouldIgnoreInternalZipperedSymbol( + const Record *R, const SymbolContext &SymCtx) const { + if (!Zippered) + return false; + + return Exports->findSymbol(SymCtx.Kind, SymCtx.SymbolName, + SymCtx.ObjCIFKind) != nullptr; +} + +bool DylibVerifier::shouldIgnoreZipperedAvailability(const Record *R, + SymbolContext &SymCtx) { + if (!(Zippered && SymCtx.FA->Avail.isUnavailable())) + return false; + + // Collect source location incase there is an exported symbol to diagnose + // during `verifyRemainingSymbols`. 
+ DeferredZipperedSymbols[SymCtx.SymbolName].emplace_back( + ZipperedDeclSource{SymCtx.FA, SourceManagers.back().get(), Ctx.Target}); + + return true; +} + bool DylibVerifier::compareObjCInterfaceSymbols(const Record *R, SymbolContext &SymCtx, const ObjCInterfaceRecord *DR) { @@ -294,6 +322,9 @@ DylibVerifier::Result DylibVerifier::compareVisibility(const Record *R, if (shouldIgnorePrivateExternAttr(SymCtx.FA->D)) return Result::Ignore; + if (shouldIgnoreInternalZipperedSymbol(R, SymCtx)) + return Result::Ignore; + unsigned ID; Result Outcome; if (Mode == VerificationMode::ErrorsAndWarnings) { @@ -321,6 +352,9 @@ DylibVerifier::Result DylibVerifier::compareAvailability(const Record *R, if (!SymCtx.FA->Avail.isUnavailable()) return Result::Valid; + if (shouldIgnoreZipperedAvailability(R, SymCtx)) + return Result::Ignore; + const bool IsDeclAvailable = SymCtx.FA->Avail.isUnavailable(); switch (Mode) { @@ -588,13 +622,58 @@ void DylibVerifier::visitSymbolInDylib(const Record &R, SymbolContext &SymCtx) { } } + const bool IsLinkerSymbol = SymbolName.starts_with("$ld$"); + + if (R.isVerified()) { + // Check for unavailable symbols. + // This should only occur in the zippered case where we ignored + // availability until all headers have been parsed. + auto It = DeferredZipperedSymbols.find(SymCtx.SymbolName); + if (It == DeferredZipperedSymbols.end()) { + updateState(Result::Valid); + return; + } + + ZipperedDeclSources Locs; + for (const ZipperedDeclSource &ZSource : It->second) { + if (ZSource.FA->Avail.isObsoleted()) { + updateState(Result::Ignore); + return; + } + if (ZSource.T.Arch != Ctx.Target.Arch) + continue; + Locs.emplace_back(ZSource); + } + assert(Locs.size() == 2 && "Expected two decls for zippered symbol"); + + // Print violating declarations per platform. + for (const ZipperedDeclSource &ZSource : Locs) { + unsigned DiagID = 0; + if (Mode == VerificationMode::Pedantic || IsLinkerSymbol) { + updateState(Result::Invalid); + DiagID = diag::err_header_availability_mismatch; + } else if (Mode == VerificationMode::ErrorsAndWarnings) { + updateState(Result::Ignore); + DiagID = diag::warn_header_availability_mismatch; + } else { + updateState(Result::Ignore); + return; + } + // Bypass emitDiag banner and print the target everytime. + Ctx.Diag->setSourceManager(ZSource.SrcMgr); + Ctx.Diag->Report(diag::warn_target) << getTargetTripleName(ZSource.T); + Ctx.Diag->Report(ZSource.FA->Loc, DiagID) + << getAnnotatedName(&R, SymCtx) << ZSource.FA->Avail.isUnavailable() + << ZSource.FA->Avail.isUnavailable(); + } + return; + } + if (shouldIgnoreCpp(SymbolName, R.isWeakDefined())) { updateState(Result::Valid); return; } - const bool IsLinkerSymbol = SymbolName.starts_with("$ld$"); - // All checks at this point classify as some kind of violation. // The different verification modes dictate whether they are reported to the // user. 
@@ -647,8 +726,6 @@ void DylibVerifier::visitSymbolInDylib(const Record &R, SymbolContext &SymCtx) { } void DylibVerifier::visitGlobal(const GlobalRecord &R) { - if (R.isVerified()) - return; SymbolContext SymCtx; SimpleSymbol Sym = parseSymbol(R.getName()); SymCtx.SymbolName = Sym.Name; @@ -658,8 +735,6 @@ void DylibVerifier::visitGlobal(const GlobalRecord &R) { void DylibVerifier::visitObjCIVar(const ObjCIVarRecord &R, const StringRef Super) { - if (R.isVerified()) - return; SymbolContext SymCtx; SymCtx.SymbolName = ObjCIVarRecord::createScopedName(Super, R.getName()); SymCtx.Kind = EncodeKind::ObjectiveCInstanceVariable; @@ -679,8 +754,6 @@ void DylibVerifier::accumulateSrcLocForDylibSymbols() { } void DylibVerifier::visitObjCInterface(const ObjCInterfaceRecord &R) { - if (R.isVerified()) - return; SymbolContext SymCtx; SymCtx.SymbolName = R.getName(); SymCtx.ObjCIFKind = assignObjCIFSymbolKind(&R); @@ -713,9 +786,12 @@ DylibVerifier::Result DylibVerifier::verifyRemainingSymbols() { DWARFContext DWARFInfo; DWARFCtx = &DWARFInfo; - Ctx.DiscoveredFirstError = false; - Ctx.PrintArch = true; + Ctx.Target = Target(Architecture::AK_unknown, PlatformType::PLATFORM_UNKNOWN); for (std::shared_ptr Slice : Dylib) { + if (Ctx.Target.Arch == Slice->getTarget().Arch) + continue; + Ctx.DiscoveredFirstError = false; + Ctx.PrintArch = true; Ctx.Target = Slice->getTarget(); Ctx.DylibSlice = Slice.get(); Slice->visit(*this); diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index cf31456b6950a..b20e6efcebfd1 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ b/clang/lib/Interpreter/Interpreter.cpp @@ -550,7 +550,8 @@ std::unique_ptr Interpreter::FindRuntimeInterface() { auto LookupInterface = [&](Expr *&Interface, llvm::StringRef Name) { LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(), - Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration); + Sema::LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration); S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl()); if (R.empty()) return false; diff --git a/clang/lib/Interpreter/InterpreterUtils.cpp b/clang/lib/Interpreter/InterpreterUtils.cpp index c19cf6aa3156c..45f6322b8461e 100644 --- a/clang/lib/Interpreter/InterpreterUtils.cpp +++ b/clang/lib/Interpreter/InterpreterUtils.cpp @@ -72,7 +72,7 @@ NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name, const DeclContext *Within) { DeclarationName DName = &S.Context.Idents.get(Name); LookupResult R(S, DName, SourceLocation(), Sema::LookupOrdinaryName, - Sema::ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); R.suppressDiagnostics(); diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp index 7dffcf0e941e0..0632882b29614 100644 --- a/clang/lib/Lex/HeaderSearch.cpp +++ b/clang/lib/Lex/HeaderSearch.cpp @@ -64,8 +64,7 @@ HeaderFileInfo::getControllingMacro(ExternalPreprocessorSource *External) { if (ControllingMacro->isOutOfDate()) { assert(External && "We must have an external source if we have a " "controlling macro that is out of date."); - External->updateOutOfDateIdentifier( - *const_cast(ControllingMacro)); + External->updateOutOfDateIdentifier(*ControllingMacro); } return ControllingMacro; } @@ -947,9 +946,13 @@ OptionalFileEntryRef HeaderSearch::LookupFile( // If we have no includer, that means we're processing a #include // from a module build. We should treat this as a system header if we're // building a [system] module. - bool IncluderIsSystemHeader = - Includer ? 
getFileInfo(*Includer).DirInfo != SrcMgr::C_User : - BuildSystemModule; + bool IncluderIsSystemHeader = [&]() { + if (!Includer) + return BuildSystemModule; + const HeaderFileInfo *HFI = getExistingFileInfo(*Includer); + assert(HFI && "includer without file info"); + return HFI->DirInfo != SrcMgr::C_User; + }(); if (OptionalFileEntryRef FE = getFileAndSuggestModule( TmpDir, IncludeLoc, IncluderAndDir.second, IncluderIsSystemHeader, RequestingModule, SuggestedModule)) { @@ -964,10 +967,11 @@ OptionalFileEntryRef HeaderSearch::LookupFile( // Note that we only use one of FromHFI/ToHFI at once, due to potential // reallocation of the underlying vector potentially making the first // reference binding dangling. - HeaderFileInfo &FromHFI = getFileInfo(*Includer); - unsigned DirInfo = FromHFI.DirInfo; - bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader; - StringRef Framework = FromHFI.Framework; + const HeaderFileInfo *FromHFI = getExistingFileInfo(*Includer); + assert(FromHFI && "includer without file info"); + unsigned DirInfo = FromHFI->DirInfo; + bool IndexHeaderMapHeader = FromHFI->IndexHeaderMapHeader; + StringRef Framework = FromHFI->Framework; HeaderFileInfo &ToHFI = getFileInfo(*FE); ToHFI.DirInfo = DirInfo; @@ -1154,10 +1158,12 @@ OptionalFileEntryRef HeaderSearch::LookupFile( // "Foo" is the name of the framework in which the including header was found. if (!Includers.empty() && Includers.front().first && !isAngled && !Filename.contains('/')) { - HeaderFileInfo &IncludingHFI = getFileInfo(*Includers.front().first); - if (IncludingHFI.IndexHeaderMapHeader) { + const HeaderFileInfo *IncludingHFI = + getExistingFileInfo(*Includers.front().first); + assert(IncludingHFI && "includer without file info"); + if (IncludingHFI->IndexHeaderMapHeader) { SmallString<128> ScratchFilename; - ScratchFilename += IncludingHFI.Framework; + ScratchFilename += IncludingHFI->Framework; ScratchFilename += '/'; ScratchFilename += Filename; @@ -1287,11 +1293,11 @@ OptionalFileEntryRef HeaderSearch::LookupSubframeworkHeader( } // This file is a system header or C++ unfriendly if the old file is. - // - // Note that the temporary 'DirInfo' is required here, as either call to - // getFileInfo could resize the vector and we don't want to rely on order - // of evaluation. - unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo; + const HeaderFileInfo *ContextHFI = getExistingFileInfo(ContextFileEnt); + assert(ContextHFI && "context file without file info"); + // Note that the temporary 'DirInfo' is required here, as the call to + // getFileInfo could resize the vector and might invalidate 'ContextHFI'. + unsigned DirInfo = ContextHFI->DirInfo; getFileInfo(*File).DirInfo = DirInfo; FrameworkName.pop_back(); // remove the trailing '/' @@ -1349,8 +1355,6 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI, HFI.Framework = OtherHFI.Framework; } -/// getFileInfo - Return the HeaderFileInfo structure for the specified -/// FileEntry. HeaderFileInfo &HeaderSearch::getFileInfo(FileEntryRef FE) { if (FE.getUID() >= FileInfo.size()) FileInfo.resize(FE.getUID() + 1); @@ -1367,27 +1371,20 @@ HeaderFileInfo &HeaderSearch::getFileInfo(FileEntryRef FE) { } HFI->IsValid = true; - // We have local information about this header file, so it's no longer - // strictly external. + // We assume the caller has local information about this header file, so it's + // no longer strictly external. 
HFI->External = false; return *HFI; } -const HeaderFileInfo * -HeaderSearch::getExistingFileInfo(FileEntryRef FE, bool WantExternal) const { - // If we have an external source, ensure we have the latest information. - // FIXME: Use a generation count to check whether this is really up to date. +const HeaderFileInfo *HeaderSearch::getExistingFileInfo(FileEntryRef FE) const { HeaderFileInfo *HFI; if (ExternalSource) { - if (FE.getUID() >= FileInfo.size()) { - if (!WantExternal) - return nullptr; + if (FE.getUID() >= FileInfo.size()) FileInfo.resize(FE.getUID() + 1); - } HFI = &FileInfo[FE.getUID()]; - if (!WantExternal && (!HFI->IsValid || HFI->External)) - return nullptr; + // FIXME: Use a generation count to check whether this is really up to date. if (!HFI->Resolved) { auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE); if (ExternalHFI.IsValid) { @@ -1396,16 +1393,25 @@ HeaderSearch::getExistingFileInfo(FileEntryRef FE, bool WantExternal) const { mergeHeaderFileInfo(*HFI, ExternalHFI); } } - } else if (FE.getUID() >= FileInfo.size()) { - return nullptr; - } else { + } else if (FE.getUID() < FileInfo.size()) { HFI = &FileInfo[FE.getUID()]; + } else { + HFI = nullptr; } - if (!HFI->IsValid || (HFI->External && !WantExternal)) - return nullptr; + return (HFI && HFI->IsValid) ? HFI : nullptr; +} + +const HeaderFileInfo * +HeaderSearch::getExistingLocalFileInfo(FileEntryRef FE) const { + HeaderFileInfo *HFI; + if (FE.getUID() < FileInfo.size()) { + HFI = &FileInfo[FE.getUID()]; + } else { + HFI = nullptr; + } - return HFI; + return (HFI && HFI->IsValid && !HFI->External) ? HFI : nullptr; } bool HeaderSearch::isFileMultipleIncludeGuarded(FileEntryRef File) const { diff --git a/clang/lib/Lex/MacroInfo.cpp b/clang/lib/Lex/MacroInfo.cpp index 39bb0f44eff25..dfdf463665f3c 100644 --- a/clang/lib/Lex/MacroInfo.cpp +++ b/clang/lib/Lex/MacroInfo.cpp @@ -257,7 +257,7 @@ LLVM_DUMP_METHOD void MacroDirective::dump() const { } ModuleMacro *ModuleMacro::create(Preprocessor &PP, Module *OwningModule, - IdentifierInfo *II, MacroInfo *Macro, + const IdentifierInfo *II, MacroInfo *Macro, ArrayRef Overrides) { void *Mem = PP.getPreprocessorAllocator().Allocate( sizeof(ModuleMacro) + sizeof(ModuleMacro *) * Overrides.size(), diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp index 3b1b6df1dbae4..2ca2122ac7109 100644 --- a/clang/lib/Lex/PPLexerChange.cpp +++ b/clang/lib/Lex/PPLexerChange.cpp @@ -368,8 +368,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) { // Okay, this has a controlling macro, remember in HeaderFileInfo. if (OptionalFileEntryRef FE = CurPPLexer->getFileEntry()) { HeaderInfo.SetFileControllingMacro(*FE, ControllingMacro); - if (MacroInfo *MI = - getMacroInfo(const_cast(ControllingMacro))) + if (MacroInfo *MI = getMacroInfo(ControllingMacro)) MI->setUsedForHeaderGuard(true); if (const IdentifierInfo *DefinedMacro = CurPPLexer->MIOpt.GetDefinedMacro()) { @@ -805,7 +804,7 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) { llvm::SmallPtrSet VisitedMacros; for (unsigned I = Info.OuterPendingModuleMacroNames; I != PendingModuleMacroNames.size(); ++I) { - auto *II = const_cast(PendingModuleMacroNames[I]); + const auto *II = PendingModuleMacroNames[I]; if (!VisitedMacros.insert(II).second) continue; @@ -855,8 +854,8 @@ Module *Preprocessor::LeaveSubmodule(bool ForPragma) { // Don't bother creating a module macro if it would represent a #undef // that doesn't override anything. 
if (Def || !Macro.getOverriddenMacros().empty()) - addModuleMacro(LeavingMod, II, Def, - Macro.getOverriddenMacros(), IsNew); + addModuleMacro(LeavingMod, II, Def, Macro.getOverriddenMacros(), + IsNew); if (!getLangOpts().ModulesLocalVisibility) { // This macro is exposed to the rest of this compilation as a diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp index 86317cf44000e..e6060ac0905e6 100644 --- a/clang/lib/Lex/PPMacroExpansion.cpp +++ b/clang/lib/Lex/PPMacroExpansion.cpp @@ -129,7 +129,7 @@ void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II, II->setHasMacroDefinition(false); } -ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II, +ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, const IdentifierInfo *II, MacroInfo *Macro, ArrayRef Overrides, bool &New) { @@ -162,7 +162,7 @@ ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II, // The new macro is always a leaf macro. LeafMacros.push_back(MM); // The identifier now has defined macros (that may or may not be visible). - II->setHasMacroDefinition(true); + const_cast(II)->setHasMacroDefinition(true); New = true; return MM; diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp index 031ed1e16bb8f..0b70192743a39 100644 --- a/clang/lib/Lex/Preprocessor.cpp +++ b/clang/lib/Lex/Preprocessor.cpp @@ -759,7 +759,7 @@ void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) { Diag(Identifier,it->second) << Identifier.getIdentifierInfo(); } -void Preprocessor::updateOutOfDateIdentifier(IdentifierInfo &II) const { +void Preprocessor::updateOutOfDateIdentifier(const IdentifierInfo &II) const { assert(II.isOutOfDate() && "not out of date"); getExternalSource()->updateOutOfDateIdentifier(II); } diff --git a/clang/lib/Parse/ParseAST.cpp b/clang/lib/Parse/ParseAST.cpp index 4e47ea5ea364f..dc8df8e8f3f03 100644 --- a/clang/lib/Parse/ParseAST.cpp +++ b/clang/lib/Parse/ParseAST.cpp @@ -21,6 +21,7 @@ #include "clang/Sema/EnterExpressionEvaluationContext.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaConsumer.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/TemplateInstCallback.h" #include "llvm/Support/CrashRecoveryContext.h" #include "llvm/Support/TimeProfiler.h" @@ -174,7 +175,7 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) { Consumer->HandleTopLevelDecl(DeclGroupRef(D)); if (S.getLangOpts().SYCLIsDevice) { - for (Decl *D : S.syclDeviceDecls()) { + for (Decl *D : S.SYCL().syclDeviceDecls()) { Consumer->HandleTopLevelDecl(DeclGroupRef(D)); } } diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp index d790060c17c04..d054eda279b8c 100644 --- a/clang/lib/Parse/ParseCXXInlineMethods.cpp +++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp @@ -20,6 +20,49 @@ using namespace clang; +/// Parse the optional ("message") part of a deleted-function-body. +StringLiteral *Parser::ParseCXXDeletedFunctionMessage() { + if (!Tok.is(tok::l_paren)) + return nullptr; + StringLiteral *Message = nullptr; + BalancedDelimiterTracker BT{*this, tok::l_paren}; + BT.consumeOpen(); + + if (isTokenStringLiteral()) { + ExprResult Res = ParseUnevaluatedStringLiteralExpression(); + if (Res.isUsable()) { + Message = Res.getAs(); + Diag(Message->getBeginLoc(), getLangOpts().CPlusPlus26 + ? 
diag::warn_cxx23_delete_with_message + : diag::ext_delete_with_message) + << Message->getSourceRange(); + } + } else { + Diag(Tok.getLocation(), diag::err_expected_string_literal) + << /*Source='in'*/ 0 << "'delete'"; + SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch); + } + + BT.consumeClose(); + return Message; +} + +/// If we've encountered '= delete' in a context where it is ill-formed, such +/// as in the declaration of a non-function, also skip the ("message") part if +/// it is present to avoid issuing further diagnostics. +void Parser::SkipDeletedFunctionBody() { + if (!Tok.is(tok::l_paren)) + return; + + BalancedDelimiterTracker BT{*this, tok::l_paren}; + BT.consumeOpen(); + + // Just skip to the end of the current declaration. + SkipUntil(tok::r_paren, tok::comma, StopAtSemi | StopBeforeMatch); + if (Tok.is(tok::r_paren)) + BT.consumeClose(); +} + /// ParseCXXInlineMethodDef - We parsed and verified that the specified /// Declarator is a well formed C++ inline method definition. Now lex its body /// and store its tokens for parsing after the C++ class is complete. @@ -70,7 +113,8 @@ NamedDecl *Parser::ParseCXXInlineMethodDef( ? diag::warn_cxx98_compat_defaulted_deleted_function : diag::ext_defaulted_deleted_function) << 1 /* deleted */; - Actions.SetDeclDeleted(FnD, KWLoc); + StringLiteral *Message = ParseCXXDeletedFunctionMessage(); + Actions.SetDeclDeleted(FnD, KWLoc, Message); Delete = true; if (auto *DeclAsFunction = dyn_cast(FnD)) { DeclAsFunction->setRangeEnd(KWEndLoc); diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index 1fe6ddf16d62b..6b5685a48e827 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -26,7 +26,9 @@ #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaDiagnostic.h" +#include "clang/Sema/SemaOpenMP.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSwitch.h" @@ -2379,14 +2381,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, if (getLangOpts().CPlusPlus23) { auto &LastRecord = Actions.ExprEvalContexts.back(); LastRecord.InLifetimeExtendingContext = true; - - // Materialize non-`cv void` prvalue temporaries in discarded - // expressions. These materialized temporaries may be lifetime-extented. - LastRecord.InMaterializeTemporaryObjectContext = true; } if (getLangOpts().OpenMP) - Actions.startOpenMPCXXRangeFor(); + Actions.OpenMP().startOpenMPCXXRangeFor(); if (Tok.is(tok::l_brace)) FRI->RangeExpr = ParseBraceInitializer(); else @@ -2664,7 +2662,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes( } } - Sema::CUDATargetContextRAII X(Actions, Sema::CTCK_InitGlobalVar, ThisDecl); + SemaCUDA::CUDATargetContextRAII X(Actions.CUDA(), + SemaCUDA::CTCK_InitGlobalVar, ThisDecl); switch (TheInitKind) { // Parse declarator '=' initializer. 
case InitKind::Equal: { @@ -2676,6 +2675,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes( << 1 /* delete */; else Diag(ConsumeToken(), diag::err_deleted_non_function); + SkipDeletedFunctionBody(); } else if (Tok.is(tok::kw_default)) { if (D.isFunctionDeclarator()) Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration) @@ -5332,7 +5332,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, stripTypeAttributesOffDeclSpec(attrs, DS, TUK); - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) && NextToken().is(tok::identifier)) SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(), @@ -7661,8 +7661,21 @@ void Parser::ParseParameterDeclarationClause( // Parse a C++23 Explicit Object Parameter // We do that in all language modes to produce a better diagnostic. SourceLocation ThisLoc; - if (getLangOpts().CPlusPlus && Tok.is(tok::kw_this)) + if (getLangOpts().CPlusPlus && Tok.is(tok::kw_this)) { ThisLoc = ConsumeToken(); + // C++23 [dcl.fct]p6: + // An explicit-object-parameter-declaration is a parameter-declaration + // with a this specifier. An explicit-object-parameter-declaration + // shall appear only as the first parameter-declaration of a + // parameter-declaration-list of either: + // - a member-declarator that declares a member function, or + // - a lambda-declarator. + // + // The parameter-declaration-list of a requires-expression is not such + // a context. + if (DeclaratorCtx == DeclaratorContext::RequiresExpr) + Diag(ThisLoc, diag::err_requires_expr_explicit_object_parameter); + } ParseDeclarationSpecifiers(DS, /*TemplateInfo=*/ParsedTemplateInfo(), AS_none, DeclSpecContext::DSC_normal, @@ -7701,7 +7714,7 @@ void Parser::ParseParameterDeclarationClause( } // Remember this parsed parameter in ParamInfo. - IdentifierInfo *ParmII = ParmDeclarator.getIdentifier(); + const IdentifierInfo *ParmII = ParmDeclarator.getIdentifier(); // DefArgToks is used when the parsing of default arguments needs // to be delayed. diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp index 861a25dc5103c..8e0e868248293 100644 --- a/clang/lib/Parse/ParseDeclCXX.cpp +++ b/clang/lib/Parse/ParseDeclCXX.cpp @@ -616,7 +616,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context, } // Parse nested-name-specifier. - IdentifierInfo *LastII = nullptr; + const IdentifierInfo *LastII = nullptr; if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr, /*ObjectHasErrors=*/false, /*EnteringContext=*/false, @@ -799,6 +799,11 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration( ProhibitAttributes(PrefixAttrs); Decl *DeclFromDeclSpec = nullptr; + Scope *CurScope = getCurScope(); + if (CurScope) + CurScope->setFlags(Scope::ScopeFlags::TypeAliasScope | + CurScope->getFlags()); + Decl *AD = ParseAliasDeclarationAfterDeclarator( TemplateInfo, UsingLoc, D, DeclEnd, AS, Attrs, &DeclFromDeclSpec); return Actions.ConvertDeclToDeclGroup(AD, DeclFromDeclSpec); @@ -2092,7 +2097,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, TypeResult TypeResult = true; // invalid bool Owned = false; - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; if (TemplateId) { // Explicit specialization, class template partial specialization, // or explicit instantiation. 
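To illustrate the new err_requires_expr_explicit_object_parameter check above (example code, not from the patch): an explicit object parameter stays valid for member functions and lambdas, but is now rejected in a requires-expression's parameter list.

    struct S {
      void f(this S self);                       // OK: member-declarator
    };
    auto l = [](this auto self) { return 42; };  // OK: lambda-declarator
    // Ill-formed under the new diagnostic:
    // template <class T> concept C = requires(this T t) { t; };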
@@ -3397,6 +3402,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction, << 1 /* delete */; else Diag(ConsumeToken(), diag::err_deleted_non_function); + SkipDeletedFunctionBody(); return ExprError(); } } else if (Tok.is(tok::kw_default)) { diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp index 7164b3e34b028..69746eaccb43b 100644 --- a/clang/lib/Parse/ParseExpr.cpp +++ b/clang/lib/Parse/ParseExpr.cpp @@ -30,6 +30,9 @@ #include "clang/Sema/EnterExpressionEvaluationContext.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaCUDA.h" +#include "clang/Sema/SemaOpenMP.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/TypoCorrection.h" #include "llvm/ADT/SmallVector.h" #include @@ -1949,8 +1952,8 @@ ExprResult Parser::ParseSYCLBuiltinNum() { T.consumeClose(); if (IsNumFields) - return Actions.ActOnSYCLBuiltinNumFieldsExpr(TR.get()); - return Actions.ActOnSYCLBuiltinNumBasesExpr(TR.get()); + return Actions.SYCL().ActOnSYCLBuiltinNumFieldsExpr(TR.get()); + return Actions.SYCL().ActOnSYCLBuiltinNumBasesExpr(TR.get()); } /// __builtin_field_type '(' type-id ',' integer-constant ')' or @@ -1981,8 +1984,8 @@ ExprResult Parser::ParseSYCLBuiltinType() { T.consumeClose(); if (IsFieldType) - return Actions.ActOnSYCLBuiltinFieldTypeExpr(TR.get(), IdxRes.get()); - return Actions.ActOnSYCLBuiltinBaseTypeExpr(TR.get(), IdxRes.get()); + return Actions.SYCL().ActOnSYCLBuiltinFieldTypeExpr(TR.get(), IdxRes.get()); + return Actions.SYCL().ActOnSYCLBuiltinBaseTypeExpr(TR.get(), IdxRes.get()); } /// Once the leading part of a postfix-expression is parsed, this @@ -2142,7 +2145,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { // replace this call to ActOnOpenACCArraySectionExpr in the future. // Eventually we'll genericize the OPenMPArraySectionExpr type as // well. - LHS = Actions.ActOnOMPArraySectionExpr( + LHS = Actions.OpenMP().ActOnOMPArraySectionExpr( LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0], ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(), RLoc); } else { @@ -2197,10 +2200,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { } if (!LHS.isInvalid()) { - ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(), - OpenLoc, - ExecConfigExprs, - CloseLoc); + ExprResult ECResult = Actions.CUDA().ActOnExecConfigExpr( + getCurScope(), OpenLoc, ExecConfigExprs, CloseLoc); if (ECResult.isInvalid()) LHS = ExprError(); else @@ -2559,8 +2560,8 @@ ExprResult Parser::ParseSYCLUniqueStableNameExpression() { if (T.consumeClose()) return ExprError(); - return Actions.ActOnSYCLUniqueStableNameExpr(OpLoc, T.getOpenLocation(), - T.getCloseLocation(), Ty.get()); + return Actions.SYCL().ActOnUniqueStableNameExpr( + OpLoc, T.getOpenLocation(), T.getCloseLocation(), Ty.get()); } // Parse a __builtin_sycl_unique_stable_id expression. 
Accepts an expression, @@ -2590,7 +2591,7 @@ ExprResult Parser::ParseSYCLUniqueStableIdExpression() { if (T.consumeClose()) return ExprError(); - return Actions.ActOnSYCLUniqueStableIdExpr( + return Actions.SYCL().ActOnUniqueStableIdExpr( OpLoc, T.getOpenLocation(), T.getCloseLocation(), VarExpr.get()); } @@ -3377,7 +3378,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, if (ErrorFound) { Result = ExprError(); } else if (!Result.isInvalid()) { - Result = Actions.ActOnOMPArrayShapingExpr( + Result = Actions.OpenMP().ActOnOMPArrayShapingExpr( Result.get(), OpenLoc, RParenLoc, OMPDimensions, OMPBracketsRanges); } return Result; diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp index 73c85c585baae..0d2ad980696fc 100644 --- a/clang/lib/Parse/ParseExprCXX.cpp +++ b/clang/lib/Parse/ParseExprCXX.cpp @@ -157,7 +157,8 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType, bool Parser::ParseOptionalCXXScopeSpecifier( CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool *MayBePseudoDestructor, bool IsTypename, - IdentifierInfo **LastII, bool OnlyNamespace, bool InUsingDeclaration) { + const IdentifierInfo **LastII, bool OnlyNamespace, + bool InUsingDeclaration) { assert(getLangOpts().CPlusPlus && "Call sites of this function should be guarded by checking for C++"); @@ -2626,7 +2627,7 @@ bool Parser::ParseUnqualifiedIdTemplateId( // UnqualifiedId. // FIXME: Store name for literal operator too. - IdentifierInfo *TemplateII = + const IdentifierInfo *TemplateII = Id.getKind() == UnqualifiedIdKind::IK_Identifier ? Id.Identifier : nullptr; OverloadedOperatorKind OpKind = @@ -3909,10 +3910,10 @@ ExprResult Parser::ParseTypeTrait() { SmallVector Args; do { // Parse the next type. - TypeResult Ty = - ParseTypeName(/*SourceRange=*/nullptr, - getLangOpts().CPlusPlus ? DeclaratorContext::TemplateArg - : DeclaratorContext::TypeName); + TypeResult Ty = ParseTypeName(/*SourceRange=*/nullptr, + getLangOpts().CPlusPlus + ? DeclaratorContext::TemplateTypeArg + : DeclaratorContext::TypeName); if (Ty.isInvalid()) { Parens.skipToEnd(); return ExprError(); @@ -3954,8 +3955,8 @@ ExprResult Parser::ParseArrayTypeTrait() { if (T.expectAndConsume()) return ExprError(); - TypeResult Ty = - ParseTypeName(/*SourceRange=*/nullptr, DeclaratorContext::TemplateArg); + TypeResult Ty = ParseTypeName(/*SourceRange=*/nullptr, + DeclaratorContext::TemplateTypeArg); if (Ty.isInvalid()) { SkipUntil(tok::comma, StopAtSemi); SkipUntil(tok::r_paren, StopAtSemi); diff --git a/clang/lib/Parse/ParseHLSL.cpp b/clang/lib/Parse/ParseHLSL.cpp index 5afc958600fa5..d97985d42369a 100644 --- a/clang/lib/Parse/ParseHLSL.cpp +++ b/clang/lib/Parse/ParseHLSL.cpp @@ -72,9 +72,9 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) { return nullptr; } - Decl *D = Actions.HLSL().ActOnStartHLSLBuffer( - getCurScope(), IsCBuffer, BufferLoc, Identifier, IdentifierLoc, - T.getOpenLocation()); + Decl *D = Actions.HLSL().ActOnStartBuffer(getCurScope(), IsCBuffer, BufferLoc, + Identifier, IdentifierLoc, + T.getOpenLocation()); while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) { // FIXME: support attribute on constants inside cbuffer/tbuffer. 
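The ParseTypeTrait / ParseArrayTypeTrait changes above only switch the declarator context of the type operand to TemplateTypeArg; for orientation, the expressions they parse look like the following (illustrative, relying on Clang's built-in traits):

    static_assert(__is_trivially_copyable(int), "type trait");
    static_assert(__array_rank(int[2][3]) == 2, "array type trait");
    static_assert(__array_extent(int[2][3], 1) == 3, "array type trait with index");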
@@ -88,7 +88,7 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) { T.skipToEnd(); DeclEnd = T.getCloseLocation(); BufferScope.Exit(); - Actions.HLSL().ActOnFinishHLSLBuffer(D, DeclEnd); + Actions.HLSL().ActOnFinishBuffer(D, DeclEnd); return nullptr; } } @@ -96,7 +96,7 @@ Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) { T.consumeClose(); DeclEnd = T.getCloseLocation(); BufferScope.Exit(); - Actions.HLSL().ActOnFinishHLSLBuffer(D, DeclEnd); + Actions.HLSL().ActOnFinishBuffer(D, DeclEnd); Actions.ProcessDeclAttributeList(Actions.CurScope, D, Attrs); return D; diff --git a/clang/lib/Parse/ParseObjc.cpp b/clang/lib/Parse/ParseObjc.cpp index 88bab0eb27a3e..671dcb71e51a3 100644 --- a/clang/lib/Parse/ParseObjc.cpp +++ b/clang/lib/Parse/ParseObjc.cpp @@ -375,7 +375,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, Actions.ActOnTypedefedProtocols(protocols, protocolLocs, superClassId, superClassLoc); - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; ObjCInterfaceDecl *ClsType = Actions.ActOnStartClassInterface( getCurScope(), AtLoc, nameId, nameLoc, typeParameterList, superClassId, superClassLoc, typeArgs, @@ -799,11 +799,11 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, addedToDeclSpec); // Install the property declarator into interfaceDecl. - IdentifierInfo *SelName = + const IdentifierInfo *SelName = OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier(); Selector GetterSel = PP.getSelectorTable().getNullarySelector(SelName); - IdentifierInfo *SetterName = OCDS.getSetterName(); + const IdentifierInfo *SetterName = OCDS.getSetterName(); Selector SetterSel; if (SetterName) SetterSel = PP.getSelectorTable().getSelector(1, &SetterName); @@ -1445,7 +1445,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc, return Result; } - SmallVector KeyIdents; + SmallVector KeyIdents; SmallVector KeyLocs; SmallVector ArgInfos; ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope | @@ -1541,7 +1541,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc, Declarator ParmDecl(DS, ParsedAttributesView::none(), DeclaratorContext::Prototype); ParseDeclarator(ParmDecl); - IdentifierInfo *ParmII = ParmDecl.getIdentifier(); + const IdentifierInfo *ParmII = ParmDecl.getIdentifier(); Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl); CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII, ParmDecl.getIdentifierLoc(), @@ -2133,7 +2133,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc, /*consumeLastToken=*/true)) return nullptr; - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; ObjCProtocolDecl *ProtoType = Actions.ActOnStartProtocolInterface( AtLoc, protocolName, nameLoc, ProtocolRefs.data(), ProtocolRefs.size(), ProtocolLocs.data(), EndProtoLoc, attrs, &SkipBody); @@ -3242,7 +3242,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc, SourceLocation Loc; IdentifierInfo *selIdent = ParseObjCSelectorPiece(Loc); - SmallVector KeyIdents; + SmallVector KeyIdents; SmallVector KeyLocs; ExprVector KeyExprs; @@ -3642,7 +3642,7 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) { if (Tok.isNot(tok::l_paren)) return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@selector"); - SmallVector KeyIdents; + SmallVector KeyIdents; SourceLocation sLoc; BalancedDelimiterTracker T(*this, tok::l_paren); diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp index f434e1542c801..123be476e928e 100644 --- 
a/clang/lib/Parse/ParseOpenACC.cpp +++ b/clang/lib/Parse/ParseOpenACC.cpp @@ -535,14 +535,6 @@ bool ClauseHasRequiredParens(OpenACCDirectiveKind DirKind, return getClauseParensKind(DirKind, Kind) == ClauseParensKind::Required; } -ExprResult ParseOpenACCConditionalExpr(Parser &P) { - // FIXME: It isn't clear if the spec saying 'condition' means the same as - // it does in an if/while/etc (See ParseCXXCondition), however as it was - // written with Fortran/C in mind, we're going to assume it just means an - // 'expression evaluating to boolean'. - return P.getActions().CorrectDelayedTyposInExpr(P.ParseExpression()); -} - // Skip until we see the end of pragma token, but don't consume it. This is us // just giving up on the rest of the pragma so we can continue executing. We // have to do this because 'SkipUntil' considers paren balancing, which isn't @@ -595,6 +587,23 @@ Parser::OpenACCClauseParseResult Parser::OpenACCSuccess(OpenACCClause *Clause) { return {Clause, OpenACCParseCanContinue::Can}; } +ExprResult Parser::ParseOpenACCConditionExpr() { + // FIXME: It isn't clear if the spec saying 'condition' means the same as + // it does in an if/while/etc (See ParseCXXCondition), however as it was + // written with Fortran/C in mind, we're going to assume it just means an + // 'expression evaluating to boolean'. + ExprResult ER = getActions().CorrectDelayedTyposInExpr(ParseExpression()); + + if (!ER.isUsable()) + return ER; + + Sema::ConditionResult R = + getActions().ActOnCondition(getCurScope(), ER.get()->getExprLoc(), + ER.get(), Sema::ConditionKind::Boolean); + + return R.isInvalid() ? ExprError() : R.get().second; +} + // OpenACC 3.3, section 1.7: // To simplify the specification and convey appropriate constraint information, // a pqr-list is a comma-separated list of pdr items. The one exception is a @@ -826,25 +835,35 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams( case OpenACCClauseKind::Default: { Token DefKindTok = getCurToken(); - if (expectIdentifierOrKeyword(*this)) - break; + if (expectIdentifierOrKeyword(*this)) { + Parens.skipToEnd(); + return OpenACCCanContinue(); + } ConsumeToken(); - if (getOpenACCDefaultClauseKind(DefKindTok) == - OpenACCDefaultClauseKind::Invalid) + OpenACCDefaultClauseKind DefKind = + getOpenACCDefaultClauseKind(DefKindTok); + + if (DefKind == OpenACCDefaultClauseKind::Invalid) { Diag(DefKindTok, diag::err_acc_invalid_default_clause_kind); + Parens.skipToEnd(); + return OpenACCCanContinue(); + } + ParsedClause.setDefaultDetails(DefKind); break; } case OpenACCClauseKind::If: { - ExprResult CondExpr = ParseOpenACCConditionalExpr(*this); - // An invalid expression can be just about anything, so just give up on - // this clause list. + ExprResult CondExpr = ParseOpenACCConditionExpr(); + ParsedClause.setConditionDetails(CondExpr.isUsable() ? CondExpr.get() + : nullptr); + if (CondExpr.isInvalid()) { Parens.skipToEnd(); return OpenACCCanContinue(); } + break; } case OpenACCClauseKind::CopyIn: @@ -961,9 +980,10 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams( switch (ClauseKind) { case OpenACCClauseKind::Self: { assert(DirKind != OpenACCDirectiveKind::Update); - ExprResult CondExpr = ParseOpenACCConditionalExpr(*this); - // An invalid expression can be just about anything, so just give up on - // this clause list. + ExprResult CondExpr = ParseOpenACCConditionExpr(); + ParsedClause.setConditionDetails(CondExpr.isUsable() ? 
CondExpr.get() + : nullptr); + if (CondExpr.isInvalid()) { Parens.skipToEnd(); return OpenACCCanContinue(); diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp index 814126e321d3b..480201bc06f61 100644 --- a/clang/lib/Parse/ParseOpenMP.cpp +++ b/clang/lib/Parse/ParseOpenMP.cpp @@ -21,6 +21,7 @@ #include "clang/Parse/RAIIObjectsForParser.h" #include "clang/Sema/EnterExpressionEvaluationContext.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaOpenMP.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/UniqueVector.h" @@ -87,7 +88,7 @@ class DeclDirectiveListParserHelper final { DeclDirectiveListParserHelper(Parser *P, OpenMPDirectiveKind Kind) : P(P), Kind(Kind) {} void operator()(CXXScopeSpec &SS, DeclarationNameInfo NameInfo) { - ExprResult Res = P->getActions().ActOnOpenMPIdExpression( + ExprResult Res = P->getActions().OpenMP().ActOnOpenMPIdExpression( P->getCurScope(), SS, NameInfo, Kind); if (Res.isUsable()) Identifiers.push_back(Res.get()); @@ -322,8 +323,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { SourceRange Range; TypeResult TR = ParseTypeName(&Range, DeclaratorContext::Prototype, AS); if (TR.isUsable()) { - QualType ReductionType = - Actions.ActOnOpenMPDeclareReductionType(Range.getBegin(), TR); + QualType ReductionType = Actions.OpenMP().ActOnOpenMPDeclareReductionType( + Range.getBegin(), TR); if (!ReductionType.isNull()) { ReductionTypes.push_back( std::make_pair(ReductionType, Range.getBegin())); @@ -363,8 +364,10 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { return DeclGroupPtrTy(); } - DeclGroupPtrTy DRD = Actions.ActOnOpenMPDeclareReductionDirectiveStart( - getCurScope(), Actions.getCurLexicalContext(), Name, ReductionTypes, AS); + DeclGroupPtrTy DRD = + Actions.OpenMP().ActOnOpenMPDeclareReductionDirectiveStart( + getCurScope(), Actions.getCurLexicalContext(), Name, ReductionTypes, + AS); // Parse expression and then parse initializer if any for each // correct type. @@ -375,10 +378,11 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope); // Parse expression. - Actions.ActOnOpenMPDeclareReductionCombinerStart(getCurScope(), D); + Actions.OpenMP().ActOnOpenMPDeclareReductionCombinerStart(getCurScope(), D); ExprResult CombinerResult = Actions.ActOnFinishFullExpr( ParseExpression().get(), D->getLocation(), /*DiscardedValue*/ false); - Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerResult.get()); + Actions.OpenMP().ActOnOpenMPDeclareReductionCombinerEnd( + D, CombinerResult.get()); if (CombinerResult.isInvalid() && Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) { @@ -411,8 +415,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { Scope::OpenMPDirectiveScope); // Parse expression. VarDecl *OmpPrivParm = - Actions.ActOnOpenMPDeclareReductionInitializerStart(getCurScope(), - D); + Actions.OpenMP().ActOnOpenMPDeclareReductionInitializerStart( + getCurScope(), D); // Check if initializer is omp_priv or something else. 
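For context, the directive being parsed here, with the omp_priv initializer that the code just below checks for (illustrative example, not from the patch):

    #include <limits.h>

    // Combiner uses omp_in/omp_out; the initializer clause names omp_priv.
    #pragma omp declare reduction(imin : int :                                  \
        omp_out = omp_in < omp_out ? omp_in : omp_out)                          \
        initializer(omp_priv = INT_MAX)

    int minval(const int *a, int n) {
      int m = INT_MAX;
      #pragma omp parallel for reduction(imin : m)
      for (int i = 0; i < n; ++i)
        m = a[i] < m ? a[i] : m;
      return m;
    }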
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo()->isStr("omp_priv")) { @@ -423,7 +427,7 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { ParseAssignmentExpression().get(), D->getLocation(), /*DiscardedValue*/ false); } - Actions.ActOnOpenMPDeclareReductionInitializerEnd( + Actions.OpenMP().ActOnOpenMPDeclareReductionInitializerEnd( D, InitializerResult.get(), OmpPrivParm); if (InitializerResult.isInvalid() && Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) { @@ -444,8 +448,8 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) { else TPA.Commit(); } - return Actions.ActOnOpenMPDeclareReductionDirectiveEnd(getCurScope(), DRD, - IsCorrect); + return Actions.OpenMP().ActOnOpenMPDeclareReductionDirectiveEnd( + getCurScope(), DRD, IsCorrect); } void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) { @@ -569,8 +573,8 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) { SourceRange Range; TypeResult ParsedType = parseOpenMPDeclareMapperVarDecl(Range, VName, AS); if (ParsedType.isUsable()) - MapperType = - Actions.ActOnOpenMPDeclareMapperType(Range.getBegin(), ParsedType); + MapperType = Actions.OpenMP().ActOnOpenMPDeclareMapperType(Range.getBegin(), + ParsedType); if (MapperType.isNull()) IsCorrect = false; if (!IsCorrect) { @@ -591,11 +595,13 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) { unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope | Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope; ParseScope OMPDirectiveScope(this, ScopeFlags); - Actions.StartOpenMPDSABlock(OMPD_declare_mapper, DirName, getCurScope(), Loc); + Actions.OpenMP().StartOpenMPDSABlock(OMPD_declare_mapper, DirName, + getCurScope(), Loc); // Add the mapper variable declaration. - ExprResult MapperVarRef = Actions.ActOnOpenMPDeclareMapperDirectiveVarDecl( - getCurScope(), MapperType, Range.getBegin(), VName); + ExprResult MapperVarRef = + Actions.OpenMP().ActOnOpenMPDeclareMapperDirectiveVarDecl( + getCurScope(), MapperType, Range.getBegin(), VName); // Parse map clauses. SmallVector Clauses; @@ -603,7 +609,7 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) { OpenMPClauseKind CKind = Tok.isAnnotation() ? OMPC_unknown : getOpenMPClauseKind(PP.getSpelling(Tok)); - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); OMPClause *Clause = ParseOpenMPClause(OMPD_declare_mapper, CKind, Clauses.empty()); if (Clause) @@ -613,7 +619,7 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) { // Skip ',' if any. if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } if (Clauses.empty()) { Diag(Tok, diag::err_omp_expected_clause) @@ -622,9 +628,9 @@ Parser::ParseOpenMPDeclareMapperDirective(AccessSpecifier AS) { } // Exit scope. 
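An example of the declare mapper directive whose mapper variable and map clauses are parsed above (illustrative only):

    typedef struct {
      int len;
      double *data;
    } vec_t;

    // 'v' is the mapper variable declared through
    // ActOnOpenMPDeclareMapperDirectiveVarDecl; the map clause describes how a
    // vec_t and its payload are mapped.
    #pragma omp declare mapper(deep : vec_t v) map(v, v.data[0 : v.len])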
- Actions.EndOpenMPDSABlock(nullptr); + Actions.OpenMP().EndOpenMPDSABlock(nullptr); OMPDirectiveScope.Exit(); - DeclGroupPtrTy DG = Actions.ActOnOpenMPDeclareMapperDirective( + DeclGroupPtrTy DG = Actions.OpenMP().ActOnOpenMPDeclareMapperDirective( getCurScope(), Actions.getCurLexicalContext(), MapperId, MapperType, Range.getBegin(), VName, AS, MapperVarRef.get(), Clauses); if (!IsCorrect) @@ -652,7 +658,8 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range, } Name = Actions.GetNameForDeclarator(DeclaratorInfo).getName(); - return Actions.ActOnOpenMPDeclareMapperVarDecl(getCurScope(), DeclaratorInfo); + return Actions.OpenMP().ActOnOpenMPDeclareMapperVarDecl(getCurScope(), + DeclaratorInfo); } namespace { @@ -748,7 +755,7 @@ static bool parseDeclareSimdClauses( OpenMPClauseKind CKind = getOpenMPClauseKind(ClauseName); if (CKind == OMPC_uniform || CKind == OMPC_aligned || CKind == OMPC_linear) { - Sema::OpenMPVarListDataTy Data; + SemaOpenMP::OpenMPVarListDataTy Data; SmallVectorImpl *Vars = &Uniforms; if (CKind == OMPC_aligned) { Vars = &Aligneds; @@ -768,7 +775,7 @@ static bool parseDeclareSimdClauses( assert(0 <= Data.ExtraModifier && Data.ExtraModifier <= OMPC_LINEAR_unknown && "Unexpected linear modifier."); - if (P.getActions().CheckOpenMPLinearModifier( + if (P.getActions().OpenMP().CheckOpenMPLinearModifier( static_cast(Data.ExtraModifier), Data.ExtraModifierLoc)) Data.ExtraModifier = OMPC_LINEAR_val; @@ -816,7 +823,7 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr, SourceLocation EndLoc = ConsumeAnnotationToken(); if (IsError) return Ptr; - return Actions.ActOnOpenMPDeclareSimdDirective( + return Actions.OpenMP().ActOnOpenMPDeclareSimdDirective( Ptr, BS, Simdlen.get(), Uniforms, Aligneds, Alignments, Linears, LinModifiers, Steps, SourceRange(Loc, EndLoc)); } @@ -1412,7 +1419,8 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr, return; } - OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope(); + OMPTraitInfo *ParentTI = + Actions.OpenMP().getOMPTraitInfoForSurroundingScope(); ASTContext &ASTCtx = Actions.getASTContext(); OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo(); SmallVector AdjustNothing; @@ -1445,7 +1453,7 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr, case OMPC_adjust_args: { AdjustArgsLoc = Tok.getLocation(); ConsumeToken(); - Sema::OpenMPVarListDataTy Data; + SemaOpenMP::OpenMPVarListDataTy Data; SmallVector Vars; IsError = ParseOpenMPVarList(OMPD_declare_variant, OMPC_adjust_args, Vars, Data); @@ -1486,12 +1494,12 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr, } std::optional> DeclVarData = - Actions.checkOpenMPDeclareVariantFunction( + Actions.OpenMP().checkOpenMPDeclareVariantFunction( Ptr, AssociatedFunction.get(), TI, AppendArgs.size(), SourceRange(Loc, Tok.getLocation())); if (DeclVarData && !TI.Sets.empty()) - Actions.ActOnOpenMPDeclareVariantDirective( + Actions.OpenMP().ActOnOpenMPDeclareVariantDirective( DeclVarData->first, DeclVarData->second, TI, AdjustNothing, AdjustNeedDevicePtr, AppendArgs, AdjustArgsLoc, AppendArgsLoc, SourceRange(Loc, Tok.getLocation())); @@ -1642,7 +1650,7 @@ void Parser::ParseOpenMPClauses(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind = Tok.isAnnotation() ? 
OMPC_unknown : getOpenMPClauseKind(PP.getSpelling(Tok)); - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); OMPClause *Clause = ParseOpenMPClause( DKind, CKind, !FirstClauses[unsigned(CKind)].getInt()); SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, @@ -1651,13 +1659,13 @@ void Parser::ParseOpenMPClauses(OpenMPDirectiveKind DKind, if (Clause != nullptr) Clauses.push_back(Clause); if (Tok.is(tok::annot_pragma_openmp_end)) { - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); break; } // Skip ',' if any. if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } } @@ -1750,12 +1758,13 @@ void Parser::ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, Assumptions.push_back(Assumption); } - Actions.ActOnOpenMPAssumesDirective(Loc, DKind, Assumptions, SkippedClauses); + Actions.OpenMP().ActOnOpenMPAssumesDirective(Loc, DKind, Assumptions, + SkippedClauses); } void Parser::ParseOpenMPEndAssumesDirective(SourceLocation Loc) { - if (Actions.isInOpenMPAssumeScope()) - Actions.ActOnOpenMPEndAssumesDirective(); + if (Actions.OpenMP().isInOpenMPAssumeScope()) + Actions.OpenMP().ActOnOpenMPEndAssumesDirective(); else Diag(Loc, diag::err_expected_begin_assumes); } @@ -1811,7 +1820,7 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) { } void Parser::ParseOMPDeclareTargetClauses( - Sema::DeclareTargetContextInfo &DTCI) { + SemaOpenMP::DeclareTargetContextInfo &DTCI) { SourceLocation DeviceTypeLoc; bool RequiresToOrLinkOrIndirectClause = false; bool HasToOrLinkOrIndirectClause = false; @@ -1910,11 +1919,11 @@ void Parser::ParseOMPDeclareTargetClauses( if (DTCI.Kind == OMPD_declare_target || HasIdentifier) { auto &&Callback = [this, MT, &DTCI](CXXScopeSpec &SS, DeclarationNameInfo NameInfo) { - NamedDecl *ND = - Actions.lookupOpenMPDeclareTargetName(getCurScope(), SS, NameInfo); + NamedDecl *ND = Actions.OpenMP().lookupOpenMPDeclareTargetName( + getCurScope(), SS, NameInfo); if (!ND) return; - Sema::DeclareTargetContextInfo::MapInfo MI{MT, NameInfo.getLoc()}; + SemaOpenMP::DeclareTargetContextInfo::MapInfo MI{MT, NameInfo.getLoc()}; bool FirstMapping = DTCI.ExplicitlyMapped.try_emplace(ND, MI).second; if (!FirstMapping) Diag(NameInfo.getLoc(), diag::err_omp_declare_target_multiple) @@ -2090,8 +2099,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( skipUntilPragmaOpenMPEnd(DKind); // Skip the last annot_pragma_openmp_end. ConsumeAnnotationToken(); - return Actions.ActOnOpenMPThreadprivateDirective(Loc, - Helper.getIdentifiers()); + return Actions.OpenMP().ActOnOpenMPThreadprivateDirective( + Loc, Helper.getIdentifiers()); } break; } @@ -2109,7 +2118,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( OpenMPClauseKind CKind = Tok.isAnnotation() ? OMPC_unknown : getOpenMPClauseKind(PP.getSpelling(Tok)); - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); OMPClause *Clause = ParseOpenMPClause( OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt()); SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, @@ -2118,20 +2127,20 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( if (Clause != nullptr) Clauses.push_back(Clause); if (Tok.is(tok::annot_pragma_openmp_end)) { - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); break; } // Skip ',' if any. 
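For reference, the declare variant form whose match/adjust_args/append_args clauses are handled in this region (example only):

    int sum_gpu(const int *x, int n);   // variant implementation

    // checkOpenMPDeclareVariantFunction pairs the base function below with the
    // variant named in the directive when the 'match' context applies.
    #pragma omp declare variant(sum_gpu) match(device = {kind(gpu)})
    int sum(const int *x, int n);       // base function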
if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } skipUntilPragmaOpenMPEnd(DKind); } // Skip the last annot_pragma_openmp_end. ConsumeAnnotationToken(); - return Actions.ActOnOpenMPAllocateDirective(Loc, Helper.getIdentifiers(), - Clauses); + return Actions.OpenMP().ActOnOpenMPAllocateDirective( + Loc, Helper.getIdentifiers(), Clauses); } break; } @@ -2150,7 +2159,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( OpenMPClauseKind CKind = Tok.isAnnotation() ? OMPC_unknown : getOpenMPClauseKind(PP.getSpelling(Tok)); - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); OMPClause *Clause = ParseOpenMPClause( OMPD_requires, CKind, !FirstClauses[unsigned(CKind)].getInt()); SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, @@ -2159,13 +2168,13 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( if (Clause != nullptr) Clauses.push_back(Clause); if (Tok.is(tok::annot_pragma_openmp_end)) { - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); break; } // Skip ',' if any. if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } // Consume final annot_pragma_openmp_end if (Clauses.empty()) { @@ -2175,14 +2184,15 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( return nullptr; } ConsumeAnnotationToken(); - return Actions.ActOnOpenMPRequiresDirective(StartLoc, Clauses); + return Actions.OpenMP().ActOnOpenMPRequiresDirective(StartLoc, Clauses); } case OMPD_error: { SmallVector Clauses; SourceLocation StartLoc = ConsumeToken(); ParseOpenMPClauses(DKind, Clauses, StartLoc); - Actions.ActOnOpenMPErrorDirective(Clauses, StartLoc, SourceLocation(), - /*InExContext = */ false); + Actions.OpenMP().ActOnOpenMPErrorDirective(Clauses, StartLoc, + SourceLocation(), + /*InExContext = */ false); break; } case OMPD_assumes: @@ -2217,7 +2227,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( // { #pragma omp end declare variant } // ConsumeToken(); - OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope(); + OMPTraitInfo *ParentTI = + Actions.OpenMP().getOMPTraitInfoForSurroundingScope(); ASTContext &ASTCtx = Actions.getASTContext(); OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo(); if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI)) { @@ -2248,7 +2259,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( /* ConstructTraits */ ArrayRef()); if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) { - Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI); + Actions.OpenMP().ActOnOpenMPBeginDeclareVariant(Loc, TI); break; } @@ -2275,8 +2286,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( break; } case OMPD_end_declare_variant: { - if (Actions.isInOpenMPDeclareVariantScope()) - Actions.ActOnOpenMPEndDeclareVariant(); + if (Actions.OpenMP().isInOpenMPDeclareVariantScope()) + Actions.OpenMP().ActOnOpenMPEndDeclareVariant(); else Diag(Loc, diag::err_expected_begin_declare_variant); ConsumeToken(); @@ -2331,7 +2342,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( case OMPD_declare_target: { SourceLocation DTLoc = ConsumeAnyToken(); bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end); - Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc); + SemaOpenMP::DeclareTargetContextInfo DTCI(DKind, DTLoc); if (HasClauses) 
ParseOMPDeclareTargetClauses(DTCI); bool HasImplicitMappings = DKind == OMPD_begin_declare_target || @@ -2342,24 +2353,24 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl( ConsumeAnyToken(); if (HasImplicitMappings) { - Actions.ActOnStartOpenMPDeclareTargetContext(DTCI); + Actions.OpenMP().ActOnStartOpenMPDeclareTargetContext(DTCI); return nullptr; } - Actions.ActOnFinishedOpenMPDeclareTargetContext(DTCI); + Actions.OpenMP().ActOnFinishedOpenMPDeclareTargetContext(DTCI); llvm::SmallVector Decls; for (auto &It : DTCI.ExplicitlyMapped) Decls.push_back(It.first); return Actions.BuildDeclaratorGroup(Decls); } case OMPD_end_declare_target: { - if (!Actions.isInOpenMPDeclareTargetContext()) { + if (!Actions.OpenMP().isInOpenMPDeclareTargetContext()) { Diag(Tok, diag::err_omp_unexpected_directive) << 1 << getOpenMPDirectiveName(DKind); break; } - const Sema::DeclareTargetContextInfo &DTCI = - Actions.ActOnOpenMPEndDeclareTargetDirective(); + const SemaOpenMP::DeclareTargetContextInfo &DTCI = + Actions.OpenMP().ActOnOpenMPEndDeclareTargetDirective(); ParseOMPEndDeclareTargetDirective(DTCI.Kind, DKind, DTCI.Loc); return nullptr; } @@ -2683,7 +2694,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( if (!ParseOpenMPSimpleVarList(DKind, Helper, /*AllowScopeSpecifier=*/false)) { skipUntilPragmaOpenMPEnd(DKind); - DeclGroupPtrTy Res = Actions.ActOnOpenMPThreadprivateDirective( + DeclGroupPtrTy Res = Actions.OpenMP().ActOnOpenMPThreadprivateDirective( Loc, Helper.getIdentifiers()); Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation()); } @@ -2710,7 +2721,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( OpenMPClauseKind CKind = Tok.isAnnotation() ? OMPC_unknown : getOpenMPClauseKind(PP.getSpelling(Tok)); - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); OMPClause *Clause = ParseOpenMPClause( OMPD_allocate, CKind, !FirstClauses[unsigned(CKind)].getInt()); SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, @@ -2719,17 +2730,17 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( if (Clause != nullptr) Clauses.push_back(Clause); if (Tok.is(tok::annot_pragma_openmp_end)) { - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); break; } // Skip ',' if any. if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } skipUntilPragmaOpenMPEnd(DKind); } - DeclGroupPtrTy Res = Actions.ActOnOpenMPAllocateDirective( + DeclGroupPtrTy Res = Actions.OpenMP().ActOnOpenMPAllocateDirective( Loc, Helper.getIdentifiers(), Clauses); Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation()); } @@ -2875,7 +2886,8 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( if (isOpenMPSimdDirective(DKind)) ScopeFlags |= Scope::OpenMPSimdDirectiveScope; ParseScope OMPDirectiveScope(this, ScopeFlags); - Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc); + Actions.OpenMP().StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), + Loc); while (Tok.isNot(tok::annot_pragma_openmp_end)) { // If we are parsing for a directive within a metadirective, the directive @@ -2909,7 +2921,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( } // No more implicit clauses allowed. 
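The declarative directives dispatched through SemaOpenMP in these hunks, shown in source form (illustrative):

    #pragma omp requires unified_shared_memory

    #pragma omp declare target
    int on_device(int x);              // explicitly mapped declaration
    #pragma omp end declare target

    #pragma omp begin declare target device_type(nohost)
    static int device_only_counter;
    #pragma omp end declare target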
ImplicitClauseAllowed = false; - Actions.StartOpenMPClause(CKind); + Actions.OpenMP().StartOpenMPClause(CKind); HasImplicitClause = false; OMPClause *Clause = ParseOpenMPClause( DKind, CKind, !FirstClauses[unsigned(CKind)].getInt()); @@ -2922,7 +2934,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( // Skip ',' if any. if (Tok.is(tok::comma)) ConsumeToken(); - Actions.EndOpenMPClause(); + Actions.OpenMP().EndOpenMPClause(); } // End location of the directive. EndLoc = Tok.getLocation(); @@ -2953,7 +2965,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( StmtResult AssociatedStmt; if (HasAssociatedStatement) { // The body is a block scope like in Lambdas and Blocks. - Actions.ActOnOpenMPRegionStart(DKind, getCurScope()); + Actions.OpenMP().ActOnOpenMPRegionStart(DKind, getCurScope()); // FIXME: We create a bogus CompoundStmt scope to hold the contents of // the captured region. Code elsewhere assumes that any FunctionScopeInfo // should have at least one compound statement scope within it. @@ -2964,30 +2976,33 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( if (AssociatedStmt.isUsable() && isOpenMPLoopDirective(DKind) && getLangOpts().OpenMPIRBuilder) - AssociatedStmt = Actions.ActOnOpenMPLoopnest(AssociatedStmt.get()); + AssociatedStmt = + Actions.OpenMP().ActOnOpenMPLoopnest(AssociatedStmt.get()); } - AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses); + AssociatedStmt = + Actions.OpenMP().ActOnOpenMPRegionEnd(AssociatedStmt, Clauses); } else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data || DKind == OMPD_target_exit_data) { - Actions.ActOnOpenMPRegionStart(DKind, getCurScope()); + Actions.OpenMP().ActOnOpenMPRegionStart(DKind, getCurScope()); AssociatedStmt = (Sema::CompoundScopeRAII(Actions), Actions.ActOnCompoundStmt(Loc, Loc, std::nullopt, /*isStmtExpr=*/false)); - AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses); + AssociatedStmt = + Actions.OpenMP().ActOnOpenMPRegionEnd(AssociatedStmt, Clauses); } - Directive = Actions.ActOnOpenMPExecutableDirective( + Directive = Actions.OpenMP().ActOnOpenMPExecutableDirective( DKind, DirName, CancelRegion, Clauses, AssociatedStmt.get(), Loc, EndLoc); // Exit scope. - Actions.EndOpenMPDSABlock(Directive.get()); + Actions.OpenMP().EndOpenMPDSABlock(Directive.get()); OMPDirectiveScope.Exit(); break; } case OMPD_declare_target: { SourceLocation DTLoc = ConsumeAnyToken(); bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end); - Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc); + SemaOpenMP::DeclareTargetContextInfo DTCI(DKind, DTLoc); if (HasClauses) ParseOMPDeclareTargetClauses(DTCI); bool HasImplicitMappings = @@ -3003,7 +3018,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective( // Skip the last annot_pragma_openmp_end. 
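A minimal example of an executable directive with an associated statement, the path that now runs through OpenMP().ActOnOpenMPRegionStart / ActOnOpenMPRegionEnd / ActOnOpenMPExecutableDirective (illustrative):

    void saxpy(int n, float a, const float *x, float *y) {
      #pragma omp target teams distribute parallel for \
          map(to : x[0 : n]) map(tofrom : y[0 : n])
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }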
ConsumeAnyToken(); - Actions.ActOnFinishedOpenMPDeclareTargetContext(DTCI); + Actions.OpenMP().ActOnFinishedOpenMPDeclareTargetContext(DTCI); break; } case OMPD_declare_simd: @@ -3118,7 +3133,7 @@ OMPClause *Parser::ParseOpenMPSizesClause() { T.consumeClose(); - return Actions.ActOnOpenMPSizesClause( + return Actions.OpenMP().ActOnOpenMPSizesClause( ValExprs, ClauseNameLoc, T.getOpenLocation(), T.getCloseLocation()); } @@ -3130,7 +3145,7 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) { BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end); if (T.expectAndConsume(diag::err_expected_lparen_after, "uses_allocator")) return nullptr; - SmallVector Data; + SmallVector Data; do { CXXScopeSpec SS; Token Replacement; @@ -3144,7 +3159,7 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) { StopBeforeMatch); break; } - Sema::UsesAllocatorsData &D = Data.emplace_back(); + SemaOpenMP::UsesAllocatorsData &D = Data.emplace_back(); D.Allocator = Allocator.get(); if (Tok.is(tok::l_paren)) { BalancedDelimiterTracker T(*this, tok::l_paren, @@ -3169,8 +3184,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) { ConsumeAnyToken(); } while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)); T.consumeClose(); - return Actions.ActOnOpenMPUsesAllocatorClause(Loc, T.getOpenLocation(), - T.getCloseLocation(), Data); + return Actions.OpenMP().ActOnOpenMPUsesAllocatorClause( + Loc, T.getOpenLocation(), T.getCloseLocation(), Data); } /// Parsing of OpenMP clauses. @@ -3538,15 +3553,16 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, if (ParseOnly) return nullptr; - return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc); + return Actions.OpenMP().ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, + LLoc, RLoc); } /// Parse indirect clause for '#pragma omp declare target' directive. /// 'indirect' '[' '(' invoked-by-fptr ')' ']' /// where invoked-by-fptr is a constant boolean expression that evaluates to /// true or false at compile time. 
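As an illustration of the 'sizes' clause parsed by ParseOpenMPSizesClause above (the uses_allocators clause below follows the same ParseOnly convention), an example not taken from the patch:

    void tile2d(int n, double *a) {
      // One trip count per associated loop.
      #pragma omp tile sizes(8, 8)
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
          a[i * n + j] *= 2.0;
    }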
-bool Parser::ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI, - bool ParseOnly) { +bool Parser::ParseOpenMPIndirectClause( + SemaOpenMP::DeclareTargetContextInfo &DTCI, bool ParseOnly) { SourceLocation Loc = ConsumeToken(); SourceLocation RLoc; @@ -3721,15 +3737,16 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind, return nullptr; if (Kind == OMPC_init) - return Actions.ActOnOpenMPInitClause(InteropVarExpr.get(), InteropInfo, Loc, - T.getOpenLocation(), VarLoc, RLoc); + return Actions.OpenMP().ActOnOpenMPInitClause( + InteropVarExpr.get(), InteropInfo, Loc, T.getOpenLocation(), VarLoc, + RLoc); if (Kind == OMPC_use) - return Actions.ActOnOpenMPUseClause(InteropVarExpr.get(), Loc, - T.getOpenLocation(), VarLoc, RLoc); + return Actions.OpenMP().ActOnOpenMPUseClause( + InteropVarExpr.get(), Loc, T.getOpenLocation(), VarLoc, RLoc); if (Kind == OMPC_destroy) - return Actions.ActOnOpenMPDestroyClause(InteropVarExpr.get(), Loc, - T.getOpenLocation(), VarLoc, RLoc); + return Actions.OpenMP().ActOnOpenMPDestroyClause( + InteropVarExpr.get(), Loc, T.getOpenLocation(), VarLoc, RLoc); llvm_unreachable("Unexpected interop variable clause."); } @@ -3787,8 +3804,8 @@ OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) { }; } - return Actions.ActOnOpenMPXAttributeClause(Attrs, Loc, T.getOpenLocation(), - T.getCloseLocation()); + return Actions.OpenMP().ActOnOpenMPXAttributeClause( + Attrs, Loc, T.getOpenLocation(), T.getCloseLocation()); } /// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'. @@ -3823,9 +3840,8 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind, << getOpenMPClauseName(OMPC_default) << "5.1"; return nullptr; } - return Actions.ActOnOpenMPSimpleClause(Kind, Val->Type, - Val->TypeLoc, Val->LOpen, - Val->Loc, Val->RLoc); + return Actions.OpenMP().ActOnOpenMPSimpleClause( + Kind, Val->Type, Val->TypeLoc, Val->LOpen, Val->Loc, Val->RLoc); } /// Parsing of OpenMP clauses like 'ordered'. @@ -3860,7 +3876,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) { if (ParseOnly) return nullptr; - return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation()); + return Actions.OpenMP().ActOnOpenMPClause(Kind, Loc, Tok.getLocation()); } /// Parsing of OpenMP clauses with single expressions and some additional @@ -4118,7 +4134,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, if (ParseOnly) return nullptr; - return Actions.ActOnOpenMPSingleExprWithArgClause( + return Actions.OpenMP().ActOnOpenMPSingleExprWithArgClause( Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc, RLoc); } @@ -4184,7 +4200,7 @@ static OpenMPMapModifierKind isMapModifier(Parser &P) { } /// Parse the mapper modifier in map, to, and from clauses. -bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) { +bool Parser::parseMapperModifier(SemaOpenMP::OpenMPVarListDataTy &Data) { // Parse '('. BalancedDelimiterTracker T(*this, tok::l_paren, tok::colon); if (T.expectAndConsume(diag::err_expected_lparen_after, "mapper")) { @@ -4216,7 +4232,7 @@ bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) { /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) | /// present -bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) { +bool Parser::parseMapTypeModifiers(SemaOpenMP::OpenMPVarListDataTy &Data) { while (getCurToken().isNot(tok::colon)) { OpenMPMapModifierKind TypeModifier = isMapModifier(*this); if (TypeModifier == OMPC_MAP_MODIFIER_always || @@ -4282,7 +4298,7 @@ static OpenMPMapClauseKind isMapType(Parser &P) { /// Parse map-type in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type ::= to | from | tofrom | alloc | release | delete -static void parseMapType(Parser &P, Sema::OpenMPVarListDataTy &Data) { +static void parseMapType(Parser &P, SemaOpenMP::OpenMPVarListDataTy &Data) { Token Tok = P.getCurToken(); if (Tok.is(tok::colon)) { P.Diag(Tok, diag::err_omp_map_type_missing); @@ -4306,7 +4322,7 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() { return ExprError(); SourceLocation LLoc = T.getOpenLocation(); - SmallVector Data; + SmallVector Data; while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) { // Check if the type parsing is required. ParsedType IteratorType; @@ -4380,7 +4396,7 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() { if (Tok.is(tok::comma)) ConsumeToken(); - Sema::OMPIteratorData &D = Data.emplace_back(); + SemaOpenMP::OMPIteratorData &D = Data.emplace_back(); D.DeclIdent = II; D.DeclIdentLoc = IdLoc; D.Type = IteratorType; @@ -4397,12 +4413,12 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() { if (!T.consumeClose()) RLoc = T.getCloseLocation(); - return Actions.ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc, LLoc, RLoc, - Data); + return Actions.OpenMP().ActOnOMPIteratorExpr(getCurScope(), IteratorKwLoc, + LLoc, RLoc, Data); } bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind, - Sema::OpenMPVarListDataTy &Data, + SemaOpenMP::OpenMPVarListDataTy &Data, const LangOptions &LangOpts) { // Currently the only reserved locator is 'omp_all_memory' which is only // allowed on a depend clause. @@ -4430,7 +4446,7 @@ bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind, /// Parse step size expression. Returns true if parsing is successfull, /// otherwise returns false. 
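The surface syntax covered by parseMapTypeModifiers/parseMapType and ParseOpenMPIteratorsExpr, for orientation (examples only):

    void push(int n, double *a) {
      // map-type-modifier 'always' plus map-type 'tofrom' and an array section.
      #pragma omp target map(always, tofrom : a[0 : n])
      for (int i = 0; i < n; ++i)
        a[i] += 1.0;
    }

    void post(int n, int *flags) {
      // Iterator modifier as parsed by ParseOpenMPIteratorsExpr.
      #pragma omp task depend(iterator(i = 0 : n), in : flags[i])
      { /* ... */ }
    }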
-static bool parseStepSize(Parser &P, Sema::OpenMPVarListDataTy &Data, +static bool parseStepSize(Parser &P, SemaOpenMP::OpenMPVarListDataTy &Data, OpenMPClauseKind CKind, SourceLocation ELoc) { ExprResult Tail = P.ParseAssignmentExpression(); Sema &Actions = P.getActions(); @@ -4451,7 +4467,7 @@ static bool parseStepSize(Parser &P, Sema::OpenMPVarListDataTy &Data, bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl &Vars, - Sema::OpenMPVarListDataTy &Data) { + SemaOpenMP::OpenMPVarListDataTy &Data) { UnqualifiedId UnqualifiedReductionId; bool InvalidReductionId = false; bool IsInvalidMapperModifier = false; @@ -4961,7 +4977,7 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, SourceLocation Loc = Tok.getLocation(); SourceLocation LOpen = ConsumeToken(); SmallVector Vars; - Sema::OpenMPVarListDataTy Data; + SemaOpenMP::OpenMPVarListDataTy Data; if (ParseOpenMPVarList(DKind, Kind, Vars, Data)) return nullptr; @@ -4969,5 +4985,5 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, if (ParseOnly) return nullptr; OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc); - return Actions.ActOnOpenMPVarListClause(Kind, Vars, Locs, Data); + return Actions.OpenMP().ActOnOpenMPVarListClause(Kind, Vars, Locs, Data); } diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp index 0f692e2146a49..3979f75b6020d 100644 --- a/clang/lib/Parse/ParsePragma.cpp +++ b/clang/lib/Parse/ParsePragma.cpp @@ -21,6 +21,7 @@ #include "clang/Parse/RAIIObjectsForParser.h" #include "clang/Sema/EnterExpressionEvaluationContext.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaCUDA.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringSwitch.h" #include @@ -3900,8 +3901,8 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma( } if (Info->isStr("begin")) - Actions.PushForceCUDAHostDevice(); - else if (!Actions.PopForceCUDAHostDevice()) + Actions.CUDA().PushForceHostDevice(); + else if (!Actions.CUDA().PopForceHostDevice()) PP.Diag(FirstTok.getLocation(), diag::err_pragma_cannot_end_force_cuda_host_device); diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp index 76a3fa8f2627d..629421c01d17d 100644 --- a/clang/lib/Parse/ParseStmt.cpp +++ b/clang/lib/Parse/ParseStmt.cpp @@ -22,6 +22,7 @@ #include "clang/Sema/DeclSpec.h" #include "clang/Sema/EnterExpressionEvaluationContext.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaOpenMP.h" #include "clang/Sema/TypoCorrection.h" #include "llvm/ADT/STLExtras.h" #include @@ -2301,7 +2302,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) { // In OpenMP loop region loop control variable must be captured and be // private. Perform analysis of first part (if any). 
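The pragma handled by PragmaForceCUDAHostDeviceHandler, now dispatched to SemaCUDA::PushForceHostDevice / PopForceHostDevice, is spelled like this in source (example):

    #pragma clang force_cuda_host_device begin
    // Declarations in this region are implicitly treated as __host__ __device__.
    inline int clamp01(int x) { return x < 0 ? 0 : (x > 1 ? 1 : x); }
    #pragma clang force_cuda_host_device end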
if (getLangOpts().OpenMP && FirstPart.isUsable()) { - Actions.ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get()); + Actions.OpenMP().ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get()); } } diff --git a/clang/lib/Parse/ParseTemplate.cpp b/clang/lib/Parse/ParseTemplate.cpp index d4897f8f66072..b07ce451e878e 100644 --- a/clang/lib/Parse/ParseTemplate.cpp +++ b/clang/lib/Parse/ParseTemplate.cpp @@ -313,7 +313,7 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, return nullptr; } - IdentifierInfo *Id = Result.Identifier; + const IdentifierInfo *Id = Result.Identifier; SourceLocation IdLoc = Result.getBeginLoc(); DiagnoseAndSkipCXX11Attributes(); @@ -805,10 +805,12 @@ NamedDecl *Parser::ParseTemplateTemplateParameter(unsigned Depth, // identifier, comma, or greater. Provide a fixit if the identifier, comma, // or greater appear immediately or after 'struct'. In the latter case, // replace the keyword with 'class'. + bool TypenameKeyword = false; if (!TryConsumeToken(tok::kw_class)) { bool Replace = Tok.isOneOf(tok::kw_typename, tok::kw_struct); const Token &Next = Tok.is(tok::kw_struct) ? NextToken() : Tok; if (Tok.is(tok::kw_typename)) { + TypenameKeyword = true; Diag(Tok.getLocation(), getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_template_template_param_typename @@ -878,10 +880,9 @@ NamedDecl *Parser::ParseTemplateTemplateParameter(unsigned Depth, } } - return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc, - ParamList, EllipsisLoc, - ParamName, NameLoc, Depth, - Position, EqualLoc, DefaultArg); + return Actions.ActOnTemplateTemplateParameter( + getCurScope(), TemplateLoc, ParamList, TypenameKeyword, EllipsisLoc, + ParamName, NameLoc, Depth, Position, EqualLoc, DefaultArg); } /// ParseNonTypeTemplateParameter - Handle the parsing of non-type @@ -1289,7 +1290,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, // later. Tok.setKind(tok::annot_template_id); - IdentifierInfo *TemplateII = + const IdentifierInfo *TemplateII = TemplateName.getKind() == UnqualifiedIdKind::IK_Identifier ? TemplateName.Identifier : nullptr; diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp index cc0e41ed221c4..ef46fc74cedc1 100644 --- a/clang/lib/Parse/Parser.cpp +++ b/clang/lib/Parse/Parser.cpp @@ -1404,6 +1404,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, // Parse function body eagerly if it is either '= delete;' or '= default;' as // ActOnStartOfFunctionDef needs to know whether the function is deleted. + StringLiteral *DeletedMessage = nullptr; Sema::FnBodyKind BodyKind = Sema::FnBodyKind::Other; SourceLocation KWLoc; if (TryConsumeToken(tok::equal)) { @@ -1415,6 +1416,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, : diag::ext_defaulted_deleted_function) << 1 /* deleted */; BodyKind = Sema::FnBodyKind::Delete; + DeletedMessage = ParseCXXDeletedFunctionMessage(); } else if (TryConsumeToken(tok::kw_default, KWLoc)) { Diag(KWLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_defaulted_deleted_function @@ -1439,7 +1441,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, // Tell the actions module that we have entered a function definition with the // specified Declarator for the function. - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D, TemplateInfo.TemplateParams ? 
*TemplateInfo.TemplateParams @@ -1473,7 +1475,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, D.getMutableDeclSpec().abort(); if (BodyKind != Sema::FnBodyKind::Other) { - Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind); + Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind, DeletedMessage); Stmt *GeneratedBody = Res ? Res->getBody() : nullptr; Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false); return Res; diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index 7db3c2f1e2bd7..75e80559d91a4 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -1,5 +1,6 @@ set(LLVM_LINK_COMPONENTS Core + Demangle FrontendHLSL FrontendOpenMP MC diff --git a/clang/lib/Sema/CodeCompleteConsumer.cpp b/clang/lib/Sema/CodeCompleteConsumer.cpp index 350bd78b57107..91713d71786ee 100644 --- a/clang/lib/Sema/CodeCompleteConsumer.cpp +++ b/clang/lib/Sema/CodeCompleteConsumer.cpp @@ -854,7 +854,8 @@ StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const { if (IdentifierInfo *Id = Name.getAsIdentifierInfo()) return Id->getName(); if (Name.isObjCZeroArgSelector()) - if (IdentifierInfo *Id = Name.getObjCSelector().getIdentifierInfoForSlot(0)) + if (const IdentifierInfo *Id = + Name.getObjCSelector().getIdentifierInfoForSlot(0)) return Id->getName(); Saved = Name.getAsString(); diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp index 09b56fddea882..77a00e64c3ee5 100644 --- a/clang/lib/Sema/Sema.cpp +++ b/clang/lib/Sema/Sema.cpp @@ -41,10 +41,13 @@ #include "clang/Sema/RISCVIntrinsicManager.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaConsumer.h" #include "clang/Sema/SemaHLSL.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaOpenACC.h" +#include "clang/Sema/SemaOpenMP.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/TemplateDeduction.h" #include "clang/Sema/TemplateInstCallback.h" #include "clang/Sema/TypoCorrection.h" @@ -91,9 +94,8 @@ DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { return nullptr; } -IdentifierInfo * -Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, - unsigned int Index) { +IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName( + const IdentifierInfo *ParamName, unsigned int Index) { std::string InventedName; llvm::raw_string_ostream OS(InventedName); @@ -199,8 +201,11 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr), CurScope(nullptr), Ident_super(nullptr), + CUDAPtr(std::make_unique(*this)), HLSLPtr(std::make_unique(*this)), OpenACCPtr(std::make_unique(*this)), + OpenMPPtr(std::make_unique(*this)), + SYCLPtr(std::make_unique(*this)), MSPointerToMemberRepresentationMethod( LangOpts.getMSPointerToMemberRepresentationMethod()), MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()), @@ -223,9 +228,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, StringWithUTF8StringMethod(nullptr), ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), - DictionaryWithObjectsMethod(nullptr), CodeCompleter(CodeCompleter), - VarDataSharingAttributesStack(nullptr), - SyclIntHeader(nullptr), SyclIntFooter(nullptr) { + DictionaryWithObjectsMethod(nullptr), CodeCompleter(CodeCompleter) { assert(pp.TUKind == 
TUKind); TUScope = nullptr; @@ -250,7 +253,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, nullptr, ExpressionEvaluationContextRecord::EK_Other); // Initialization of data sharing attributes stack for OpenMP - InitDataSharingAttributesStack(); + OpenMP().InitDataSharingAttributesStack(); std::unique_ptr Callbacks = std::make_unique(); @@ -526,7 +529,7 @@ Sema::~Sema() { threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); // Destroys data sharing attributes stack for OpenMP - DestroyDataSharingAttributesStack(); + OpenMP().DestroyDataSharingAttributesStack(); // Detach from the PP callback handler which outlives Sema since it's owned // by the preprocessor. @@ -1125,16 +1128,16 @@ void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { if (getLangOpts().SYCLIsDevice) { // Set the names of the kernels, now that the names have settled down. This // needs to happen before we generate the integration headers. - SetSYCLKernelNames(); + SYCL().SetSYCLKernelNames(); // Make sure that the footer is emitted before header, since only after the // footer is emitted is it known that translation unit contains device // global variables. - if (SyclIntFooter != nullptr) - SyclIntFooter->emit(getLangOpts().SYCLIntFooter); + if (SYCL().hasSyclIntegrationFooter()) + SYCL().getSyclIntegrationFooter().emit(getLangOpts().SYCLIntFooter); // Emit SYCL integration header for current translation unit if needed - if (SyclIntHeader != nullptr) - SyclIntHeader->emit(getLangOpts().SYCLIntHeader); - MarkDevices(); + if (SYCL().hasSyclIntegrationHeader()) + SYCL().getSyclIntegrationHeader().emit(getLangOpts().SYCLIntHeader); + SYCL().MarkDevices(); } emitDeferredDiags(); @@ -1199,7 +1202,7 @@ void Sema::ActOnEndOfTranslationUnit() { DiagnoseUnterminatedPragmaAlignPack(); DiagnoseUnterminatedPragmaAttribute(); - DiagnoseUnterminatedOpenMPDeclareTarget(); + OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget(); // All delayed member exception specs should be checked or we end up accepting // incompatible declarations. @@ -1678,15 +1681,15 @@ bool Sema::hasUncompilableErrorOccurred() const { // Print notes showing how we can reach FD starting from an a priori // known-callable function. static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) { - auto FnIt = S.DeviceKnownEmittedFns.find(FD); - while (FnIt != S.DeviceKnownEmittedFns.end()) { + auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD); + while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) { // Respect error limit. 
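A minimal, self-contained sketch of the ownership pattern the constructor and destructor changes above introduce; the names SemaLike and OpenMPComponent are invented for illustration and are not the real Sema API:

    #include <memory>

    // Stand-in for the per-translation-unit OpenMP state owned by Sema.
    struct OpenMPComponent {
      void InitDataSharingAttributesStack() {}
      void DestroyDataSharingAttributesStack() {}
    };

    class SemaLike {
      std::unique_ptr<OpenMPComponent> OpenMPPtr;

    public:
      SemaLike() : OpenMPPtr(std::make_unique<OpenMPComponent>()) {
        // Call sites name the component explicitly, mirroring
        // OpenMP().InitDataSharingAttributesStack() in the patch.
        OpenMP().InitDataSharingAttributesStack();
      }
      ~SemaLike() { OpenMP().DestroyDataSharingAttributesStack(); }
      OpenMPComponent &OpenMP() { return *OpenMPPtr; }
    };

Keeping one sub-object per language extension makes the dependency explicit at every call site and shrinks the monolithic Sema interface.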
if (S.Diags.hasFatalErrorOccurred()) return; DiagnosticBuilder Builder( S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); Builder << FnIt->second.FD; - FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD); + FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD); } } @@ -1760,15 +1763,16 @@ class DeferredDiagnosticsEmitter void visitUsedDecl(SourceLocation Loc, Decl *D) { if (S.LangOpts.SYCLIsDevice && ShouldEmitRootNode) { if (auto *VD = dyn_cast(D)) { - if (!S.checkAllowedSYCLInitializer(VD) && - !S.isTypeDecoratedWithDeclAttribute( - VD->getType())) { + if (!S.SYCL().checkAllowedSYCLInitializer(VD) && + !S.SYCL() + .isTypeDecoratedWithDeclAttribute< + SYCLGlobalVariableAllowedAttr>(VD->getType())) { S.Diag(Loc, diag::err_sycl_restrict) - << Sema::KernelConstStaticVariable; + << SemaSYCL::KernelConstStaticVariable; return; } if (!VD->hasInit() && - S.isTypeDecoratedWithDeclAttribute( + S.SYCL().isTypeDecoratedWithDeclAttribute( VD->getType()) && !VD->hasAttr()) S.Diag(Loc, diag::err_sycl_external_global); @@ -1816,12 +1820,12 @@ class DeferredDiagnosticsEmitter // Finalize analysis of OpenMP-specific constructs. if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && (ShouldEmitRootNode || InOMPDeviceContext)) - S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); + S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); // Finalize analysis of SYCL-specific constructs. if (Caller && S.LangOpts.SYCLIsDevice) - S.finalizeSYCLDelayedAnalysis(Caller, FD, Loc, RootReason); + S.SYCL().finalizeSYCLDelayedAnalysis(Caller, FD, Loc, RootReason); if (Caller) - S.DeviceKnownEmittedFns[FD] = {Caller, Loc}; + S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc}; // Always emit deferred diagnostics for the direct users. This does not // lead to explosion of diagnostics since each user is visited at most // twice. @@ -1929,8 +1933,8 @@ void Sema::emitDeferredDiags() { // which other not-known-emitted functions. // // When we see something which is illegal if the current function is emitted -// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or -// CheckCUDACall), we first check if the current function is known-emitted. If +// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or +// CheckCall), we first check if the current function is known-emitted. If // so, we immediately output the diagnostic. // // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags @@ -1990,15 +1994,18 @@ Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) { FD = FD ? FD : getCurFunctionDecl(); if (LangOpts.OpenMP) return LangOpts.OpenMPIsTargetDevice - ? diagIfOpenMPDeviceCode(Loc, DiagID, FD) - : diagIfOpenMPHostCode(Loc, DiagID, FD); + ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD) + : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD); + if (getLangOpts().CUDA) + return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID) + : CUDA().DiagIfHostCode(Loc, DiagID); if (getLangOpts().SYCLIsDevice) - return SYCLDiagIfDeviceCode(Loc, DiagID); + return SYCL().DiagIfDeviceCode(Loc, DiagID); if (getLangOpts().CUDA) - return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID) - : CUDADiagIfHostCode(Loc, DiagID); + return getLangOpts().CUDAIsDevice ? 
CUDA().DiagIfDeviceCode(Loc, DiagID) + : CUDA().DiagIfHostCode(Loc, DiagID); return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID, FD, *this, DeviceDiagnosticReason::All); @@ -2014,7 +2021,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) { // constant byte size like zero length arrays. So, do a deep check for SYCL. if (D && LangOpts.SYCLIsDevice) { llvm::DenseSet Visited; - deepTypeCheckForSYCLDevice(Loc, Visited, D); + SYCL().deepTypeCheckForDevice(Loc, Visited, D); } Decl *C = cast(getCurLexicalContext()); @@ -2225,7 +2232,7 @@ void Sema::PushFunctionScope() { FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); } if (LangOpts.OpenMP) - pushOpenMPFunctionRegion(); + OpenMP().pushOpenMPFunctionRegion(); } void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { @@ -2345,7 +2352,7 @@ Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, PoppedFunctionScopeDeleter(this)); if (LangOpts.OpenMP) - popOpenMPFunctionRegion(Scope.get()); + OpenMP().popOpenMPFunctionRegion(Scope.get()); // Issue any analysis-based warnings. if (WP && D) @@ -2781,7 +2788,9 @@ void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, unsigned OpenMPCaptureLevel) { auto *CSI = new CapturedRegionScopeInfo( getDiagnostics(), S, CD, RD, CD->getContextParam(), K, - (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0, + (getLangOpts().OpenMP && K == CR_OpenMP) + ? OpenMP().getOpenMPNestingLevel() + : 0, OpenMPCaptureLevel); CSI->ReturnType = Context.VoidTy; FunctionScopes.push_back(CSI); diff --git a/clang/lib/Sema/SemaAPINotes.cpp b/clang/lib/Sema/SemaAPINotes.cpp index a3128306c664f..4c445f28bba8c 100644 --- a/clang/lib/Sema/SemaAPINotes.cpp +++ b/clang/lib/Sema/SemaAPINotes.cpp @@ -463,6 +463,8 @@ static void ProcessAPINotes(Sema &S, FunctionOrMethod AnyFunc, D = MD; } + assert((FD || MD) && "Expecting Function or ObjCMethod"); + // Nullability of return type. 
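
The targetDiag hunk above routes a diagnostic through the language subsystems in a fixed order (OpenMP, then CUDA, then SYCL device) before falling back to an immediate diagnostic. A rough standalone condensation of that routing; the enum and function names here are illustrative, not part of clang.

enum class DiagRoute {
  OpenMPDevice, OpenMPHost, CUDADevice, CUDAHost, SYCLDevice, Immediate
};

DiagRoute routeTargetDiag(bool OpenMP, bool OpenMPIsTargetDevice,
                          bool CUDA, bool CUDAIsDevice, bool SYCLIsDevice) {
  if (OpenMP)
    return OpenMPIsTargetDevice ? DiagRoute::OpenMPDevice : DiagRoute::OpenMPHost;
  if (CUDA)
    return CUDAIsDevice ? DiagRoute::CUDADevice : DiagRoute::CUDAHost;
  if (SYCLIsDevice)
    return DiagRoute::SYCLDevice;
  return DiagRoute::Immediate;          // emit right away, no deferral machinery
}
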
if (Info.NullabilityAudited) applyNullability(S, D, Info.getReturnTypeInfo(), Metadata); diff --git a/clang/lib/Sema/SemaAccess.cpp b/clang/lib/Sema/SemaAccess.cpp index 47de47fcba40e..b5dfe8b87e206 100644 --- a/clang/lib/Sema/SemaAccess.cpp +++ b/clang/lib/Sema/SemaAccess.cpp @@ -10,8 +10,6 @@ // //===----------------------------------------------------------------------===// -#include "clang/Basic/Specifiers.h" -#include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclCXX.h" @@ -19,9 +17,12 @@ #include "clang/AST/DeclObjC.h" #include "clang/AST/DependentDiagnostic.h" #include "clang/AST/ExprCXX.h" +#include "clang/Basic/Specifiers.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" +#include "clang/Sema/SemaInternal.h" +#include "llvm/ADT/STLForwardCompat.h" using namespace clang; using namespace sema; @@ -1658,21 +1659,24 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc, case InitializedEntity::EK_Base: PD = PDiag(diag::err_access_base_ctor); PD << Entity.isInheritedVirtualBase() - << Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor); + << Entity.getBaseSpecifier()->getType() + << llvm::to_underlying(getSpecialMember(Constructor)); break; case InitializedEntity::EK_Member: case InitializedEntity::EK_ParenAggInitMember: { const FieldDecl *Field = cast(Entity.getDecl()); PD = PDiag(diag::err_access_field_ctor); - PD << Field->getType() << getSpecialMember(Constructor); + PD << Field->getType() + << llvm::to_underlying(getSpecialMember(Constructor)); break; } case InitializedEntity::EK_LambdaCapture: { StringRef VarName = Entity.getCapturedVarName(); PD = PDiag(diag::err_access_lambda_capture); - PD << VarName << Entity.getType() << getSpecialMember(Constructor); + PD << VarName << Entity.getType() + << llvm::to_underlying(getSpecialMember(Constructor)); break; } diff --git a/clang/lib/Sema/SemaBase.cpp b/clang/lib/Sema/SemaBase.cpp index 3a2f54e8699c4..0972d6921315e 100644 --- a/clang/lib/Sema/SemaBase.cpp +++ b/clang/lib/Sema/SemaBase.cpp @@ -1,5 +1,6 @@ #include "clang/Sema/SemaBase.h" #include "clang/Sema/Sema.h" +#include "clang/Sema/SemaCUDA.h" namespace clang { @@ -72,8 +73,8 @@ Sema::SemaDiagnosticBuilder SemaBase::Diag(SourceLocation Loc, unsigned DiagID, } SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice - ? SemaRef.CUDADiagIfDeviceCode(Loc, DiagID) - : SemaRef.CUDADiagIfHostCode(Loc, DiagID); + ? 
SemaRef.CUDA().DiagIfDeviceCode(Loc, DiagID) + : SemaRef.CUDA().DiagIfHostCode(Loc, DiagID); SetIsLastErrorImmediate(DB.isImmediate()); return DB; } diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp index c3172d6b057dd..8825ad0ef3e47 100644 --- a/clang/lib/Sema/SemaCUDA.cpp +++ b/clang/lib/Sema/SemaCUDA.cpp @@ -10,6 +10,7 @@ /// //===----------------------------------------------------------------------===// +#include "clang/Sema/SemaCUDA.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/ExprCXX.h" @@ -22,10 +23,13 @@ #include "clang/Sema/SemaDiagnostic.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/SmallVector.h" #include using namespace clang; +SemaCUDA::SemaCUDA(Sema &S) : SemaBase(S) {} + template static bool hasExplicitAttr(const VarDecl *D) { if (!D) return false; @@ -34,38 +38,37 @@ template static bool hasExplicitAttr(const VarDecl *D) { return false; } -void Sema::PushForceCUDAHostDevice() { +void SemaCUDA::PushForceHostDevice() { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - ForceCUDAHostDeviceDepth++; + ForceHostDeviceDepth++; } -bool Sema::PopForceCUDAHostDevice() { +bool SemaCUDA::PopForceHostDevice() { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - if (ForceCUDAHostDeviceDepth == 0) + if (ForceHostDeviceDepth == 0) return false; - ForceCUDAHostDeviceDepth--; + ForceHostDeviceDepth--; return true; } -ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, +ExprResult SemaCUDA::ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc) { - FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl(); + FunctionDecl *ConfigDecl = getASTContext().getcudaConfigureCallDecl(); if (!ConfigDecl) return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use) - << getCudaConfigureFuncName()); + << getConfigureFuncName()); QualType ConfigQTy = ConfigDecl->getType(); - DeclRefExpr *ConfigDR = new (Context) - DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc); - MarkFunctionReferenced(LLLLoc, ConfigDecl); + DeclRefExpr *ConfigDR = new (getASTContext()) DeclRefExpr( + getASTContext(), ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc); + SemaRef.MarkFunctionReferenced(LLLLoc, ConfigDecl); - return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr, - /*IsExecConfig=*/true); + return SemaRef.BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr, + /*IsExecConfig=*/true); } -Sema::CUDAFunctionTarget -Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) { +CUDAFunctionTarget SemaCUDA::IdentifyTarget(const ParsedAttributesView &Attrs) { bool HasHostAttr = false; bool HasDeviceAttr = false; bool HasGlobalAttr = false; @@ -90,18 +93,18 @@ Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) { } if (HasInvalidTargetAttr) - return CFT_InvalidTarget; + return CUDAFunctionTarget::InvalidTarget; if (HasGlobalAttr) - return CFT_Global; + return CUDAFunctionTarget::Global; if (HasHostAttr && HasDeviceAttr) - return CFT_HostDevice; + return CUDAFunctionTarget::HostDevice; if (HasDeviceAttr) - return CFT_Device; + return CUDAFunctionTarget::Device; - return CFT_Host; + return CUDAFunctionTarget::Host; } template @@ -112,55 +115,54 @@ static bool hasAttr(const Decl *D, bool IgnoreImplicitAttr) { }); } -Sema::CUDATargetContextRAII::CUDATargetContextRAII(Sema &S_, - CUDATargetContextKind K, - 
Decl *D) +SemaCUDA::CUDATargetContextRAII::CUDATargetContextRAII( + SemaCUDA &S_, SemaCUDA::CUDATargetContextKind K, Decl *D) : S(S_) { SavedCtx = S.CurCUDATargetCtx; - assert(K == CTCK_InitGlobalVar); + assert(K == SemaCUDA::CTCK_InitGlobalVar); auto *VD = dyn_cast_or_null(D); if (VD && VD->hasGlobalStorage() && !VD->isStaticLocal()) { - auto Target = CFT_Host; + auto Target = CUDAFunctionTarget::Host; if ((hasAttr(VD, /*IgnoreImplicit=*/true) && !hasAttr(VD, /*IgnoreImplicit=*/true)) || hasAttr(VD, /*IgnoreImplicit=*/true) || hasAttr(VD, /*IgnoreImplicit=*/true)) - Target = CFT_Device; + Target = CUDAFunctionTarget::Device; S.CurCUDATargetCtx = {Target, K, VD}; } } -/// IdentifyCUDATarget - Determine the CUDA compilation target for this function -Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D, - bool IgnoreImplicitHDAttr) { +/// IdentifyTarget - Determine the CUDA compilation target for this function +CUDAFunctionTarget SemaCUDA::IdentifyTarget(const FunctionDecl *D, + bool IgnoreImplicitHDAttr) { // Code that lives outside a function gets the target from CurCUDATargetCtx. if (D == nullptr) return CurCUDATargetCtx.Target; if (D->hasAttr()) - return CFT_InvalidTarget; + return CUDAFunctionTarget::InvalidTarget; if (D->hasAttr()) - return CFT_Global; + return CUDAFunctionTarget::Global; if (hasAttr(D, IgnoreImplicitHDAttr)) { if (hasAttr(D, IgnoreImplicitHDAttr)) - return CFT_HostDevice; - return CFT_Device; + return CUDAFunctionTarget::HostDevice; + return CUDAFunctionTarget::Device; } else if (hasAttr(D, IgnoreImplicitHDAttr)) { - return CFT_Host; + return CUDAFunctionTarget::Host; } else if ((D->isImplicit() || !D->isUserProvided()) && !IgnoreImplicitHDAttr) { // Some implicit declarations (like intrinsic functions) are not marked. // Set the most lenient target on them for maximal flexibility. - return CFT_HostDevice; + return CUDAFunctionTarget::HostDevice; } - return CFT_Host; + return CUDAFunctionTarget::Host; } /// IdentifyTarget - Determine the CUDA compilation target for this variable. -Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) { +SemaCUDA::CUDAVariableTarget SemaCUDA::IdentifyTarget(const VarDecl *Var) { if (Var->hasAttr()) return CVT_Unified; // Only constexpr and const variabless with implicit constant attribute @@ -180,11 +182,11 @@ Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) { // - on both sides in host device functions // - on device side in device or global functions if (auto *FD = dyn_cast(Var->getDeclContext())) { - switch (IdentifyCUDATarget(FD)) { - case CFT_HostDevice: + switch (IdentifyTarget(FD)) { + case CUDAFunctionTarget::HostDevice: return CVT_Both; - case CFT_Device: - case CFT_Global: + case CUDAFunctionTarget::Device: + case CUDAFunctionTarget::Global: return CVT_Device; default: return CVT_Host; @@ -247,21 +249,21 @@ Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) { // | hd | h | SS | WS | SS | (d) | // | hd | hd | HD | HD | HD | (b) | -Sema::CUDAFunctionPreference -Sema::IdentifyCUDAPreference(const FunctionDecl *Caller, +SemaCUDA::CUDAFunctionPreference +SemaCUDA::IdentifyPreference(const FunctionDecl *Caller, const FunctionDecl *Callee) { assert(Callee && "Callee must be valid."); // Treat ctor/dtor as host device function in device var initializer to allow // trivial ctor/dtor without device attr to be used. Non-trivial ctor/dtor - // will be diagnosed by checkAllowedCUDAInitializer. + // will be diagnosed by checkAllowedInitializer. 
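
SemaCUDA::IdentifyTarget(const ParsedAttributesView &) above reduces to a small classification over which CUDA attributes are present. A self-contained restatement of that logic using the scoped CUDAFunctionTarget spelling this patch switches to; the free function is illustrative only.

enum class CUDAFunctionTarget { Device, Global, Host, HostDevice, InvalidTarget };

CUDAFunctionTarget classifyFromAttrs(bool HasHostAttr, bool HasDeviceAttr,
                                     bool HasGlobalAttr, bool HasInvalidTargetAttr) {
  if (HasInvalidTargetAttr)
    return CUDAFunctionTarget::InvalidTarget;
  if (HasGlobalAttr)
    return CUDAFunctionTarget::Global;
  if (HasHostAttr && HasDeviceAttr)
    return CUDAFunctionTarget::HostDevice;
  if (HasDeviceAttr)
    return CUDAFunctionTarget::Device;
  return CUDAFunctionTarget::Host;      // unattributed functions are host by default
}
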
if (Caller == nullptr && CurCUDATargetCtx.Kind == CTCK_InitGlobalVar && - CurCUDATargetCtx.Target == CFT_Device && + CurCUDATargetCtx.Target == CUDAFunctionTarget::Device && (isa(Callee) || isa(Callee))) return CFP_HostDevice; - CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller); - CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee); + CUDAFunctionTarget CallerTarget = IdentifyTarget(Caller); + CUDAFunctionTarget CalleeTarget = IdentifyTarget(Callee); // Pd - Sh -> CUDA device compilation for SYCL+CUDA if (getLangOpts().SYCLIsHost && getLangOpts().CUDA && @@ -271,7 +273,8 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller, // __host__) called by a SYCL kernel could end up calling a __device__ one. // In any case, __host__ functions are not emitted by the cuda-dev // compilation. So, this doesn't introduce any error. - if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device) + if (CallerTarget == CUDAFunctionTarget::Host && + CalleeTarget == CUDAFunctionTarget::Device) return CFP_WrongSide; } @@ -280,16 +283,19 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller, !getLangOpts().CUDAIsDevice) { // (x), and (p) prefer __device__ function in SYCL-device compilation. // (x) allows to pick a __device__ function. - if ((CallerTarget == CFT_Host || CallerTarget == CFT_HostDevice) && - CalleeTarget == CFT_Device) + if ((CallerTarget == CUDAFunctionTarget::Host || + CallerTarget == CUDAFunctionTarget::HostDevice) && + CalleeTarget == CUDAFunctionTarget::Device) return CFP_Native; // (p) lowers the preference of __host__ functions for favoring __device__ // ones. - if (CallerTarget == CFT_Host && CalleeTarget == CFT_Host) + if (CallerTarget == CUDAFunctionTarget::Host && + CalleeTarget == CUDAFunctionTarget::Host) return CFP_SameSide; // (z) - if (CallerTarget == CFT_HostDevice && CalleeTarget == CFT_Global) + if (CallerTarget == CUDAFunctionTarget::HostDevice && + CalleeTarget == CUDAFunctionTarget::Global) return CFP_Never; } @@ -301,49 +307,56 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller, // without having a corresponding __host__. In this case, a dummy __host__ // function is generated. This dummy function is required since the lambda // that forms the SYCL kernel (having host device attr.) needs to be - // compiled also for the host. (CallerTarget == CFT_Host) is added in case a + // compiled also for the host. (CallerTarget == CUDAFunctionTarget::Host) is added in case a // regular function (implicitly __host__) is called by a SYCL kernel lambda. - if ((CallerTarget == CFT_Host || CallerTarget == CFT_HostDevice) && - CalleeTarget == CFT_Device) + if ((CallerTarget == CUDAFunctionTarget::Host || CallerTarget == CUDAFunctionTarget::HostDevice) && + CalleeTarget == CUDAFunctionTarget::Device) return CFP_HostDevice; } // If one of the targets is invalid, the check always fails, no matter what // the other target is. - if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget) + if (CallerTarget == CUDAFunctionTarget::InvalidTarget || + CalleeTarget == CUDAFunctionTarget::InvalidTarget) return CFP_Never; // (a) Can't call global from some contexts until we support CUDA's // dynamic parallelism. - if (CalleeTarget == CFT_Global && - (CallerTarget == CFT_Global || CallerTarget == CFT_Device)) + if (CalleeTarget == CUDAFunctionTarget::Global && + (CallerTarget == CUDAFunctionTarget::Global || + CallerTarget == CUDAFunctionTarget::Device)) return CFP_Never; // (b) Calling HostDevice is OK for everyone. 
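
For plain CUDA, leaving out the SYCL-interop and HIPStdPar special cases handled above, IdentifyPreference boils down to the caller/callee matrix documented in the comment table. A standalone sketch under that simplification; the names mirror the patch but the function itself is illustrative.

enum class Target { Device, Global, Host, HostDevice, Invalid };
enum class Pref { Never, WrongSide, HostDevice, SameSide, Native };

Pref identifyPreference(Target Caller, Target Callee, bool CompilingForDevice) {
  if (Caller == Target::Invalid || Callee == Target::Invalid)
    return Pref::Never;
  // (a) no dynamic parallelism: device/global code cannot launch a kernel here
  if (Callee == Target::Global &&
      (Caller == Target::Global || Caller == Target::Device))
    return Pref::Never;
  // (b) a __host__ __device__ callee is callable from anywhere
  if (Callee == Target::HostDevice)
    return Pref::HostDevice;
  // (c) same-target calls, host->global launches, and global->device calls
  if (Callee == Caller ||
      (Caller == Target::Host && Callee == Target::Global) ||
      (Caller == Target::Global && Callee == Target::Device))
    return Pref::Native;
  // (d) an HD caller prefers callees matching the current compilation side
  if (Caller == Target::HostDevice) {
    bool SameSide = CompilingForDevice
                        ? Callee == Target::Device
                        : (Callee == Target::Host || Callee == Target::Global);
    return SameSide ? Pref::SameSide : Pref::WrongSide;
  }
  // (e) everything left crosses the host/device boundary
  return Pref::Never;
}
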
- if (CalleeTarget == CFT_HostDevice) + if (CalleeTarget == CUDAFunctionTarget::HostDevice) return CFP_HostDevice; // (c) Best case scenarios if (CalleeTarget == CallerTarget || - (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) || - (CallerTarget == CFT_Global && CalleeTarget == CFT_Device)) + (CallerTarget == CUDAFunctionTarget::Host && + CalleeTarget == CUDAFunctionTarget::Global) || + (CallerTarget == CUDAFunctionTarget::Global && + CalleeTarget == CUDAFunctionTarget::Device)) return CFP_Native; // HipStdPar mode is special, in that assessing whether a device side call to // a host target is deferred to a subsequent pass, and cannot unambiguously be // adjudicated in the AST, hence we optimistically allow them to pass here. if (getLangOpts().HIPStdPar && - (CallerTarget == CFT_Global || CallerTarget == CFT_Device || - CallerTarget == CFT_HostDevice) && - CalleeTarget == CFT_Host) + (CallerTarget == CUDAFunctionTarget::Global || + CallerTarget == CUDAFunctionTarget::Device || + CallerTarget == CUDAFunctionTarget::HostDevice) && + CalleeTarget == CUDAFunctionTarget::Host) return CFP_HostDevice; // (d) HostDevice behavior depends on compilation mode. - if (CallerTarget == CFT_HostDevice) { + if (CallerTarget == CUDAFunctionTarget::HostDevice) { // It's OK to call a compilation-mode matching function from an HD one. - if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) || + if ((getLangOpts().CUDAIsDevice && + CalleeTarget == CUDAFunctionTarget::Device) || (!getLangOpts().CUDAIsDevice && - (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))) + (CalleeTarget == CUDAFunctionTarget::Host || + CalleeTarget == CUDAFunctionTarget::Global))) return CFP_SameSide; // Calls from HD to non-mode-matching functions (i.e., to host functions @@ -354,9 +367,12 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller, } // (e) Calling across device/host boundary is not something you should do. - if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) || - (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) || - (CallerTarget == CFT_Global && CalleeTarget == CFT_Host)) + if ((CallerTarget == CUDAFunctionTarget::Host && + CalleeTarget == CUDAFunctionTarget::Device) || + (CallerTarget == CUDAFunctionTarget::Device && + CalleeTarget == CUDAFunctionTarget::Host) || + (CallerTarget == CUDAFunctionTarget::Global && + CalleeTarget == CUDAFunctionTarget::Host)) return CFP_Never; llvm_unreachable("All cases should've been handled by now."); @@ -370,13 +386,13 @@ template static bool hasImplicitAttr(const FunctionDecl *D) { return D->isImplicit(); } -bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) { +bool SemaCUDA::isImplicitHostDeviceFunction(const FunctionDecl *D) { bool IsImplicitDevAttr = hasImplicitAttr(D); bool IsImplicitHostAttr = hasImplicitAttr(D); return IsImplicitDevAttr && IsImplicitHostAttr; } -void Sema::EraseUnwantedCUDAMatches( +void SemaCUDA::EraseUnwantedMatches( const FunctionDecl *Caller, SmallVectorImpl> &Matches) { if (Matches.size() <= 1) @@ -386,7 +402,7 @@ void Sema::EraseUnwantedCUDAMatches( // Gets the CUDA function preference for a call from Caller to Match. auto GetCFP = [&](const Pair &Match) { - return IdentifyCUDAPreference(Caller, Match.second); + return IdentifyPreference(Caller, Match.second); }; // Find the best call preference among the functions in Matches. @@ -408,16 +424,16 @@ void Sema::EraseUnwantedCUDAMatches( /// \param ResolvedTarget with a target that resolves for both calls. 
/// \return true if there's a conflict, false otherwise. static bool -resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1, - Sema::CUDAFunctionTarget Target2, - Sema::CUDAFunctionTarget *ResolvedTarget) { +resolveCalleeCUDATargetConflict(CUDAFunctionTarget Target1, + CUDAFunctionTarget Target2, + CUDAFunctionTarget *ResolvedTarget) { // Only free functions and static member functions may be global. - assert(Target1 != Sema::CFT_Global); - assert(Target2 != Sema::CFT_Global); + assert(Target1 != CUDAFunctionTarget::Global); + assert(Target2 != CUDAFunctionTarget::Global); - if (Target1 == Sema::CFT_HostDevice) { + if (Target1 == CUDAFunctionTarget::HostDevice) { *ResolvedTarget = Target2; - } else if (Target2 == Sema::CFT_HostDevice) { + } else if (Target2 == CUDAFunctionTarget::HostDevice) { *ResolvedTarget = Target1; } else if (Target1 != Target2) { return true; @@ -428,8 +444,8 @@ resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1, return false; } -bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, - CXXSpecialMember CSM, +bool SemaCUDA::inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, + CXXSpecialMemberKind CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose) { @@ -449,7 +465,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, // We're going to invoke special member lookup; mark that these special // members are called from this one, and not from its caller. - ContextRAII MethodContext(*this, MemberDecl); + Sema::ContextRAII MethodContext(SemaRef, MemberDecl); // Look for special members in base classes that should be invoked from here. // Infer the target of this member base on the ones it should call. @@ -473,17 +489,17 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXRecordDecl *BaseClassDecl = cast(BaseType->getDecl()); Sema::SpecialMemberOverloadResult SMOR = - LookupSpecialMember(BaseClassDecl, CSM, - /* ConstArg */ ConstRHS, - /* VolatileArg */ false, - /* RValueThis */ false, - /* ConstThis */ false, - /* VolatileThis */ false); + SemaRef.LookupSpecialMember(BaseClassDecl, CSM, + /* ConstArg */ ConstRHS, + /* VolatileArg */ false, + /* RValueThis */ false, + /* ConstThis */ false, + /* VolatileThis */ false); if (!SMOR.getMethod()) continue; - CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod()); + CUDAFunctionTarget BaseMethodTarget = IdentifyTarget(SMOR.getMethod()); if (!InferredTarget) { InferredTarget = BaseMethodTarget; } else { @@ -493,9 +509,11 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, if (Diagnose) { Diag(ClassDecl->getLocation(), diag::note_implicit_member_target_infer_collision) - << (unsigned)CSM << *InferredTarget << BaseMethodTarget; + << (unsigned)CSM << llvm::to_underlying(*InferredTarget) + << llvm::to_underlying(BaseMethodTarget); } - MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context)); + MemberDecl->addAttr( + CUDAInvalidTargetAttr::CreateImplicit(getASTContext())); return true; } } @@ -508,25 +526,24 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, } const RecordType *FieldType = - Context.getBaseElementType(F->getType())->getAs(); + getASTContext().getBaseElementType(F->getType())->getAs(); if (!FieldType) { continue; } CXXRecordDecl *FieldRecDecl = cast(FieldType->getDecl()); Sema::SpecialMemberOverloadResult SMOR = - LookupSpecialMember(FieldRecDecl, CSM, - /* ConstArg */ ConstRHS && !F->isMutable(), - /* 
VolatileArg */ false, - /* RValueThis */ false, - /* ConstThis */ false, - /* VolatileThis */ false); + SemaRef.LookupSpecialMember(FieldRecDecl, CSM, + /* ConstArg */ ConstRHS && !F->isMutable(), + /* VolatileArg */ false, + /* RValueThis */ false, + /* ConstThis */ false, + /* VolatileThis */ false); if (!SMOR.getMethod()) continue; - CUDAFunctionTarget FieldMethodTarget = - IdentifyCUDATarget(SMOR.getMethod()); + CUDAFunctionTarget FieldMethodTarget = IdentifyTarget(SMOR.getMethod()); if (!InferredTarget) { InferredTarget = FieldMethodTarget; } else { @@ -536,9 +553,11 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, if (Diagnose) { Diag(ClassDecl->getLocation(), diag::note_implicit_member_target_infer_collision) - << (unsigned)CSM << *InferredTarget << FieldMethodTarget; + << (unsigned)CSM << llvm::to_underlying(*InferredTarget) + << llvm::to_underlying(FieldMethodTarget); } - MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context)); + MemberDecl->addAttr( + CUDAInvalidTargetAttr::CreateImplicit(getASTContext())); return true; } } @@ -549,25 +568,25 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, // it's the least restrictive option that can be invoked from any target. bool NeedsH = true, NeedsD = true; if (InferredTarget) { - if (*InferredTarget == CFT_Device) + if (*InferredTarget == CUDAFunctionTarget::Device) NeedsH = false; - else if (*InferredTarget == CFT_Host) + else if (*InferredTarget == CUDAFunctionTarget::Host) NeedsD = false; } // We either setting attributes first time, or the inferred ones must match // previously set ones. if (NeedsD && !HasD) - MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context)); + MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext())); if (NeedsH && !HasH) - MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context)); + MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(getASTContext())); return false; } -bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) { +bool SemaCUDA::isEmptyConstructor(SourceLocation Loc, CXXConstructorDecl *CD) { if (!CD->isDefined() && CD->isTemplateInstantiation()) - InstantiateFunctionDefinition(Loc, CD->getFirstDecl()); + SemaRef.InstantiateFunctionDefinition(Loc, CD->getFirstDecl()); // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered // empty at a point in the translation unit, if it is either a @@ -595,7 +614,7 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) { if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) { if (const CXXConstructExpr *CE = dyn_cast(CI->getInit())) - return isEmptyCudaConstructor(Loc, CE->getConstructor()); + return isEmptyConstructor(Loc, CE->getConstructor()); return false; })) return false; @@ -603,13 +622,13 @@ bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) { return true; } -bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) { +bool SemaCUDA::isEmptyDestructor(SourceLocation Loc, CXXDestructorDecl *DD) { // No destructor -> no problem. 
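
The implicit-special-member inference above has two small kernels of logic: merging the targets demanded by base and field special members, and turning the merged result into the implicit attributes to add. A condensed standalone version; std::optional stands in for the patch's optional InferredTarget.

#include <optional>

enum class Target { Device, Global, Host, HostDevice, Invalid };

// Mirrors resolveCalleeCUDATargetConflict: HostDevice yields to the other
// side; two different concrete targets cannot be reconciled.
bool resolveTargets(Target T1, Target T2, Target &Resolved) {
  if (T1 == Target::HostDevice) { Resolved = T2; return false; }
  if (T2 == Target::HostDevice) { Resolved = T1; return false; }
  if (T1 != T2)
    return true;                        // irreconcilable conflict
  Resolved = T1;
  return false;
}

// With no constraints the member becomes __host__ __device__, the most
// permissive choice; otherwise only the side that is actually needed is added.
void neededImplicitAttrs(std::optional<Target> Inferred,
                         bool &NeedsHost, bool &NeedsDevice) {
  NeedsHost = NeedsDevice = true;
  if (!Inferred)
    return;
  if (*Inferred == Target::Device)
    NeedsHost = false;
  else if (*Inferred == Target::Host)
    NeedsDevice = false;
}
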
if (!DD) return true; if (!DD->isDefined() && DD->isTemplateInstantiation()) - InstantiateFunctionDefinition(Loc, DD->getFirstDecl()); + SemaRef.InstantiateFunctionDefinition(Loc, DD->getFirstDecl()); // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered // empty at a point in the translation unit, if it is either a @@ -638,7 +657,7 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) { // destructors for all base classes... if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) { if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl()) - return isEmptyCudaDestructor(Loc, RD->getDestructor()); + return isEmptyDestructor(Loc, RD->getDestructor()); return true; })) return false; @@ -648,7 +667,7 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) { if (CXXRecordDecl *RD = Field->getType() ->getBaseElementTypeUnsafe() ->getAsCXXRecordDecl()) - return isEmptyCudaDestructor(Loc, RD->getDestructor()); + return isEmptyDestructor(Loc, RD->getDestructor()); return true; })) return false; @@ -679,7 +698,7 @@ bool IsDependentVar(VarDecl *VD) { // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. -bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD, +bool HasAllowedCUDADeviceStaticInitializer(SemaCUDA &S, VarDecl *VD, CUDAInitializerCheckKind CheckKind) { assert(!VD->isInvalidDecl() && VD->hasGlobalStorage()); assert(!IsDependentVar(VD) && "do not check dependent var"); @@ -688,30 +707,30 @@ bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD, if (!Init) return true; if (const auto *CE = dyn_cast(Init)) { - return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor()); + return S.isEmptyConstructor(VD->getLocation(), CE->getConstructor()); } return false; }; auto IsConstantInit = [&](const Expr *Init) { assert(Init); - ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context, + ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.getASTContext(), /*NoWronSidedVars=*/true); - return Init->isConstantInitializer(S.Context, + return Init->isConstantInitializer(S.getASTContext(), VD->getType()->isReferenceType()); }; auto HasEmptyDtor = [&](VarDecl *VD) { if (const auto *RD = VD->getType()->getAsCXXRecordDecl()) - return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor()); + return S.isEmptyDestructor(VD->getLocation(), RD->getDestructor()); return true; }; if (CheckKind == CICK_Shared) return IsEmptyInit(Init) && HasEmptyDtor(VD); - return S.LangOpts.GPUAllowDeviceInit || + return S.getLangOpts().GPUAllowDeviceInit || ((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD)); } } // namespace -void Sema::checkAllowedCUDAInitializer(VarDecl *VD) { +void SemaCUDA::checkAllowedInitializer(VarDecl *VD) { // Return early if VD is inside a non-instantiated template function since // the implicit constructor is not defined yet. 
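
HasAllowedCUDADeviceStaticInitializer above collapses to a small predicate once its helper lambdas (empty constructor, constant initializer, empty destructor) have been evaluated. A boolean restatement, with the inputs precomputed as flags purely for illustration.

bool hasAllowedDeviceStaticInit(bool IsSharedVar, bool HasNoInit,
                                bool HasEmptyInit, bool HasConstantInit,
                                bool HasEmptyDtor, bool GPUAllowDeviceInit) {
  if (HasNoInit)
    return true;                                  // nothing to run at startup
  if (IsSharedVar)
    return HasEmptyInit && HasEmptyDtor;          // __shared__: no dynamic init
  // __device__/__constant__: empty or constant init plus a trivial teardown,
  // unless the GPUAllowDeviceInit language option lifts the restriction.
  return GPUAllowDeviceInit || ((HasEmptyInit || HasConstantInit) && HasEmptyDtor);
}
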
if (const FunctionDecl *FD = @@ -747,10 +766,11 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) { InitFn = CE->getDirectCallee(); } if (InitFn) { - CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn); - if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) { + CUDAFunctionTarget InitFnTarget = IdentifyTarget(InitFn); + if (InitFnTarget != CUDAFunctionTarget::Host && + InitFnTarget != CUDAFunctionTarget::HostDevice) { Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer) - << InitFnTarget << InitFn; + << llvm::to_underlying(InitFnTarget) << InitFn; Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn; VD->setInvalidDecl(); } @@ -758,21 +778,22 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) { } } -void Sema::CUDARecordImplicitHostDeviceFuncUsedByDevice( +void SemaCUDA::RecordImplicitHostDeviceFuncUsedByDevice( const FunctionDecl *Callee) { - FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); + FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true); if (!Caller) return; - if (!isCUDAImplicitHostDeviceFunction(Callee)) + if (!isImplicitHostDeviceFunction(Callee)) return; - CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller); + CUDAFunctionTarget CallerTarget = IdentifyTarget(Caller); // Record whether an implicit host device function is used on device side. - if (CallerTarget != CFT_Device && CallerTarget != CFT_Global && - (CallerTarget != CFT_HostDevice || - (isCUDAImplicitHostDeviceFunction(Caller) && + if (CallerTarget != CUDAFunctionTarget::Device && + CallerTarget != CUDAFunctionTarget::Global && + (CallerTarget != CUDAFunctionTarget::HostDevice || + (isImplicitHostDeviceFunction(Caller) && !getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Caller)))) return; @@ -788,18 +809,18 @@ void Sema::CUDARecordImplicitHostDeviceFuncUsedByDevice( // system header, in which case we leave the constexpr function unattributed. // // In addition, all function decls are treated as __host__ __device__ when -// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a +// ForceHostDeviceDepth > 0 (corresponding to code within a // #pragma clang force_cuda_host_device_begin/end // pair). 
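
The ForceHostDeviceDepth counter referenced in the comment above, and in Push/PopForceHostDevice earlier in this file's hunks, is just a nesting depth for the force_cuda_host_device pragma region. A tiny standalone equivalent:

struct ForceHostDeviceState {
  unsigned Depth = 0;

  void push() { ++Depth; }              // PushForceHostDevice
  bool pop() {                          // PopForceHostDevice
    if (Depth == 0)
      return false;                     // unmatched "end"
    --Depth;
    return true;
  }
  // While active, new function declarations get implicit __host__ __device__.
  bool active() const { return Depth > 0; }
};
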
-void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD, +void SemaCUDA::maybeAddHostDeviceAttrs(FunctionDecl *NewD, const LookupResult &Previous) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - if (ForceCUDAHostDeviceDepth > 0) { + if (ForceHostDeviceDepth > 0) { if (!NewD->hasAttr()) - NewD->addAttr(CUDAHostAttr::CreateImplicit(Context)); + NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext())); if (!NewD->hasAttr()) - NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context)); + NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext())); return; } @@ -810,8 +831,8 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD, !NewD->hasAttr() && (NewD->getDescribedFunctionTemplate() || NewD->isFunctionTemplateSpecialization())) { - NewD->addAttr(CUDAHostAttr::CreateImplicit(Context)); - NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context)); + NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext())); + NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext())); return; } @@ -828,8 +849,9 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD, FunctionDecl *OldD = D->getAsFunction(); return OldD && OldD->hasAttr() && !OldD->hasAttr() && - !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false, - /* ConsiderCudaAttrs = */ false); + !SemaRef.IsOverload(NewD, OldD, + /* UseMemberUsingDeclRules = */ false, + /* ConsiderCudaAttrs = */ false); }; auto It = llvm::find_if(Previous, IsMatchingDeviceFn); if (It != Previous.end()) { @@ -838,7 +860,7 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD, // in a system header, in which case we simply return without making NewD // host+device. NamedDecl *Match = *It; - if (!getSourceManager().isInSystemHeader(Match->getLocation())) { + if (!SemaRef.getSourceManager().isInSystemHeader(Match->getLocation())) { Diag(NewD->getLocation(), diag::err_cuda_unattributed_constexpr_cannot_overload_device) << NewD; @@ -848,14 +870,14 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD, return; } - NewD->addAttr(CUDAHostAttr::CreateImplicit(Context)); - NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context)); + NewD->addAttr(CUDAHostAttr::CreateImplicit(getASTContext())); + NewD->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext())); } // TODO: `__constant__` memory may be a limited resource for certain targets. // A safeguard may be needed at the end of compilation pipeline if // `__constant__` memory usage goes beyond limit. -void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) { +void SemaCUDA::MaybeAddConstantAttr(VarDecl *VD) { // Do not promote dependent variables since the cotr/dtor/initializer are // not determined. Do it after instantiation. 
if (getLangOpts().CUDAIsDevice && !VD->hasAttr() && @@ -869,88 +891,92 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) { } } -Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc, - unsigned DiagID) { +SemaBase::SemaDiagnosticBuilder SemaCUDA::DiagIfDeviceCode(SourceLocation Loc, + unsigned DiagID) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true); + FunctionDecl *CurFunContext = + SemaRef.getCurFunctionDecl(/*AllowLambda=*/true); SemaDiagnosticBuilder::Kind DiagKind = [&] { if (!CurFunContext) return SemaDiagnosticBuilder::K_Nop; - switch (CurrentCUDATarget()) { - case CFT_Global: - case CFT_Device: + switch (CurrentTarget()) { + case CUDAFunctionTarget::Global: + case CUDAFunctionTarget::Device: return SemaDiagnosticBuilder::K_Immediate; - case CFT_HostDevice: + case CUDAFunctionTarget::HostDevice: // An HD function counts as host code if we're compiling for host, and // device code if we're compiling for device. Defer any errors in device // mode until the function is known-emitted. if (!getLangOpts().CUDAIsDevice) return SemaDiagnosticBuilder::K_Nop; - if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID)) + if (SemaRef.IsLastErrorImmediate && + getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID)) return SemaDiagnosticBuilder::K_Immediate; - return (getEmissionStatus(CurFunContext) == - FunctionEmissionStatus::Emitted) + return (SemaRef.getEmissionStatus(CurFunContext) == + Sema::FunctionEmissionStatus::Emitted) ? SemaDiagnosticBuilder::K_ImmediateWithCallStack : SemaDiagnosticBuilder::K_Deferred; default: return SemaDiagnosticBuilder::K_Nop; } }(); - return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this, + return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, SemaRef, DeviceDiagnosticReason::CudaDevice); } -Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc, +Sema::SemaDiagnosticBuilder SemaCUDA::DiagIfHostCode(SourceLocation Loc, unsigned DiagID) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true); + FunctionDecl *CurFunContext = + SemaRef.getCurFunctionDecl(/*AllowLambda=*/true); SemaDiagnosticBuilder::Kind DiagKind = [&] { if (!CurFunContext) return SemaDiagnosticBuilder::K_Nop; - switch (CurrentCUDATarget()) { - case CFT_Host: + switch (CurrentTarget()) { + case CUDAFunctionTarget::Host: return SemaDiagnosticBuilder::K_Immediate; - case CFT_HostDevice: + case CUDAFunctionTarget::HostDevice: // An HD function counts as host code if we're compiling for host, and // device code if we're compiling for device. Defer any errors in device // mode until the function is known-emitted. if (getLangOpts().CUDAIsDevice) return SemaDiagnosticBuilder::K_Nop; - if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID)) + if (SemaRef.IsLastErrorImmediate && + getDiagnostics().getDiagnosticIDs()->isBuiltinNote(DiagID)) return SemaDiagnosticBuilder::K_Immediate; - return (getEmissionStatus(CurFunContext) == - FunctionEmissionStatus::Emitted) + return (SemaRef.getEmissionStatus(CurFunContext) == + Sema::FunctionEmissionStatus::Emitted) ? 
SemaDiagnosticBuilder::K_ImmediateWithCallStack : SemaDiagnosticBuilder::K_Deferred; default: return SemaDiagnosticBuilder::K_Nop; } }(); - return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this, + return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, SemaRef, DeviceDiagnosticReason::CudaHost); } -bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) { +bool SemaCUDA::CheckCall(SourceLocation Loc, FunctionDecl *Callee) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); assert(Callee && "Callee may not be null."); - const auto &ExprEvalCtx = currentEvaluationContext(); + const auto &ExprEvalCtx = SemaRef.currentEvaluationContext(); if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated()) return true; // FIXME: Is bailing out early correct here? Should we instead assume that // the caller is a global initializer? - FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); + FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true); if (!Caller) return true; // If the caller is known-emitted, mark the callee as known-emitted. // Otherwise, mark the call in our call graph so we can traverse it later. - bool CallerKnownEmitted = - getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted; + bool CallerKnownEmitted = SemaRef.getEmissionStatus(Caller) == + Sema::FunctionEmissionStatus::Emitted; SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee, CallerKnownEmitted] { - switch (IdentifyCUDAPreference(Caller, Callee)) { + switch (IdentifyPreference(Caller, Callee)) { case CFP_Never: case CFP_WrongSide: assert(Caller && "Never/wrongSide calls require a non-null caller"); @@ -967,7 +993,7 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) { if (DiagKind == SemaDiagnosticBuilder::K_Nop) { // For -fgpu-rdc, keep track of external kernels used by host functions. - if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode && + if (getLangOpts().CUDAIsDevice && getLangOpts().GPURelocatableDeviceCode && Callee->hasAttr() && !Callee->isDefined() && (!Caller || (!Caller->getDescribedFunctionTemplate() && getASTContext().GetGVALinkageForFunction(Caller) == @@ -983,13 +1009,13 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) { if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second) return true; - SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this, - DeviceDiagnosticReason::CudaAll) - << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee - << IdentifyCUDATarget(Caller); + SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, + SemaRef, DeviceDiagnosticReason::CudaAll) + << llvm::to_underlying(IdentifyTarget(Callee)) << /*function*/ 0 + << Callee << llvm::to_underlying(IdentifyTarget(Caller)); if (!Callee->getBuiltinID()) SemaDiagnosticBuilder(DiagKind, Callee->getLocation(), - diag::note_previous_decl, Caller, *this, + diag::note_previous_decl, Caller, SemaRef, DeviceDiagnosticReason::CudaAll) << Callee; return DiagKind != SemaDiagnosticBuilder::K_Immediate && @@ -1001,7 +1027,7 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) { // defined and uses the capture by reference when the lambda is called. When // the capture and use happen on different sides, the capture is invalid and // should be diagnosed. 
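
DiagIfDeviceCode and DiagIfHostCode above share one decision: emit now, emit with a call-stack note, defer, or drop, based on the current function's target and whether it is already known to be emitted. A condensed standalone version for the device-side case; the builtin-note special case is left out, and the enum merely mimics SemaDiagnosticBuilder::Kind.

enum class Target { Device, Global, Host, HostDevice, Invalid };
enum class DiagKind { Nop, Immediate, ImmediateWithCallStack, Deferred };

DiagKind deviceDiagKind(Target CurrentTarget, bool CompilingForDevice,
                        bool CallerKnownEmitted) {
  switch (CurrentTarget) {
  case Target::Global:
  case Target::Device:
    return DiagKind::Immediate;         // definitely device code
  case Target::HostDevice:
    if (!CompilingForDevice)
      return DiagKind::Nop;             // HD counts as host code in host mode
    return CallerKnownEmitted ? DiagKind::ImmediateWithCallStack
                              : DiagKind::Deferred;
  default:
    return DiagKind::Nop;               // host-only code: not a device problem
  }
}
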
-void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee, +void SemaCUDA::CheckLambdaCapture(CXXMethodDecl *Callee, const sema::Capture &Capture) { // In host compilation we only need to check lambda functions emitted on host // side. In such lambda functions, a reference capture is invalid only @@ -1011,12 +1037,12 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee, // kernel cannot pass a lambda back to a host function since we cannot // define a kernel argument type which can hold the lambda before the lambda // itself is defined. - if (!LangOpts.CUDAIsDevice) + if (!getLangOpts().CUDAIsDevice) return; // File-scope lambda can only do init captures for global variables, which // results in passing by value for these global variables. - FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); + FunctionDecl *Caller = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true); if (!Caller) return; @@ -1033,7 +1059,7 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee, auto DiagKind = SemaDiagnosticBuilder::K_Deferred; if (Capture.isVariableCapture() && !getLangOpts().HIPStdPar) { SemaDiagnosticBuilder(DiagKind, Capture.getLocation(), - diag::err_capture_bad_target, Callee, *this, + diag::err_capture_bad_target, Callee, SemaRef, DeviceDiagnosticReason::CudaAll) << Capture.getVariable(); } else if (Capture.isThisCapture()) { @@ -1043,47 +1069,49 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee, // accessible on device side. SemaDiagnosticBuilder(DiagKind, Capture.getLocation(), diag::warn_maybe_capture_bad_target_this_ptr, Callee, - *this, DeviceDiagnosticReason::CudaAll); + SemaRef, DeviceDiagnosticReason::CudaAll); } } -void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) { +void SemaCUDA::SetLambdaAttrs(CXXMethodDecl *Method) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); if (Method->hasAttr() || Method->hasAttr()) return; - Method->addAttr(CUDADeviceAttr::CreateImplicit(Context)); - Method->addAttr(CUDAHostAttr::CreateImplicit(Context)); + Method->addAttr(CUDADeviceAttr::CreateImplicit(getASTContext())); + Method->addAttr(CUDAHostAttr::CreateImplicit(getASTContext())); } -void Sema::checkCUDATargetOverload(FunctionDecl *NewFD, +void SemaCUDA::checkTargetOverload(FunctionDecl *NewFD, const LookupResult &Previous) { assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); - CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD); + CUDAFunctionTarget NewTarget = IdentifyTarget(NewFD); for (NamedDecl *OldND : Previous) { FunctionDecl *OldFD = OldND->getAsFunction(); if (!OldFD) continue; - CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD); + CUDAFunctionTarget OldTarget = IdentifyTarget(OldFD); // Don't allow HD and global functions to overload other functions with the // same signature. We allow overloading based on CUDA attributes so that // functions can have different implementations on the host and device, but // HD/global functions "exist" in some sense on both the host and device, so // should have the same implementation on both sides. 
if (NewTarget != OldTarget && - ((NewTarget == CFT_HostDevice && - !(LangOpts.OffloadImplicitHostDeviceTemplates && - isCUDAImplicitHostDeviceFunction(NewFD) && - OldTarget == CFT_Device)) || - (OldTarget == CFT_HostDevice && - !(LangOpts.OffloadImplicitHostDeviceTemplates && - isCUDAImplicitHostDeviceFunction(OldFD) && - NewTarget == CFT_Device)) || - (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) && - !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false, - /* ConsiderCudaAttrs = */ false)) { + ((NewTarget == CUDAFunctionTarget::HostDevice && + !(getLangOpts().OffloadImplicitHostDeviceTemplates && + isImplicitHostDeviceFunction(NewFD) && + OldTarget == CUDAFunctionTarget::Device)) || + (OldTarget == CUDAFunctionTarget::HostDevice && + !(getLangOpts().OffloadImplicitHostDeviceTemplates && + isImplicitHostDeviceFunction(OldFD) && + NewTarget == CUDAFunctionTarget::Device)) || + (NewTarget == CUDAFunctionTarget::Global) || + (OldTarget == CUDAFunctionTarget::Global)) && + !SemaRef.IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false, + /* ConsiderCudaAttrs = */ false)) { Diag(NewFD->getLocation(), diag::err_cuda_ovl_target) - << NewTarget << NewFD->getDeclName() << OldTarget << OldFD; + << llvm::to_underlying(NewTarget) << NewFD->getDeclName() + << llvm::to_underlying(OldTarget) << OldFD; Diag(OldFD->getLocation(), diag::note_previous_declaration); NewFD->setInvalidDecl(); break; @@ -1101,21 +1129,21 @@ static void copyAttrIfPresent(Sema &S, FunctionDecl *FD, } } -void Sema::inheritCUDATargetAttrs(FunctionDecl *FD, +void SemaCUDA::inheritTargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD) { const FunctionDecl &TemplateFD = *TD.getTemplatedDecl(); - copyAttrIfPresent(*this, FD, TemplateFD); - copyAttrIfPresent(*this, FD, TemplateFD); - copyAttrIfPresent(*this, FD, TemplateFD); + copyAttrIfPresent(SemaRef, FD, TemplateFD); + copyAttrIfPresent(SemaRef, FD, TemplateFD); + copyAttrIfPresent(SemaRef, FD, TemplateFD); } -std::string Sema::getCudaConfigureFuncName() const { +std::string SemaCUDA::getConfigureFuncName() const { if (getLangOpts().HIP) return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration" : "hipConfigureCall"; // New CUDA kernel launch sequence. 
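
checkTargetOverload above rejects a redeclaration whose signature matches an existing one (ignoring CUDA attributes) when the two targets differ and either side is __global__ or __host__ __device__; the implicit-HD-template carve-out is skipped here. A condensed, illustrative predicate:

enum class Target { Device, Global, Host, HostDevice, Invalid };

// True when the new declaration should be diagnosed with err_cuda_ovl_target.
bool isForbiddenTargetOverload(Target NewT, Target OldT,
                               bool SameSignatureIgnoringCUDAAttrs) {
  if (NewT == OldT || !SameSignatureIgnoringCUDAAttrs)
    return false;
  return NewT == Target::HostDevice || OldT == Target::HostDevice ||
         NewT == Target::Global || OldT == Target::Global;
}
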
- if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(), + if (CudaFeatureEnabled(getASTContext().getTargetInfo().getSDKVersion(), CudaFeature::CUDA_USES_NEW_LAUNCH)) return "__cudaPushCallConfiguration"; diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp index bc4441f09028f..200b53da35f5f 100644 --- a/clang/lib/Sema/SemaCast.cpp +++ b/clang/lib/Sema/SemaCast.cpp @@ -24,6 +24,7 @@ #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaSYCL.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include @@ -155,7 +156,7 @@ namespace { Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange); } - void checkObjCConversion(Sema::CheckedConversionKind CCK) { + void checkObjCConversion(CheckedConversionKind CCK) { assert(Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers()); Expr *src = SrcExpr.get(); @@ -248,18 +249,14 @@ static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExp CastKind &Kind, CXXCastPath &BasePath); -static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, - QualType DestType, - Sema::CheckedConversionKind CCK, - SourceRange OpRange, - unsigned &msg, CastKind &Kind, - bool ListInitialization); +static TryCastResult +TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, + CheckedConversionKind CCK, SourceRange OpRange, + unsigned &msg, CastKind &Kind, bool ListInitialization); static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr, - QualType DestType, - Sema::CheckedConversionKind CCK, - SourceRange OpRange, - unsigned &msg, CastKind &Kind, - CXXCastPath &BasePath, + QualType DestType, CheckedConversionKind CCK, + SourceRange OpRange, unsigned &msg, + CastKind &Kind, CXXCastPath &BasePath, bool ListInitialization); static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, bool CStyle, @@ -499,10 +496,22 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT, howManyCandidates = OCD_AmbiguousCandidates; break; - case OR_Deleted: - msg = diag::err_ovl_deleted_conversion_in_cast; - howManyCandidates = OCD_ViableCandidates; - break; + case OR_Deleted: { + OverloadCandidateSet::iterator Best; + [[maybe_unused]] OverloadingResult Res = + candidates.BestViableFunction(S, range.getBegin(), Best); + assert(Res == OR_Deleted && "Inconsistent overload resolution"); + + StringLiteral *Msg = Best->Function->getDeletedMessage(); + candidates.NoteCandidates( + PartialDiagnosticAt(range.getBegin(), + S.PDiag(diag::err_ovl_deleted_conversion_in_cast) + << CT << srcType << destType << (Msg != nullptr) + << (Msg ? 
Msg->getString() : StringRef()) + << range << src->getSourceRange()), + S, OCD_ViableCandidates, src); + return true; + } } candidates.NoteCandidates( @@ -1212,7 +1221,7 @@ void CastOperation::CheckReinterpretCast() { if (isValidCast(tcr)) { if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers()) - checkObjCConversion(Sema::CCK_OtherCast); + checkObjCConversion(CheckedConversionKind::OtherCast); DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange); if (unsigned DiagID = checkCastFunctionType(Self, SrcExpr, DestType)) @@ -1263,9 +1272,9 @@ void CastOperation::CheckStaticCast() { } unsigned msg = diag::err_bad_cxx_cast_generic; - TryCastResult tcr - = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg, - Kind, BasePath, /*ListInitialization=*/false); + TryCastResult tcr = + TryStaticCast(Self, SrcExpr, DestType, CheckedConversionKind::OtherCast, + OpRange, msg, Kind, BasePath, /*ListInitialization=*/false); if (tcr != TC_Success && msg != 0) { if (SrcExpr.isInvalid()) return; @@ -1285,7 +1294,7 @@ void CastOperation::CheckStaticCast() { if (Kind == CK_BitCast) checkCastAlign(); if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers()) - checkObjCConversion(Sema::CCK_OtherCast); + checkObjCConversion(CheckedConversionKind::OtherCast); } else { SrcExpr = ExprError(); } @@ -1306,14 +1315,13 @@ static bool IsAddressSpaceConversion(QualType SrcType, QualType DestType) { /// possible. If @p CStyle, ignore access restrictions on hierarchy casting /// and casting away constness. static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr, - QualType DestType, - Sema::CheckedConversionKind CCK, + QualType DestType, CheckedConversionKind CCK, SourceRange OpRange, unsigned &msg, CastKind &Kind, CXXCastPath &BasePath, bool ListInitialization) { // Determine whether we have the semantics of a C-style cast. - bool CStyle - = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast); + bool CStyle = (CCK == CheckedConversionKind::CStyleCast || + CCK == CheckedConversionKind::FunctionalCast); // The order the tests is not entirely arbitrary. There is one conversion // that can be handled in two different ways. Given: @@ -1873,11 +1881,11 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType, /// /// An expression e can be explicitly converted to a type T using a /// @c static_cast if the declaration "T t(e);" is well-formed [...]. -TryCastResult -TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, - Sema::CheckedConversionKind CCK, - SourceRange OpRange, unsigned &msg, - CastKind &Kind, bool ListInitialization) { +TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, + QualType DestType, + CheckedConversionKind CCK, + SourceRange OpRange, unsigned &msg, + CastKind &Kind, bool ListInitialization) { if (DestType->isRecordType()) { if (Self.RequireCompleteType(OpRange.getBegin(), DestType, diag::err_bad_cast_incomplete) || @@ -1889,13 +1897,14 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, } InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType); - InitializationKind InitKind - = (CCK == Sema::CCK_CStyleCast) - ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange, - ListInitialization) - : (CCK == Sema::CCK_FunctionalCast) - ? 
InitializationKind::CreateFunctionalCast(OpRange, ListInitialization) - : InitializationKind::CreateCast(OpRange); + InitializationKind InitKind = + (CCK == CheckedConversionKind::CStyleCast) + ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange, + ListInitialization) + : (CCK == CheckedConversionKind::FunctionalCast) + ? InitializationKind::CreateFunctionalCast(OpRange, + ListInitialization) + : InitializationKind::CreateCast(OpRange); Expr *SrcExprRaw = SrcExpr.get(); // FIXME: Per DR242, we should check for an implicit conversion sequence // or for a constructor that could be invoked by direct-initialization @@ -1907,8 +1916,8 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, // There is no other way that works. // On the other hand, if we're checking a C-style cast, we've still got // the reinterpret_cast way. - bool CStyle - = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast); + bool CStyle = (CCK == CheckedConversionKind::CStyleCast || + CCK == CheckedConversionKind::FunctionalCast); if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType())) return TC_NotApplicable; @@ -2634,8 +2643,8 @@ static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr, Qualifiers SrcQ = SrcPointeeType.getQualifiers(); Qualifiers DestQ = DestPointeeType.getQualifiers(); if (!DestQ.isAddressSpaceSupersetOf(SrcQ) && OpRange.isValid()) { - Self.SYCLDiagIfDeviceCode(OpRange.getBegin(), - diag::warn_sycl_potentially_invalid_as_cast) + Self.SYCL().DiagIfDeviceCode(OpRange.getBegin(), + diag::warn_sycl_potentially_invalid_as_cast) << SrcType << DestType << OpRange; } } @@ -2820,8 +2829,9 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, if (isValidCast(tcr)) Kind = CK_NoOp; - Sema::CheckedConversionKind CCK = - FunctionalStyle ? Sema::CCK_FunctionalCast : Sema::CCK_CStyleCast; + CheckedConversionKind CCK = FunctionalStyle + ? CheckedConversionKind::FunctionalCast + : CheckedConversionKind::CStyleCast; if (tcr == TC_NotApplicable) { tcr = TryAddressSpaceCast(Self, SrcExpr, DestType, /*CStyle*/ true, msg, Kind, OpRange); @@ -3207,7 +3217,7 @@ void CastOperation::CheckCStyleCast() { // ARC imposes extra restrictions on casts. if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers()) { - checkObjCConversion(Sema::CCK_CStyleCast); + checkObjCConversion(CheckedConversionKind::CStyleCast); if (SrcExpr.isInvalid()) return; diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 8268d7b8d6c80..5f06593f3de2f 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -62,6 +62,7 @@ #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaSYCL.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" @@ -625,7 +626,7 @@ struct BuiltinDumpStructGenerator { for (auto *D : RD->decls()) { auto *IFD = dyn_cast(D); auto *FD = IFD ? IFD->getAnonField() : dyn_cast(D); - if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion()) + if (!FD || FD->isUnnamedBitField() || FD->isAnonymousStructOrUnion()) continue; llvm::SmallString<20> Format = llvm::StringRef("%s%s %s "); @@ -3276,6 +3277,17 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, if (BuiltinCountZeroBitsGeneric(*this, TheCall)) return ExprError(); break; + + case Builtin::BI__builtin_allow_runtime_check: { + Expr *Arg = TheCall->getArg(0); + // Check if the argument is a string literal. 
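
The DeletedMessage argument threaded through ParseFunctionDefinition at the top of this section, and the deleted-message text now attached to err_ovl_deleted_conversion_in_cast above, appear to serve the C++26 "= delete(\"reason\")" feature (P2573). A usage sketch, which needs a compiler implementing that paper:

struct Widget {
  Widget() = default;
  Widget(Widget &&) = default;
  // The string literal is carried as the "deleted message" and echoed in
  // diagnostics, e.g. when overload resolution or a cast selects this function.
  Widget(const Widget &) = delete("Widget is move-only; use std::move");
};
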
+ if (!isa(Arg->IgnoreParenImpCasts())) { + Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) + << Arg->getSourceRange(); + return ExprError(); + } + break; + } } if (getLangOpts().HLSL && CheckHLSLBuiltinFunctionCall(BuiltinID, TheCall)) @@ -3295,8 +3307,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, // Detect when host builtins are used in device code only if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(TheCall->getBeginLoc(), - diag::err_builtin_target_unsupported); + SYCL().DiagIfDeviceCode(TheCall->getBeginLoc(), + diag::err_builtin_target_unsupported); } else { if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, TheCall)) @@ -7823,7 +7835,7 @@ bool Sema::CheckIntelSYCLAllocaBuiltinFunctionCall(unsigned, CallExpr *Call) { return true; Ty = Ty->getPointeeType(); return !(Ty.getQualifiers().empty() && - isSyclType(Ty, SYCLTypeAttr::kernel_handler)); + SemaSYCL::isSyclType(Ty, SYCLTypeAttr::kernel_handler)); }; if (CheckArg(FD->getParamDecl(0)->getType())) { Diag(Loc, diag::err_intel_sycl_alloca_wrong_arg) @@ -7835,7 +7847,7 @@ bool Sema::CheckIntelSYCLAllocaBuiltinFunctionCall(unsigned, CallExpr *Call) { // sycl::access::address_space::private_space, DecoratedAddress>`: // - `ET`: cv-unqualified trivial type constexpr auto CheckType = [](QualType RT, const ASTContext &Ctx) { - if (!isSyclType(RT, SYCLTypeAttr::multi_ptr)) + if (!SemaSYCL::isSyclType(RT, SYCLTypeAttr::multi_ptr)) return true; // Check element type const TemplateArgumentList &TAL = @@ -7860,7 +7872,7 @@ bool Sema::CheckIntelSYCLAllocaBuiltinFunctionCall(unsigned, CallExpr *Call) { if (Ty.isNull() || !Ty->isReferenceType()) return true; Ty = Ty->getPointeeType(); - if (!isSyclType(Ty, SYCLTypeAttr::specialization_id)) + if (!SemaSYCL::isSyclType(Ty, SYCLTypeAttr::specialization_id)) return true; const TemplateArgumentList &TAL = cast(Ty->getAsCXXRecordDecl()) @@ -7880,7 +7892,6 @@ bool Sema::CheckIntelSYCLAllocaBuiltinFunctionCall(unsigned, CallExpr *Call) { return false; } - /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo /// parameter with the FormatAttr's correct format_idx and firstDataArg. /// Returns true when the format fits the function and the FormatStringInfo has @@ -8246,7 +8257,6 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, // For variadic functions, we may have more args than parameters. // For some K&R functions, we may have less args than parameters. const auto N = std::min(Proto->getNumParams(), Args.size()); - bool AnyScalableArgsOrRet = Proto->getReturnType()->isSizelessVectorType(); for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { // Args[ArgIdx] can be null in malformed code. if (const Expr *Arg = Args[ArgIdx]) { @@ -8260,8 +8270,6 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, checkAIXMemberAlignment((Arg->getExprLoc()), Arg); QualType ParamTy = Proto->getParamType(ArgIdx); - if (ParamTy->isSizelessVectorType()) - AnyScalableArgsOrRet = true; QualType ArgTy = Arg->getType(); CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), ArgTy, ParamTy); @@ -8282,23 +8290,6 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, } } - // If the call requires a streaming-mode change and has scalable vector - // arguments or return values, then warn the user that the streaming and - // non-streaming vector lengths may be different. 
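Aside: the SemaChecking case added above requires the argument of `__builtin_allow_runtime_check` to be a string literal, otherwise `err_expr_not_string_literal` is emitted. A minimal usage sketch, assuming only what the hunk shows (the check-kind name must be a literal and the builtin yields a boolean); the kind string "null-deref" is an arbitrary example, and compiling this needs a clang that provides the builtin:

```cpp
#include <cstdio>

void guarded(int *p) {
  // OK: the argument is a string literal naming the check kind.
  if (__builtin_allow_runtime_check("null-deref") && p == nullptr)
    std::printf("check tripped\n");

  // Rejected by the check added above: the argument is not a string literal.
  // const char *kind = "null-deref";
  // if (__builtin_allow_runtime_check(kind)) { /* ... */ }  // error: not a string literal
}
```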
- const auto *CallerFD = dyn_cast(CurContext); - if (CallerFD && (!FD || !FD->getBuiltinID()) && AnyScalableArgsOrRet) { - bool IsCalleeStreaming = - ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask; - bool IsCalleeStreamingCompatible = - ExtInfo.AArch64SMEAttributes & - FunctionType::SME_PStateSMCompatibleMask; - ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD); - if (!IsCalleeStreamingCompatible && - (CallerFnType == ArmStreamingCompatible || - ((CallerFnType == ArmStreaming) ^ IsCalleeStreaming))) - Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming); - } - FunctionType::ArmStateValue CalleeArmZAState = FunctionType::getArmZAState(ExtInfo.AArch64SMEAttributes); FunctionType::ArmStateValue CalleeArmZT0State = @@ -8307,7 +8298,7 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, CalleeArmZT0State != FunctionType::ARM_None) { bool CallerHasZAState = false; bool CallerHasZT0State = false; - if (CallerFD) { + if (const auto *CallerFD = dyn_cast(CurContext)) { auto *Attr = CallerFD->getAttr(); if (Attr && Attr->isNewZA()) CallerHasZAState = true; @@ -8361,13 +8352,13 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); if (FD && FD->hasAttr()) - CheckSYCLKernelCall(FD, Args); + SYCL().CheckSYCLKernelCall(FD, Args); // Diagnose variadic calls in SYCL. if (FD && FD->isVariadic() && getLangOpts().SYCLIsDevice && - !isUnevaluatedContext() && !isDeclAllowedInSYCLDeviceCode(FD)) - SYCLDiagIfDeviceCode(Loc, diag::err_sycl_restrict) - << Sema::KernelCallVariadicFunction; + !isUnevaluatedContext() && !SYCL().isDeclAllowedInSYCLDeviceCode(FD)) + SYCL().DiagIfDeviceCode(Loc, diag::err_sycl_restrict) + << SemaSYCL::KernelCallVariadicFunction; } /// CheckConstructorCall - Check a constructor call for correctness and safety @@ -16572,7 +16563,8 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) { if (S.getLangOpts().SYCLIsDevice) - S.SYCLDiagIfDeviceCode(CC, diag::warn_imp_float_size_conversion); + S.SYCL().DiagIfDeviceCode(CC, + diag::warn_imp_float_size_conversion); else DiagnoseImpCast(S, E, T, CC, diag::warn_imp_float_size_conversion); @@ -16588,7 +16580,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, // warning. if (S.Diags.isIgnored(diag::warn_impcast_float_precision, CC)) { if (S.getLangOpts().SYCLIsDevice) - S.SYCLDiagIfDeviceCode(CC, diag::warn_imp_float_size_conversion); + S.SYCL().DiagIfDeviceCode(CC, diag::warn_imp_float_size_conversion); else DiagnoseImpCast(S, E, T, CC, diag::warn_imp_float_size_conversion); } @@ -20033,6 +20025,27 @@ bool Sema::IsLayoutCompatible(QualType T1, QualType T2) const { return isLayoutCompatible(getASTContext(), T1, T2); } +//===-------------- Pointer interconvertibility ----------------------------// + +bool Sema::IsPointerInterconvertibleBaseOf(const TypeSourceInfo *Base, + const TypeSourceInfo *Derived) { + QualType BaseT = Base->getType()->getCanonicalTypeUnqualified(); + QualType DerivedT = Derived->getType()->getCanonicalTypeUnqualified(); + + if (BaseT->isStructureOrClassType() && DerivedT->isStructureOrClassType() && + getASTContext().hasSameType(BaseT, DerivedT)) + return true; + + if (!IsDerivedFrom(Derived->getTypeLoc().getBeginLoc(), DerivedT, BaseT)) + return false; + + // Per [basic.compound]/4.3, containing object has to be standard-layout. 
+ if (DerivedT->getAsCXXRecordDecl()->isStandardLayout()) + return true; + + return false; +} + //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// /// Given a type tag expression find the type tag itself. diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp index 83ebcaf9e765a..c335017f243eb 100644 --- a/clang/lib/Sema/SemaCodeComplete.cpp +++ b/clang/lib/Sema/SemaCodeComplete.cpp @@ -3691,7 +3691,7 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl( std::string Keyword; if (Idx > StartParameter) Result.AddChunk(CodeCompletionString::CK_HorizontalSpace); - if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx)) + if (const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx)) Keyword += II->getName(); Keyword += ":"; if (Idx < StartParameter || AllParametersAreInformative) @@ -3720,7 +3720,7 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl( Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(), ParamType); Arg += ParamType.getAsString(Policy) + ")"; - if (IdentifierInfo *II = (*P)->getIdentifier()) + if (const IdentifierInfo *II = (*P)->getIdentifier()) if (DeclaringEntity || AllParametersAreInformative) Arg += II->getName(); } @@ -4500,11 +4500,11 @@ void Sema::CodeCompleteOrdinaryName(Scope *S, Results.data(), Results.size()); } -static void AddClassMessageCompletions(Sema &SemaRef, Scope *S, - ParsedType Receiver, - ArrayRef SelIdents, - bool AtArgumentExpression, bool IsSuper, - ResultBuilder &Results); +static void +AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver, + ArrayRef SelIdents, + bool AtArgumentExpression, bool IsSuper, + ResultBuilder &Results); void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, @@ -4928,7 +4928,7 @@ void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E, /// The set of properties that have already been added, referenced by /// property name. -typedef llvm::SmallPtrSet AddedPropertiesSet; +typedef llvm::SmallPtrSet AddedPropertiesSet; /// Retrieve the container definition, if any? 
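Aside: the `Sema::IsPointerInterconvertibleBaseOf` helper completed near the start of this hunk keys its result on the derived class being standard-layout, per [basic.compound]/4.3. A small illustration of that rule through the C++20 library trait `std::is_pointer_interconvertible_base_of` (library/compiler support varies, so treat this as a sketch of the language rule rather than of this exact Sema entry point):

```cpp
#include <type_traits>

struct B { int x; };
struct D : B {};          // standard-layout: a D and its B subobject are pointer-interconvertible
struct E : B { int y; };  // not standard-layout: data members in both E and its base

static_assert(std::is_pointer_interconvertible_base_of_v<B, B>);   // same class type
static_assert(std::is_pointer_interconvertible_base_of_v<B, D>);
static_assert(!std::is_pointer_interconvertible_base_of_v<B, E>);  // E is not standard-layout
```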
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) { @@ -5090,7 +5090,7 @@ AddObjCProperties(const CodeCompletionContext &CCContext, PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema()); // Adds a method result const auto AddMethod = [&](const ObjCMethodDecl *M) { - IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0); + const IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0); if (!Name) return; if (!AddedProperties.insert(Name).second) @@ -5859,10 +5859,10 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, } void Sema::CodeCompleteObjCClassPropertyRefExpr(Scope *S, - IdentifierInfo &ClassName, + const IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement) { - IdentifierInfo *ClassNamePtr = &ClassName; + const IdentifierInfo *ClassNamePtr = &ClassName; ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(ClassNamePtr, ClassNameLoc); if (!IFace) return; @@ -7527,7 +7527,7 @@ enum ObjCMethodKind { }; static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind, - ArrayRef SelIdents, + ArrayRef SelIdents, bool AllowSameLength = true) { unsigned NumSelIdents = SelIdents.size(); if (NumSelIdents > Sel.getNumArgs()) @@ -7554,7 +7554,7 @@ static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind, static bool isAcceptableObjCMethod(ObjCMethodDecl *Method, ObjCMethodKind WantKind, - ArrayRef SelIdents, + ArrayRef SelIdents, bool AllowSameLength = true) { return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents, AllowSameLength); @@ -7586,7 +7586,7 @@ typedef llvm::SmallPtrSet VisitedSelectorSet; /// \param Results the structure into which we'll add results. static void AddObjCMethods(ObjCContainerDecl *Container, bool WantInstanceMethods, ObjCMethodKind WantKind, - ArrayRef SelIdents, + ArrayRef SelIdents, DeclContext *CurContext, VisitedSelectorSet &Selectors, bool AllowSameLength, ResultBuilder &Results, bool InOriginalClass = true, @@ -7819,7 +7819,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) { if (Sel.isNull()) return nullptr; - IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0); + const IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0); if (!Id) return nullptr; @@ -7895,7 +7895,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) { /// this "super" completion. If NULL, no completion was added. static ObjCMethodDecl * AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword, - ArrayRef SelIdents, + ArrayRef SelIdents, ResultBuilder &Results) { ObjCMethodDecl *CurMethod = S.getCurMethodDecl(); if (!CurMethod) @@ -8032,9 +8032,9 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) { Results.data(), Results.size()); } -void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, - ArrayRef SelIdents, - bool AtArgumentExpression) { +void Sema::CodeCompleteObjCSuperMessage( + Scope *S, SourceLocation SuperLoc, + ArrayRef SelIdents, bool AtArgumentExpression) { ObjCInterfaceDecl *CDecl = nullptr; if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) { // Figure out which interface we're in. @@ -8059,7 +8059,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, } else { // "super" may be the name of a type or variable. Figure out which // it is. 
- IdentifierInfo *Super = getSuperIdentifier(); + const IdentifierInfo *Super = getSuperIdentifier(); NamedDecl *ND = LookupSingleName(S, Super, SuperLoc, LookupOrdinaryName); if ((CDecl = dyn_cast_or_null(ND))) { // "super" names an interface. Use it. @@ -8127,11 +8127,11 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results, return PreferredType; } -static void AddClassMessageCompletions(Sema &SemaRef, Scope *S, - ParsedType Receiver, - ArrayRef SelIdents, - bool AtArgumentExpression, bool IsSuper, - ResultBuilder &Results) { +static void +AddClassMessageCompletions(Sema &SemaRef, Scope *S, ParsedType Receiver, + ArrayRef SelIdents, + bool AtArgumentExpression, bool IsSuper, + ResultBuilder &Results) { typedef CodeCompletionResult Result; ObjCInterfaceDecl *CDecl = nullptr; @@ -8202,10 +8202,9 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S, Results.ExitScope(); } -void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, - ArrayRef SelIdents, - bool AtArgumentExpression, - bool IsSuper) { +void Sema::CodeCompleteObjCClassMessage( + Scope *S, ParsedType Receiver, ArrayRef SelIdents, + bool AtArgumentExpression, bool IsSuper) { QualType T = this->GetTypeFromParser(Receiver); @@ -8237,10 +8236,9 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, Results.data(), Results.size()); } -void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, - ArrayRef SelIdents, - bool AtArgumentExpression, - ObjCInterfaceDecl *Super) { +void Sema::CodeCompleteObjCInstanceMessage( + Scope *S, Expr *Receiver, ArrayRef SelIdents, + bool AtArgumentExpression, ObjCInterfaceDecl *Super) { typedef CodeCompletionResult Result; Expr *RecExpr = static_cast(Receiver); @@ -8410,8 +8408,8 @@ void Sema::CodeCompleteObjCForCollection(Scope *S, CodeCompleteExpression(S, Data); } -void Sema::CodeCompleteObjCSelector(Scope *S, - ArrayRef SelIdents) { +void Sema::CodeCompleteObjCSelector( + Scope *S, ArrayRef SelIdents) { // If we have an external source, load the entire class method // pool from the AST file. 
if (ExternalSource) { @@ -9166,8 +9164,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // Add -(void)getKey:(type **)buffer range:(NSRange)inRange if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("get") + UpperKey).str(); - IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), - &Context.Idents.get("range")}; + const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), + &Context.Idents.get("range")}; if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) { if (ReturnType.isNull()) { @@ -9198,8 +9196,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str(); - IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"), - &Context.Idents.get(SelectorName)}; + const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"), + &Context.Idents.get(SelectorName)}; if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) { if (ReturnType.isNull()) { @@ -9228,8 +9226,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("insert") + UpperKey).str(); - IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), - &Context.Idents.get("atIndexes")}; + const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), + &Context.Idents.get("atIndexes")}; if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) { if (ReturnType.isNull()) { @@ -9258,7 +9256,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("removeObjectFrom") + UpperKey + "AtIndex").str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9279,7 +9277,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // -(void)removeKeyAtIndexes:(NSIndexSet *)indexes if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("remove") + UpperKey + "AtIndexes").str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9301,8 +9299,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("replaceObjectIn") + UpperKey + "AtIndex").str(); - IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), - &Context.Idents.get("withObject")}; + const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName), + &Context.Idents.get("withObject")}; if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) { if (ReturnType.isNull()) { @@ -9332,8 +9330,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, std::string SelectorName1 = 
(Twine("replace") + UpperKey + "AtIndexes").str(); std::string SelectorName2 = (Twine("with") + UpperKey).str(); - IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1), - &Context.Idents.get(SelectorName2)}; + const IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1), + &Context.Idents.get(SelectorName2)}; if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) { if (ReturnType.isNull()) { @@ -9368,7 +9366,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, ->getInterfaceDecl() ->getName() == "NSEnumerator"))) { std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId)) .second) { if (ReturnType.isNull()) { @@ -9387,7 +9385,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, if (IsInstanceMethod && (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) { std::string SelectorName = (Twine("memberOf") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9417,7 +9415,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("add") + UpperKey + Twine("Object")).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9439,7 +9437,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // - (void)addKey:(NSSet *)objects if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("add") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9461,7 +9459,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("remove") + UpperKey + Twine("Object")).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9483,7 +9481,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // - (void)removeKey:(NSSet *)objects if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("remove") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9504,7 +9502,7 @@ 
static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, // - (void)intersectKey:(NSSet *)objects if (IsInstanceMethod && ReturnTypeMatchesVoid) { std::string SelectorName = (Twine("intersect") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) { if (ReturnType.isNull()) { Builder.AddChunk(CodeCompletionString::CK_LeftParen); @@ -9533,7 +9531,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, ->getName() == "NSSet"))) { std::string SelectorName = (Twine("keyPathsForValuesAffecting") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId)) .second) { if (ReturnType.isNull()) { @@ -9554,7 +9552,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property, ReturnType->isBooleanType())) { std::string SelectorName = (Twine("automaticallyNotifiesObserversOf") + UpperKey).str(); - IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); + const IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName); if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId)) .second) { if (ReturnType.isNull()) { @@ -9749,7 +9747,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, void Sema::CodeCompleteObjCMethodDeclSelector( Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnTy, - ArrayRef SelIdents) { + ArrayRef SelIdents) { // If we have an external source, load the entire class method // pool from the AST file. if (ExternalSource) { diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index a5b0ebff72482..816725efa32d6 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -45,8 +45,13 @@ #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" +#include "clang/Sema/SemaHLSL.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaOpenMP.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/Template.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/TargetParser/Triple.h" @@ -2318,7 +2323,7 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) { /// /// \returns The declaration of the named Objective-C class, or NULL if the /// class could not be found. -ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id, +ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(const IdentifierInfo *&Id, SourceLocation IdLoc, bool DoTypoCorrection) { // The third "scope" argument is 0 since we aren't enabling lazy built-in @@ -3030,10 +3035,10 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D, else if (const auto *A = dyn_cast(Attr)) NewAttr = S.MergeSYCLReqdWorkGroupSizeAttr(D, *A); else if (const auto *NT = dyn_cast(Attr)) - NewAttr = - S.mergeHLSLNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), NT->getZ()); + NewAttr = S.HLSL().mergeNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), + NT->getZ()); else if (const auto *SA = dyn_cast(Attr)) - NewAttr = S.mergeHLSLShaderAttr(D, *SA, SA->getType()); + NewAttr = S.HLSL().mergeShaderAttr(D, *SA, SA->getType()); else if (isa(Attr)) // Do nothing. Each redeclaration should be suppressed separately. 
NewAttr = nullptr; @@ -3091,7 +3096,7 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) { if (isa(NewAttribute) || isa(NewAttribute)) { if (FunctionDecl *FD = dyn_cast(New)) { - Sema::SkipBodyInfo SkipBody; + SkipBodyInfo SkipBody; S.CheckForFunctionRedefinition(FD, cast(Def), &SkipBody); // If we're skipping this definition, drop the "alias" attribute. @@ -4106,13 +4111,13 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S, } else { Diag(NewMethod->getLocation(), diag::err_definition_of_implicitly_declared_member) - << New << getSpecialMember(OldMethod); + << New << llvm::to_underlying(getSpecialMember(OldMethod)); return true; } } else if (OldMethod->getFirstDecl()->isExplicitlyDefaulted() && !isFriend) { Diag(NewMethod->getLocation(), diag::err_definition_of_explicitly_defaulted_member) - << getSpecialMember(OldMethod); + << llvm::to_underlying(getSpecialMember(OldMethod)); return true; } } @@ -5428,7 +5433,7 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef, Scope *S, LookupResult R(SemaRef, Name, NameLoc, Owner->isRecord() ? Sema::LookupMemberName : Sema::LookupOrdinaryName, - Sema::ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); if (!SemaRef.LookupName(R, S)) return false; // Pick a representative declaration. @@ -6223,11 +6228,12 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) { // Check if we are in an `omp begin/end declare variant` scope. Handle this // declaration only if the `bind_to_declaration` extension is set. SmallVector Bases; - if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope()) - if (getOMPTraitInfoForSurroundingScope()->isExtensionActive(llvm::omp::TraitProperty:: - implementation_extension_bind_to_declaration)) - ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( - S, D, MultiTemplateParamsArg(), Bases); + if (LangOpts.OpenMP && OpenMP().isInOpenMPDeclareVariantScope()) + if (OpenMP().getOMPTraitInfoForSurroundingScope()->isExtensionActive( + llvm::omp::TraitProperty:: + implementation_extension_bind_to_declaration)) + OpenMP().ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( + S, D, MultiTemplateParamsArg(), Bases); Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg()); @@ -6236,7 +6242,8 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) { Dcl->setTopLevelDeclInObjCContainer(); if (!Bases.empty()) - ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases); + OpenMP().ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, + Bases); return Dcl; } @@ -6393,16 +6400,15 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, if (TST->isDependentType() && TST->isTypeAlias()) Diag(Loc, diag::ext_alias_template_in_declarative_nns) << SpecLoc.getLocalSourceRange(); - } else if (T->isDecltypeType()) { + } else if (T->isDecltypeType() || T->getAsAdjusted()) { // C++23 [expr.prim.id.qual]p2: // [...] A declarative nested-name-specifier shall not have a - // decltype-specifier. + // computed-type-specifier. // - // FIXME: This wording appears to be defective as it does not forbid - // declarative nested-name-specifiers with pack-index-specifiers. - // See https://github.com/cplusplus/CWG/issues/499. - Diag(Loc, diag::err_decltype_in_declarator) - << SpecLoc.getTypeLoc().getSourceRange(); + // CWG2858 changed this from 'decltype-specifier' to + // 'computed-type-specifier'. 
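Aside on the `err_computed_type_in_declarative_nns` diagnostic emitted immediately after this note: CWG2858 widens the old decltype-only restriction to any computed-type-specifier in a declarative nested-name-specifier. A hedged illustration of the accepted and rejected shapes (the exact wording of the new diagnostic is whatever the .td entry says):

```cpp
struct A { void f(); };

void A::f() {}   // OK: the declarative nested-name-specifier names A directly

// Ill-formed per C++23 [expr.prim.id.qual]p2 as amended by CWG2858 -- a
// declarative nested-name-specifier may not contain a computed-type-specifier:
// void decltype(A())::f() {}
```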
+ Diag(Loc, diag::err_computed_type_in_declarative_nns) + << T->isDecltypeType() << SpecLoc.getTypeLoc().getSourceRange(); } } } while ((SpecLoc = SpecLoc.getPrefix())); @@ -6523,7 +6529,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D, if (IsLinkageLookup) { Previous.clear(LookupRedeclarationWithLinkage); - Previous.setRedeclarationKind(ForExternalRedeclaration); + Previous.setRedeclarationKind( + RedeclarationKind::ForExternalRedeclaration); } LookupName(Previous, S, CreateBuiltins); @@ -6624,8 +6631,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D, if (New->getDeclName() && AddToScope) PushOnScopeChains(New, S); - if (isInOpenMPDeclareTargetContext()) - checkDeclIsAllowedInOpenMPTarget(nullptr, New); + if (OpenMP().isInOpenMPDeclareTargetContext()) + OpenMP().checkDeclIsAllowedInOpenMPTarget(nullptr, New); return New; } @@ -7972,13 +7979,13 @@ NamedDecl *Sema::ActOnVariableDeclarator( if (getLangOpts().SYCLIsDevice) { // device_global array is not allowed. if (const ArrayType *AT = getASTContext().getAsArrayType(NewVD->getType())) - if (isTypeDecoratedWithDeclAttribute( + if (SYCL().isTypeDecoratedWithDeclAttribute( AT->getElementType())) Diag(NewVD->getLocation(), diag::err_sycl_device_global_array); // Global variables with types decorated with device_global attribute must // be static if they are declared in SYCL device code. - if (isTypeDecoratedWithDeclAttribute( + if (SYCL().isTypeDecoratedWithDeclAttribute( NewVD->getType())) { if (SCSpec == DeclSpec::SCS_static) { const DeclContext *DC = NewVD->getDeclContext(); @@ -8000,10 +8007,10 @@ NamedDecl *Sema::ActOnVariableDeclarator( // constexpr unless their types are decorated with global_variable_allowed // attribute. if (SCSpec == DeclSpec::SCS_static && !R.isConstant(Context) && - !isTypeDecoratedWithDeclAttribute( + !SYCL().isTypeDecoratedWithDeclAttribute( NewVD->getType())) - SYCLDiagIfDeviceCode(D.getIdentifierLoc(), diag::err_sycl_restrict) - << Sema::KernelNonConstStaticDataVariable; + SYCL().DiagIfDeviceCode(D.getIdentifierLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelNonConstStaticDataVariable; } switch (D.getDeclSpec().getConstexprSpecifier()) { @@ -8168,8 +8175,8 @@ NamedDecl *Sema::ActOnVariableDeclarator( if (!Context.getTargetInfo().isValidGCCRegisterName(Label) && DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl())) { if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(E->getExprLoc(), - diag::err_asm_unknown_register_name) + SYCL().DiagIfDeviceCode(E->getExprLoc(), + diag::err_asm_unknown_register_name) << Label; else Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label; @@ -8362,7 +8369,7 @@ NamedDecl *Sema::ActOnVariableDeclarator( if (IsMemberSpecialization && !NewVD->isInvalidDecl()) CompleteMemberSpecialization(NewVD, Previous); - addSyclVarDecl(NewVD); + SYCL().addSyclVarDecl(NewVD); emitReadOnlyPlacementAttrWarning(*this, NewVD); return NewVD; @@ -8618,7 +8625,8 @@ void Sema::CheckShadow(Scope *S, VarDecl *D) { return; LookupResult R(*this, D->getDeclName(), D->getLocation(), - Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration); + Sema::LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration); LookupName(R, S); if (NamedDecl *ShadowedDecl = getShadowedDeclaration(D, R)) CheckShadow(D, ShadowedDecl, R); @@ -9258,7 +9266,7 @@ static NamedDecl *DiagnoseInvalidRedeclaration( LookupResult Prev(SemaRef, Name, NewFD->getLocation(), IsLocalFriend ? 
Sema::LookupLocalFriendName : Sema::LookupOrdinaryName, - Sema::ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); NewFD->setInvalidDecl(); if (IsLocalFriend) @@ -10696,12 +10704,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, // We do not add HD attributes to specializations here because // they may have different constexpr-ness compared to their - // templates and, after maybeAddCUDAHostDeviceAttrs() is applied, + // templates and, after maybeAddHostDeviceAttrs() is applied, // may end up with different effective targets. Instead, a // specialization inherits its target attributes from its template // in the CheckFunctionTemplateSpecialization() call below. if (getLangOpts().CUDA && !isFunctionTemplateSpecialization) - maybeAddCUDAHostDeviceAttrs(NewFD, Previous); + CUDA().maybeAddHostDeviceAttrs(NewFD, Previous); // Handle explict specializations of function templates // and friend function declarations with an explicit @@ -10911,10 +10919,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (getLangOpts().HLSL && D.isFunctionDefinition()) { // Any top level function could potentially be specified as an entry. if (!NewFD->isInvalidDecl() && S->getDepth() == 0 && Name.isIdentifier()) - ActOnHLSLTopLevelFunction(NewFD); + HLSL().ActOnTopLevelFunction(NewFD); if (NewFD->hasAttr()) - CheckHLSLEntryPoint(NewFD); + HLSL().CheckEntryPoint(NewFD); } // If this is the first declaration of a library builtin function, add @@ -10999,12 +11007,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (getLangOpts().CUDA) { IdentifierInfo *II = NewFD->getIdentifier(); - if (II && II->isStr(getCudaConfigureFuncName()) && + if (II && II->isStr(CUDA().getConfigureFuncName()) && !NewFD->isInvalidDecl() && NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) { if (!R->castAs()->getReturnType()->isScalarType()) Diag(NewFD->getLocation(), diag::err_config_scalar_return) - << getCudaConfigureFuncName(); + << CUDA().getConfigureFuncName(); Context.setcudaConfigureCallDecl(NewFD); } @@ -12010,8 +12018,14 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD, return false; } + const llvm::Triple &T = S.getASTContext().getTargetInfo().getTriple(); + // Target attribute on AArch64 is not used for multiversioning - if (NewTA && S.getASTContext().getTargetInfo().getTriple().isAArch64()) + if (NewTA && T.isAArch64()) + return false; + + // Target attribute on RISCV is not used for multiversioning + if (NewTA && T.isRISCV()) return false; if (!OldDecl || !OldDecl->getAsFunction() || @@ -12358,7 +12372,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, } if (LangOpts.OpenMP) - ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(NewFD); + OpenMP().ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(NewFD); // Semantic checking for this function declaration (in isolation). @@ -12489,26 +12503,16 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, } if (!Redeclaration && LangOpts.CUDA) - checkCUDATargetOverload(NewFD, Previous); + CUDA().checkTargetOverload(NewFD, Previous); } // Check if the function definition uses any AArch64 SME features without - // having the '+sme' feature enabled and warn user if sme locally streaming - // function returns or uses arguments with VL-based types. + // having the '+sme' feature enabled. 
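Aside: many SemaDecl.cpp hunks above replace direct `Sema` members with calls through per-language helpers, e.g. `CUDA().IdentifyTarget(...)`, `HLSL().ActOnTopLevelFunction(...)`, `OpenMP().isInOpenMPDeclareVariantScope()`, and `SYCL().DiagIfDeviceCode(...)`, matching the new SemaCUDA/SemaHLSL/SemaOpenMP/SemaSYCL includes. A rough sketch of that accessor pattern, assuming owned helper objects behind small accessors; only the accessor names appear in the diff, the member names below are made up:

```cpp
#include <memory>

// Stand-in helper types; in clang these are SemaCUDA, SemaHLSL, SemaOpenMP,
// and SemaSYCL, declared in their own headers.
struct SemaCUDA {};
struct SemaHLSL {};
struct SemaOpenMP {};
struct SemaSYCL {};

class Sema {
public:
  Sema()
      : CUDAPtr(std::make_unique<SemaCUDA>()), HLSLPtr(std::make_unique<SemaHLSL>()),
        OpenMPPtr(std::make_unique<SemaOpenMP>()), SYCLPtr(std::make_unique<SemaSYCL>()) {}

  // Call sites migrate from S.someCUDAEntryPoint(...) to S.CUDA().entryPoint(...).
  SemaCUDA &CUDA() { return *CUDAPtr; }
  SemaHLSL &HLSL() { return *HLSLPtr; }
  SemaOpenMP &OpenMP() { return *OpenMPPtr; }
  SemaSYCL &SYCL() { return *SYCLPtr; }

private:
  std::unique_ptr<SemaCUDA> CUDAPtr;    // hypothetical member names
  std::unique_ptr<SemaHLSL> HLSLPtr;
  std::unique_ptr<SemaOpenMP> OpenMPPtr;
  std::unique_ptr<SemaSYCL> SYCLPtr;
};
```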
if (DeclIsDefn) { const auto *Attr = NewFD->getAttr(); bool UsesSM = NewFD->hasAttr(); bool UsesZA = Attr && Attr->isNewZA(); bool UsesZT0 = Attr && Attr->isNewZT0(); - - if (NewFD->hasAttr()) { - if (NewFD->getReturnType()->isSizelessVectorType() || - llvm::any_of(NewFD->parameters(), [](ParmVarDecl *P) { - return P->getOriginalType()->isSizelessVectorType(); - })) - Diag(NewFD->getLocation(), - diag::warn_sme_locally_streaming_has_vl_args_returns); - } if (const auto *FPT = NewFD->getType()->getAs()) { FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); UsesSM |= @@ -12758,126 +12762,7 @@ void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) { } } -void Sema::ActOnHLSLTopLevelFunction(FunctionDecl *FD) { - auto &TargetInfo = getASTContext().getTargetInfo(); - - if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry) - return; - - StringRef Env = TargetInfo.getTriple().getEnvironmentName(); - HLSLShaderAttr::ShaderType ShaderType; - if (HLSLShaderAttr::ConvertStrToShaderType(Env, ShaderType)) { - if (const auto *Shader = FD->getAttr()) { - // The entry point is already annotated - check that it matches the - // triple. - if (Shader->getType() != ShaderType) { - Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch) - << Shader; - FD->setInvalidDecl(); - } - } else { - // Implicitly add the shader attribute if the entry function isn't - // explicitly annotated. - FD->addAttr(HLSLShaderAttr::CreateImplicit(Context, ShaderType, - FD->getBeginLoc())); - } - } else { - switch (TargetInfo.getTriple().getEnvironment()) { - case llvm::Triple::UnknownEnvironment: - case llvm::Triple::Library: - break; - default: - llvm_unreachable("Unhandled environment in triple"); - } - } -} - -void Sema::CheckHLSLEntryPoint(FunctionDecl *FD) { - const auto *ShaderAttr = FD->getAttr(); - assert(ShaderAttr && "Entry point has no shader attribute"); - HLSLShaderAttr::ShaderType ST = ShaderAttr->getType(); - - switch (ST) { - case HLSLShaderAttr::Pixel: - case HLSLShaderAttr::Vertex: - case HLSLShaderAttr::Geometry: - case HLSLShaderAttr::Hull: - case HLSLShaderAttr::Domain: - case HLSLShaderAttr::RayGeneration: - case HLSLShaderAttr::Intersection: - case HLSLShaderAttr::AnyHit: - case HLSLShaderAttr::ClosestHit: - case HLSLShaderAttr::Miss: - case HLSLShaderAttr::Callable: - if (const auto *NT = FD->getAttr()) { - DiagnoseHLSLAttrStageMismatch(NT, ST, - {HLSLShaderAttr::Compute, - HLSLShaderAttr::Amplification, - HLSLShaderAttr::Mesh}); - FD->setInvalidDecl(); - } - break; - - case HLSLShaderAttr::Compute: - case HLSLShaderAttr::Amplification: - case HLSLShaderAttr::Mesh: - if (!FD->hasAttr()) { - Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads) - << HLSLShaderAttr::ConvertShaderTypeToStr(ST); - FD->setInvalidDecl(); - } - break; - } - - for (ParmVarDecl *Param : FD->parameters()) { - if (const auto *AnnotationAttr = Param->getAttr()) { - CheckHLSLSemanticAnnotation(FD, Param, AnnotationAttr); - } else { - // FIXME: Handle struct parameters where annotations are on struct fields. - // See: https://github.com/llvm/llvm-project/issues/57875 - Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation); - Diag(Param->getLocation(), diag::note_previous_decl) << Param; - FD->setInvalidDecl(); - } - } - // FIXME: Verify return type semantic annotation. 
-} - -void Sema::CheckHLSLSemanticAnnotation( - FunctionDecl *EntryPoint, const Decl *Param, - const HLSLAnnotationAttr *AnnotationAttr) { - auto *ShaderAttr = EntryPoint->getAttr(); - assert(ShaderAttr && "Entry point has no shader attribute"); - HLSLShaderAttr::ShaderType ST = ShaderAttr->getType(); - - switch (AnnotationAttr->getKind()) { - case attr::HLSLSV_DispatchThreadID: - case attr::HLSLSV_GroupIndex: - if (ST == HLSLShaderAttr::Compute) - return; - DiagnoseHLSLAttrStageMismatch(AnnotationAttr, ST, - {HLSLShaderAttr::Compute}); - break; - default: - llvm_unreachable("Unknown HLSLAnnotationAttr"); - } -} - -void Sema::DiagnoseHLSLAttrStageMismatch( - const Attr *A, HLSLShaderAttr::ShaderType Stage, - std::initializer_list AllowedStages) { - SmallVector StageStrings; - llvm::transform(AllowedStages, std::back_inserter(StageStrings), - [](HLSLShaderAttr::ShaderType ST) { - return StringRef( - HLSLShaderAttr::ConvertShaderTypeToStr(ST)); - }); - Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage) - << A << HLSLShaderAttr::ConvertShaderTypeToStr(Stage) - << (AllowedStages.size() != 1) << join(StageStrings, ", "); -} - -bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) { +bool Sema::CheckForConstantInitializer(Expr *Init, unsigned DiagID) { // FIXME: Need strict checking. In C89, we need to check for // any assignment, increment, decrement, function-calls, or // commas outside of a sizeof. In C99, it's the same list, @@ -12895,8 +12780,7 @@ bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) { const Expr *Culprit; if (Init->isConstantInitializer(Context, false, &Culprit)) return false; - Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant) - << Culprit->getSourceRange(); + Diag(Culprit->getExprLoc(), DiagID) << Culprit->getSourceRange(); return true; } @@ -13853,7 +13737,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) { } // In the SYCL explicit SIMD extension non constant "private globals" can't // be explicitly initialized in the declaration. - if (isSYCLEsimdPrivateGlobal(VDecl)) { + if (SYCL().isSYCLEsimdPrivateGlobal(VDecl)) { Diag(VDecl->getLocation(), diag::err_esimd_glob_cant_init); VDecl->setInvalidDecl(); return; @@ -14021,29 +13905,24 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) { // OpenCL v1.2 s6.5.3: __constant locals must be constant-initialized. // This is true even in C++ for OpenCL. } else if (VDecl->getType().getAddressSpace() == LangAS::opencl_constant) { - CheckForConstantInitializer(Init, DclT); + CheckForConstantInitializer(Init); - // Otherwise, C++ does not restrict the initializer. + // Otherwise, C++ does not restrict the initializer. } else if (getLangOpts().CPlusPlus) { // do nothing // C99 6.7.8p4: All the expressions in an initializer for an object that has // static storage duration shall be constant expressions or string literals. } else if (VDecl->getStorageClass() == SC_Static) { - CheckForConstantInitializer(Init, DclT); + CheckForConstantInitializer(Init); - // C89 is stricter than C99 for aggregate initializers. - // C89 6.5.7p3: All the expressions [...] in an initializer list - // for an object that has aggregate or union type shall be - // constant expressions. + // C89 is stricter than C99 for aggregate initializers. + // C89 6.5.7p3: All the expressions [...] in an initializer list + // for an object that has aggregate or union type shall be + // constant expressions. 
} else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() && isa(Init)) { - const Expr *Culprit; - if (!Init->isConstantInitializer(Context, false, &Culprit)) { - Diag(Culprit->getExprLoc(), - diag::ext_aggregate_init_not_constant) - << Culprit->getSourceRange(); - } + CheckForConstantInitializer(Init, diag::ext_aggregate_init_not_constant); } if (auto *E = dyn_cast(Init)) @@ -14176,7 +14055,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) { // Avoid duplicate diagnostics for constexpr variables. if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl() && !VDecl->isConstexpr()) - CheckForConstantInitializer(Init, DclT); + CheckForConstantInitializer(Init); } QualType InitType = Init->getType(); @@ -14510,7 +14389,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) { return; // In SYCL explicit SIMD extension "private global" variables can't be // initialized even implicitly, so don't synthesize an implicit initializer. - if (isSYCLEsimdPrivateGlobal(Var)) + if (SYCL().isSYCLEsimdPrivateGlobal(Var)) return; // C++03 [dcl.init]p9: @@ -14637,7 +14516,7 @@ StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { if (var->isInvalidDecl()) return; - MaybeAddCUDAConstantAttr(var); + CUDA().MaybeAddConstantAttr(var); if (getLangOpts().OpenCL) { // OpenCL v2.0 s6.12.5 - Every block variable declaration must have an @@ -14652,7 +14531,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { } if (getLangOpts().SYCLIsDevice) - checkSYCLDeviceVarDecl(var); + SYCL().checkSYCLDeviceVarDecl(var); // In Objective-C, don't allow jumps past the implicit initialization of a // local retaining variable. @@ -15054,7 +14933,7 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) { // variables whether they are local or not. CUDA also allows // constant initializers for __constant__ and __device__ variables. if (getLangOpts().CUDA) - checkAllowedCUDAInitializer(VD); + CUDA().checkAllowedInitializer(VD); // Grab the dllimport or dllexport attribute off of the VarDecl. const InheritableAttr *DLLAttr = getDLLAttr(VD); @@ -15180,7 +15059,7 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, if (auto *VD = dyn_cast(D); LangOpts.OpenMP && VD && VD->hasAttr() && VD->hasGlobalStorage()) - ActOnOpenMPDeclareTargetInitializer(D); + OpenMP().ActOnOpenMPDeclareTargetInitializer(D); // For declarators, there are some additional syntactic-ish checks we need // to perform. if (auto *DD = dyn_cast(D)) { @@ -15420,10 +15299,10 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D, QualType parmDeclType = TInfo->getType(); // Check for redeclaration of parameters, e.g. int foo(int x, int x); - IdentifierInfo *II = D.getIdentifier(); + const IdentifierInfo *II = D.getIdentifier(); if (II) { LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(R, S); if (!R.empty()) { NamedDecl *PrevDecl = *R.begin(); @@ -15572,9 +15451,9 @@ QualType Sema::AdjustParameterTypeForObjCAutoRefCount(QualType T, } ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc, - SourceLocation NameLoc, IdentifierInfo *Name, - QualType T, TypeSourceInfo *TSInfo, - StorageClass SC) { + SourceLocation NameLoc, + const IdentifierInfo *Name, QualType T, + TypeSourceInfo *TSInfo, StorageClass SC) { // In ARC, infer a lifetime qualifier for appropriate parameter types. 
if (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_None && @@ -15719,8 +15598,8 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D, // specialization function under the OpenMP context defined as part of the // `omp begin declare variant`. SmallVector Bases; - if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope()) - ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( + if (LangOpts.OpenMP && OpenMP().isInOpenMPDeclareVariantScope()) + OpenMP().ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( ParentScope, D, TemplateParameterLists, Bases); D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition); @@ -15728,7 +15607,8 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D, Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody, BodyKind); if (!Bases.empty()) - ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases); + OpenMP().ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, + Bases); return Dcl; } @@ -16307,7 +16187,17 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, // This is meant to pop the context added in ActOnStartOfFunctionDef(). ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD)); if (FD) { - FD->setBody(Body); + // If this is called by Parser::ParseFunctionDefinition() after marking + // the declaration as deleted, and if the deleted-function-body contains + // a message (C++26), then a DefaultedOrDeletedInfo will have already been + // added to store that message; do not overwrite it in that case. + // + // Since this would always set the body to 'nullptr' in that case anyway, + // which is already done when the function decl is initially created, + // always skipping this irrespective of whether there is a delete message + // should not be a problem. + if (!FD->isDeletedAsWritten()) + FD->setBody(Body); FD->setWillHaveBody(false); CheckImmediateEscalatingFunctionDefinition(FD, FSI); @@ -17646,7 +17536,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, RedeclarationKind Redecl = forRedeclarationInCurContext(); if (TUK == TUK_Friend || TUK == TUK_Reference) - Redecl = NotForRedeclaration; + Redecl = RedeclarationKind::NotForRedeclaration; /// Create a new tag decl in C/ObjC. Since the ODR-like semantics for ObjC/C /// implemented asks for structural equivalence checking, the returned decl @@ -18666,8 +18556,9 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) { // Note that FieldName may be null for anonymous bitfields. 
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc, - IdentifierInfo *FieldName, QualType FieldTy, - bool IsMsStruct, Expr *BitWidth) { + const IdentifierInfo *FieldName, + QualType FieldTy, bool IsMsStruct, + Expr *BitWidth) { assert(BitWidth); if (BitWidth->containsErrors()) return ExprError(); @@ -18776,7 +18667,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record, return nullptr; } - IdentifierInfo *II = D.getIdentifier(); + const IdentifierInfo *II = D.getIdentifier(); SourceLocation Loc = DeclStart; if (II) Loc = D.getIdentifierLoc(); @@ -18806,7 +18697,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record, // Check to see if this name was declared as a member previously NamedDecl *PrevDecl = nullptr; LookupResult Previous(*this, II, Loc, LookupMemberName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S); switch (Previous.getResultKind()) { case LookupResult::Found: @@ -18877,7 +18768,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D) { - IdentifierInfo *II = Name.getAsIdentifierInfo(); + const IdentifierInfo *II = Name.getAsIdentifierInfo(); bool InvalidDecl = false; if (D) InvalidDecl = D->isInvalidType(); @@ -19074,22 +18965,22 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) { // because otherwise we'll never get complaints about // copy constructors. - CXXSpecialMember member = CXXInvalid; + CXXSpecialMemberKind member = CXXSpecialMemberKind::Invalid; // We're required to check for any non-trivial constructors. Since the // implicit default constructor is suppressed if there are any // user-declared constructors, we just need to check that there is a // trivial default constructor and a trivial copy constructor. (We don't // worry about move constructors here, since this is a C++98 check.) if (RDecl->hasNonTrivialCopyConstructor()) - member = CXXCopyConstructor; + member = CXXSpecialMemberKind::CopyConstructor; else if (!RDecl->hasTrivialDefaultConstructor()) - member = CXXDefaultConstructor; + member = CXXSpecialMemberKind::DefaultConstructor; else if (RDecl->hasNonTrivialCopyAssignment()) - member = CXXCopyAssignment; + member = CXXSpecialMemberKind::CopyAssignment; else if (RDecl->hasNonTrivialDestructor()) - member = CXXDestructor; + member = CXXSpecialMemberKind::Destructor; - if (member != CXXInvalid) { + if (member != CXXSpecialMemberKind::Invalid) { if (!getLangOpts().CPlusPlus11 && getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) { // Objective-C++ ARC: it is an error to have a non-trivial field of @@ -19106,10 +18997,13 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) { } } - Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ? - diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member : - diag::err_illegal_union_or_anon_struct_member) - << FD->getParent()->isUnion() << FD->getDeclName() << member; + Diag( + FD->getLocation(), + getLangOpts().CPlusPlus11 + ? 
diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member + : diag::err_illegal_union_or_anon_struct_member) + << FD->getParent()->isUnion() << FD->getDeclName() + << llvm::to_underlying(member); DiagnoseNontrivial(RDecl, member); return !getLangOpts().CPlusPlus11; } @@ -19137,7 +19031,7 @@ TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) { Decl *Sema::ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitWidth, tok::ObjCKeywordKind Visibility) { - IdentifierInfo *II = D.getIdentifier(); + const IdentifierInfo *II = D.getIdentifier(); SourceLocation Loc = DeclStart; if (II) Loc = D.getIdentifierLoc(); @@ -19207,8 +19101,9 @@ Decl *Sema::ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, NewID->setInvalidDecl(); if (II) { - NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName, - ForVisibleRedeclaration); + NamedDecl *PrevDecl = + LookupSingleName(S, II, Loc, LookupMemberName, + RedeclarationKind::ForVisibleRedeclaration); if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S) && !isa(PrevDecl)) { Diag(Loc, diag::err_duplicate_member) << II; @@ -19359,10 +19254,10 @@ static void ComputeSelectedDestructor(Sema &S, CXXRecordDecl *Record) { static bool AreSpecialMemberFunctionsSameKind(ASTContext &Context, CXXMethodDecl *M1, CXXMethodDecl *M2, - Sema::CXXSpecialMember CSM) { + CXXSpecialMemberKind CSM) { // We don't want to compare templates to non-templates: See // https://github.com/llvm/llvm-project/issues/59206 - if (CSM == Sema::CXXDefaultConstructor) + if (CSM == CXXSpecialMemberKind::DefaultConstructor) return bool(M1->getDescribedFunctionTemplate()) == bool(M2->getDescribedFunctionTemplate()); // FIXME: better resolve CWG @@ -19385,7 +19280,7 @@ static bool AreSpecialMemberFunctionsSameKind(ASTContext &Context, /// [CWG2595], if any, are satisfied is more constrained. static void SetEligibleMethods(Sema &S, CXXRecordDecl *Record, ArrayRef Methods, - Sema::CXXSpecialMember CSM) { + CXXSpecialMemberKind CSM) { SmallVector SatisfactionStatus; for (CXXMethodDecl *Method : Methods) { @@ -19443,7 +19338,8 @@ static void SetEligibleMethods(Sema &S, CXXRecordDecl *Record, // DR1734 and DR1496. if (!AnotherMethodIsMoreConstrained) { Method->setIneligibleOrNotSelected(false); - Record->addedEligibleSpecialMemberFunction(Method, 1 << CSM); + Record->addedEligibleSpecialMemberFunction(Method, + 1 << llvm::to_underlying(CSM)); } } } @@ -19482,13 +19378,15 @@ static void ComputeSpecialMemberFunctionsEligiblity(Sema &S, } SetEligibleMethods(S, Record, DefaultConstructors, - Sema::CXXDefaultConstructor); - SetEligibleMethods(S, Record, CopyConstructors, Sema::CXXCopyConstructor); - SetEligibleMethods(S, Record, MoveConstructors, Sema::CXXMoveConstructor); + CXXSpecialMemberKind::DefaultConstructor); + SetEligibleMethods(S, Record, CopyConstructors, + CXXSpecialMemberKind::CopyConstructor); + SetEligibleMethods(S, Record, MoveConstructors, + CXXSpecialMemberKind::MoveConstructor); SetEligibleMethods(S, Record, CopyAssignmentOperators, - Sema::CXXCopyAssignment); + CXXSpecialMemberKind::CopyAssignment); SetEligibleMethods(S, Record, MoveAssignmentOperators, - Sema::CXXMoveAssignment); + CXXSpecialMemberKind::MoveAssignment); } void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, @@ -19747,6 +19645,13 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, // Okay, we successfully defined 'Record'. 
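Aside: the `CXXSpecialMemberKind` migration in the hunks above turns an unscoped `Sema::CXX*` enum into a scoped enum, which is why call sites that stream the value into a diagnostic or build a bitmask now go through `llvm::to_underlying(...)` (from llvm/ADT/STLForwardCompat.h, included at the top of this file). A minimal sketch with a local stand-in for `to_underlying`; the enumerator order mirrors the old `Sema::CXX*` list and only the conversion pattern matters:

```cpp
#include <type_traits>

enum class CXXSpecialMemberKind {
  DefaultConstructor,
  CopyConstructor,
  MoveConstructor,
  CopyAssignment,
  MoveAssignment,
  Destructor,
  Invalid
};

// Local stand-in with the same shape as llvm::to_underlying.
template <typename Enum>
constexpr std::underlying_type_t<Enum> to_underlying(Enum E) {
  return static_cast<std::underlying_type_t<Enum>>(E);
}

// Scoped enumerators no longer convert implicitly, so both the diagnostic
// argument (<< to_underlying(member)) and the bitmask update
// (1 << to_underlying(CSM)) need the explicit conversion.
static_assert((1u << to_underlying(CXXSpecialMemberKind::CopyConstructor)) == 2u);
```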
if (Record) { bool Completed = false; + if (S) { + Scope *Parent = S->getParent(); + if (Parent && Parent->isTypeAliasScope() && + Parent->isTemplateParamScope()) + Record->setInvalidDecl(); + } + if (CXXRecord) { if (!CXXRecord->isInvalidDecl()) { // Set access bits correctly on the directly-declared conversions. @@ -19857,7 +19762,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, if (CXXRecord) { auto *Dtor = CXXRecord->getDestructor(); if (Dtor && Dtor->isImplicit() && - ShouldDeleteSpecialMember(Dtor, CXXDestructor)) { + ShouldDeleteSpecialMember(Dtor, CXXSpecialMemberKind::Destructor)) { CXXRecord->setImplicitDestructorIsDeleted(); SetDeclDeleted(Dtor, CXXRecord->getLocation()); } @@ -19894,7 +19799,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, E = Record->field_end(); (NonBitFields == 0 || ZeroSize) && I != E; ++I) { IsEmpty = false; - if (I->isUnnamedBitfield()) { + if (I->isUnnamedBitField()) { if (!I->isZeroLengthBitField(Context)) ZeroSize = false; } else { @@ -20210,7 +20115,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum, Val, EnumVal); } -Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, +SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc) { if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) || !getLangOpts().CPlusPlus) @@ -20250,7 +20155,8 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst, // Verify that there isn't already something declared with this name in this // scope. - LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, ForVisibleRedeclaration); + LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration); LookupName(R, S); NamedDecl *PrevDecl = R.getAsSingle(); @@ -20876,7 +20782,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD, return FunctionEmissionStatus::OMPDiscarded; // If we have an explicit value for the device type, or we are in a target // declare context, we need to emit all extern and used symbols. - if (isInOpenMPDeclareTargetContext() || DevTy) + if (OpenMP().isInOpenMPDeclareTargetContext() || DevTy) if (IsEmittedForExternalSymbol()) return FunctionEmissionStatus::Emitted; // Device mode only emits what it must, if it wasn't tagged yet and needed, @@ -20902,11 +20808,11 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD, // when compiling for host, device and global functions are never emitted. // (Technically, we do emit a host-side stub for global functions, but this // doesn't count for our purposes here.) - Sema::CUDAFunctionTarget T = IdentifyCUDATarget(FD); - if (LangOpts.CUDAIsDevice && T == Sema::CFT_Host) + CUDAFunctionTarget T = CUDA().IdentifyTarget(FD); + if (LangOpts.CUDAIsDevice && T == CUDAFunctionTarget::Host) return FunctionEmissionStatus::CUDADiscarded; if (!LangOpts.CUDAIsDevice && - (T == Sema::CFT_Device || T == Sema::CFT_Global)) + (T == CUDAFunctionTarget::Device || T == CUDAFunctionTarget::Global)) return FunctionEmissionStatus::CUDADiscarded; if (IsEmittedForExternalSymbol()) @@ -20944,5 +20850,5 @@ bool Sema::shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee) { // for host, only HD functions actually called from the host get marked as // known-emitted. 
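Aside: the `getEmissionStatus` changes above keep the same host/device discard rule while routing target identification through `CUDA().IdentifyTarget(...)` and the scoped `CUDAFunctionTarget`. A compressed sketch of that rule as a free function rather than the real Sema interface (the enumerator set is assumed from the names used in the hunk):

```cpp
enum class CUDAFunctionTarget { Device, Global, Host, HostDevice, InvalidTarget };

// Mirrors the logic above: a device compilation discards host-only functions,
// a host compilation discards device and global (kernel) functions; everything
// else falls through to the usual emission rules.
bool discardedForThisCompilation(bool CompilingForDevice, CUDAFunctionTarget T) {
  if (CompilingForDevice)
    return T == CUDAFunctionTarget::Host;
  return T == CUDAFunctionTarget::Device || T == CUDAFunctionTarget::Global;
}
```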
return LangOpts.CUDA && !LangOpts.CUDAIsDevice && - IdentifyCUDATarget(Callee) == CFT_Global; + CUDA().IdentifyTarget(Callee) == CUDAFunctionTarget::Global; } diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index 94bbca32063cc..6a43d7142b3aa 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -39,9 +39,14 @@ #include "clang/Sema/ParsedAttr.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" +#include "clang/Sema/SemaHLSL.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaSYCL.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/Demangle/Demangle.h" #include "llvm/IR/Assumptions.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/Support/Error.h" @@ -1056,6 +1061,21 @@ static void handleErrorAttr(Sema &S, Decl *D, const ParsedAttr &AL) { D->addAttr(EA); } +static void handleExcludeFromExplicitInstantiationAttr(Sema &S, Decl *D, + const ParsedAttr &AL) { + const auto *PD = isa(D) + ? cast(D) + : D->getDeclContext()->getRedeclContext(); + if (const auto *RD = dyn_cast(PD); RD && RD->isLocalClass()) { + S.Diag(AL.getLoc(), + diag::warn_attribute_exclude_from_explicit_instantiation_local_class) + << AL << /*IsMember=*/!isa(D); + return; + } + D->addAttr(::new (S.Context) + ExcludeFromExplicitInstantiationAttr(S.Context, AL)); +} + namespace { /// Determines if a given Expr references any of the given function's /// ParmVarDecls, or the function's implicit `this` parameter (if applicable). @@ -2056,6 +2076,38 @@ static void handleWeakRefAttr(Sema &S, Decl *D, const ParsedAttr &AL) { D->addAttr(::new (S.Context) WeakRefAttr(S.Context, AL)); } +// Mark alias/ifunc target as used. Due to name mangling, we look up the +// demangled name ignoring parameters (not supported by microsoftDemangle +// https://github.com/llvm/llvm-project/issues/88825). This should handle the +// majority of use cases while leaving namespace scope names unmarked. +static void markUsedForAliasOrIfunc(Sema &S, Decl *D, const ParsedAttr &AL, + StringRef Str) { + std::unique_ptr Demangled; + if (S.getASTContext().getCXXABIKind() != TargetCXXABI::Microsoft) + Demangled.reset(llvm::itaniumDemangle(Str, /*ParseParams=*/false)); + std::unique_ptr MC(S.Context.createMangleContext()); + SmallString<256> Name; + + const DeclarationNameInfo Target( + &S.Context.Idents.get(Demangled ? Demangled.get() : Str), AL.getLoc()); + LookupResult LR(S, Target, Sema::LookupOrdinaryName); + if (S.LookupName(LR, S.TUScope)) { + for (NamedDecl *ND : LR) { + if (!isa(ND) && !isa(ND)) + continue; + if (MC->shouldMangleDeclName(ND)) { + llvm::raw_svector_ostream Out(Name); + Name.clear(); + MC->mangleName(GlobalDecl(ND), Out); + } else { + Name = ND->getIdentifier()->getName(); + } + if (Name == Str) + ND->markUsed(S.Context); + } + } +} + static void handleIFuncAttr(Sema &S, Decl *D, const ParsedAttr &AL) { StringRef Str; if (!S.checkStringLiteralArgumentAttr(AL, 0, Str)) @@ -2068,6 +2120,7 @@ static void handleIFuncAttr(Sema &S, Decl *D, const ParsedAttr &AL) { return; } + markUsedForAliasOrIfunc(S, D, AL, Str); D->addAttr(::new (S.Context) IFuncAttr(S.Context, AL, Str)); } @@ -2102,17 +2155,7 @@ static void handleAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) { } } - // Mark target used to prevent unneeded-internal-declaration warnings. 
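The new markUsedForAliasOrIfunc helper exists because the string in an alias or ifunc attribute names the mangled linkage symbol rather than the source-level identifier, so Sema now demangles it (Itanium only) before looking the target up and marking it used. A small usage sketch with made-up names, assuming the Itanium ABI and the GNU alias attribute:

  namespace n { int callee() { return 1; } }   // linkage name: _ZN1n6calleeEv

  // The attribute argument is the mangled name; with the change above, the
  // lookup resolves it back to n::callee and marks that declaration used.
  extern "C" int wrapper() __attribute__((alias("_ZN1n6calleeEv")));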
- if (!S.LangOpts.CPlusPlus) { - // FIXME: demangle Str for C++, as the attribute refers to the mangled - // linkage name, not the pre-mangled identifier. - const DeclarationNameInfo target(&S.Context.Idents.get(Str), AL.getLoc()); - LookupResult LR(S, target, Sema::LookupOrdinaryName); - if (S.LookupQualifiedName(LR, S.getCurLexicalContext())) - for (NamedDecl *ND : LR) - ND->markUsed(S.Context); - } - + markUsedForAliasOrIfunc(S, D, AL, Str); D->addAttr(::new (S.Context) AliasAttr(S.Context, AL, Str)); } @@ -6587,7 +6630,7 @@ static void handleSYCLDeviceAttr(Sema &S, Decl *D, const ParsedAttr &AL) { // Diagnose only for non-dependent types since dependent type don't have // attributes applied on them ATM. if (!VarType->isDependentType() && - !S.isTypeDecoratedWithDeclAttribute( + !S.SYCL().isTypeDecoratedWithDeclAttribute( VD->getType())) { S.Diag(AL.getLoc(), diag::err_sycl_attribute_not_device_global) << AL; return; @@ -6722,8 +6765,8 @@ static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) { return; } if (S.getLangOpts().CUDA && VD->hasLocalStorage() && - S.CUDADiagIfHostCode(AL.getLoc(), diag::err_cuda_host_shared) - << S.CurrentCUDATarget()) + S.CUDA().DiagIfHostCode(AL.getLoc(), diag::err_cuda_host_shared) + << llvm::to_underlying(S.CUDA().CurrentTarget())) return; D->addAttr(::new (S.Context) CUDASharedAttr(S.Context, AL)); } @@ -6812,8 +6855,9 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) { // Diagnostic is emitted elsewhere: here we store the (valid) AL // in the Decl node for syntactic reasoning, e.g., pretty-printing. CallingConv CC; - if (S.CheckCallingConvAttr(AL, CC, /*FD*/ nullptr, - S.IdentifyCUDATarget(dyn_cast(D)))) + if (S.CheckCallingConvAttr( + AL, CC, /*FD*/ nullptr, + S.CUDA().IdentifyTarget(dyn_cast(D)))) return; if (!isa(D)) { @@ -7117,22 +7161,22 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC, // on their host/device attributes. if (LangOpts.CUDA) { auto *Aux = Context.getAuxTargetInfo(); - assert(FD || CFT != CFT_InvalidTarget); - auto CudaTarget = FD ? IdentifyCUDATarget(FD) : CFT; + assert(FD || CFT != CUDAFunctionTarget::InvalidTarget); + auto CudaTarget = FD ? CUDA().IdentifyTarget(FD) : CFT; bool CheckHost = false, CheckDevice = false; switch (CudaTarget) { - case CFT_HostDevice: + case CUDAFunctionTarget::HostDevice: CheckHost = true; CheckDevice = true; break; - case CFT_Host: + case CUDAFunctionTarget::Host: CheckHost = true; break; - case CFT_Device: - case CFT_Global: + case CUDAFunctionTarget::Device: + case CUDAFunctionTarget::Global: CheckDevice = true; break; - case CFT_InvalidTarget: + case CUDAFunctionTarget::InvalidTarget: llvm_unreachable("unexpected cuda target"); } auto *HostTI = LangOpts.CUDAIsDevice ? 
Aux : &TI; @@ -7475,7 +7519,7 @@ static bool CheckValidFPGAMemoryAttributesVar(Sema &S, Decl *D) { if (!(isa(D) || (VD->getKind() != Decl::ImplicitParam && VD->getKind() != Decl::NonTypeTemplateParm && - (S.isTypeDecoratedWithDeclAttribute( + (S.SYCL().isTypeDecoratedWithDeclAttribute( VD->getType()) || VD->getType().isConstQualified() || VD->getType().getAddressSpace() == LangAS::opencl_constant || @@ -8170,7 +8214,7 @@ void Sema::AddSYCLIntelPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI, VD->getKind() != Decl::NonTypeTemplateParm && VD->getKind() != Decl::ParmVar && (VD->hasLocalStorage() || - isTypeDecoratedWithDeclAttribute( + SYCL().isTypeDecoratedWithDeclAttribute( VD->getType())))))) { Diag(CI.getLoc(), diag::err_fpga_attribute_invalid_decl) << CI; return; @@ -10159,24 +10203,11 @@ static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) { return; } - HLSLNumThreadsAttr *NewAttr = S.mergeHLSLNumThreadsAttr(D, AL, X, Y, Z); + HLSLNumThreadsAttr *NewAttr = S.HLSL().mergeNumThreadsAttr(D, AL, X, Y, Z); if (NewAttr) D->addAttr(NewAttr); } -HLSLNumThreadsAttr *Sema::mergeHLSLNumThreadsAttr(Decl *D, - const AttributeCommonInfo &AL, - int X, int Y, int Z) { - if (HLSLNumThreadsAttr *NT = D->getAttr()) { - if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) { - Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL; - Diag(AL.getLoc(), diag::note_conflicting_attribute); - } - return nullptr; - } - return ::new (Context) HLSLNumThreadsAttr(Context, AL, X, Y, Z); -} - static bool isLegalTypeForHLSLSV_DispatchThreadID(QualType T) { if (!T->hasUnsignedIntegerRepresentation()) return false; @@ -10220,24 +10251,11 @@ static void handleHLSLShaderAttr(Sema &S, Decl *D, const ParsedAttr &AL) { // FIXME: check function match the shader stage. - HLSLShaderAttr *NewAttr = S.mergeHLSLShaderAttr(D, AL, ShaderType); + HLSLShaderAttr *NewAttr = S.HLSL().mergeShaderAttr(D, AL, ShaderType); if (NewAttr) D->addAttr(NewAttr); } -HLSLShaderAttr * -Sema::mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL, - HLSLShaderAttr::ShaderType ShaderType) { - if (HLSLShaderAttr *NT = D->getAttr()) { - if (NT->getType() != ShaderType) { - Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL; - Diag(AL.getLoc(), diag::note_conflicting_attribute); - } - return nullptr; - } - return HLSLShaderAttr::Create(Context, ShaderType, AL); -} - static void handleHLSLResourceBindingAttr(Sema &S, Decl *D, const ParsedAttr &AL) { StringRef Space = "space0"; @@ -10312,34 +10330,13 @@ static void handleHLSLResourceBindingAttr(Sema &S, Decl *D, static void handleHLSLParamModifierAttr(Sema &S, Decl *D, const ParsedAttr &AL) { - HLSLParamModifierAttr *NewAttr = S.mergeHLSLParamModifierAttr( + HLSLParamModifierAttr *NewAttr = S.HLSL().mergeParamModifierAttr( D, AL, static_cast(AL.getSemanticSpelling())); if (NewAttr) D->addAttr(NewAttr); } -HLSLParamModifierAttr * -Sema::mergeHLSLParamModifierAttr(Decl *D, const AttributeCommonInfo &AL, - HLSLParamModifierAttr::Spelling Spelling) { - // We can only merge an `in` attribute with an `out` attribute. All other - // combinations of duplicated attributes are ill-formed. 
- if (HLSLParamModifierAttr *PA = D->getAttr()) { - if ((PA->isIn() && Spelling == HLSLParamModifierAttr::Keyword_out) || - (PA->isOut() && Spelling == HLSLParamModifierAttr::Keyword_in)) { - D->dropAttr(); - SourceRange AdjustedRange = {PA->getLocation(), AL.getRange().getEnd()}; - return HLSLParamModifierAttr::Create( - Context, /*MergedSpelling=*/true, AdjustedRange, - HLSLParamModifierAttr::Keyword_inout); - } - Diag(AL.getLoc(), diag::err_hlsl_duplicate_parameter_modifier) << AL; - Diag(PA->getLocation(), diag::note_conflicting_attribute); - return nullptr; - } - return HLSLParamModifierAttr::Create(Context, AL); -} - static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) { if (!S.LangOpts.CPlusPlus) { S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang) @@ -12427,6 +12424,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL, case ParsedAttr::AT_Error: handleErrorAttr(S, D, AL); break; + case ParsedAttr::AT_ExcludeFromExplicitInstantiation: + handleExcludeFromExplicitInstantiationAttr(S, D, AL); + break; case ParsedAttr::AT_DiagnoseIf: handleDiagnoseIfAttr(S, D, AL); break; diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 9c51bd128eaff..cfc6c3de53acd 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -42,10 +42,14 @@ #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaOpenMP.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/Template.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" @@ -657,13 +661,13 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, // is ill-formed. This can only happen for constructors. if (isa(New) && New->getMinRequiredArguments() < Old->getMinRequiredArguments()) { - CXXSpecialMember NewSM = getSpecialMember(cast(New)), - OldSM = getSpecialMember(cast(Old)); + CXXSpecialMemberKind NewSM = getSpecialMember(cast(New)), + OldSM = getSpecialMember(cast(Old)); if (NewSM != OldSM) { ParmVarDecl *NewParam = New->getParamDecl(New->getMinRequiredArguments()); assert(NewParam->hasDefaultArg()); Diag(NewParam->getLocation(), diag::err_default_arg_makes_ctor_special) - << NewParam->getDefaultArgRange() << NewSM; + << NewParam->getDefaultArgRange() << llvm::to_underlying(NewSM); Diag(Old->getLocation(), diag::note_previous_declaration); } } @@ -893,7 +897,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D, assert(VarName && "Cannot have an unnamed binding declaration"); LookupResult Previous(*this, NameInfo, LookupOrdinaryName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S, /*CreateBuiltins*/DC->getRedeclContext()->isTranslationUnit()); @@ -948,7 +952,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D, DeclarationNameInfo NameInfo((IdentifierInfo *)nullptr, Decomp.getLSquareLoc()); LookupResult Previous(*this, NameInfo, LookupOrdinaryName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); // Build the variable that holds the non-decomposed object. 
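The MergeCXXFunctionDecl hunk above keeps the existing rule that a redeclaration must not add a default argument that turns a constructor into a different kind of special member (err_default_arg_makes_ctor_special). Roughly the shape of code it targets, with a hypothetical class and the offending default argument left commented out:

  struct X {
    X(const X &other, int n);   // two required parameters: not a copy constructor
  };

  // Writing "int n = 0" here would reclassify this redeclaration as a copy
  // constructor, which the check above diagnoses.
  X::X(const X &other, int n /* = 0 */) {}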
bool AddToScope = true; @@ -960,8 +964,8 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D, CurContext->addHiddenDecl(New); } - if (isInOpenMPDeclareTargetContext()) - checkDeclIsAllowedInOpenMPTarget(nullptr, New); + if (OpenMP().isInOpenMPDeclareTargetContext()) + OpenMP().checkDeclIsAllowedInOpenMPTarget(nullptr, New); return New; } @@ -1450,7 +1454,7 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef Bindings, auto DiagnoseBadNumberOfBindings = [&]() -> bool { unsigned NumFields = llvm::count_if( - RD->fields(), [](FieldDecl *FD) { return !FD->isUnnamedBitfield(); }); + RD->fields(), [](FieldDecl *FD) { return !FD->isUnnamedBitField(); }); assert(Bindings.size() != NumFields); S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings) << DecompType << (unsigned)Bindings.size() << NumFields << NumFields @@ -1463,7 +1467,7 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef Bindings, // E shall not have an anonymous union member, ... unsigned I = 0; for (auto *FD : RD->fields()) { - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; // All the non-static data members are required to be nameable, so they @@ -2064,7 +2068,7 @@ static bool CheckConstexprCtorInitializer(Sema &SemaRef, if (Field->isInvalidDecl()) return true; - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) return true; // Anonymous unions with no variant members and empty anonymous structs do not @@ -3776,7 +3780,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, // attribute is accessed. if (getLangOpts().SYCLIsDevice) { if (auto Value = dyn_cast(Member)) { - if (isTypeDecoratedWithDeclAttribute( + if (SYCL().isTypeDecoratedWithDeclAttribute( Value->getType())) { if (Value->getAccess() == AS_private || Value->getAccess() == AS_protected) { @@ -5533,7 +5537,7 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, // A declaration for a bit-field that omits the identifier declares an // unnamed bit-field. Unnamed bit-fields are not members and cannot be // initialized. - if (F->isUnnamedBitfield()) + if (F->isUnnamedBitField()) continue; // If we're not generating the implicit copy/move constructor, then we'll @@ -5662,7 +5666,7 @@ static void DiagnoseBaseOrMemInitializerOrder( // 3. Direct fields. for (auto *Field : ClassDecl->fields()) { - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; PopulateKeysForFields(Field, IdealInitKeys); @@ -6806,7 +6810,7 @@ void Sema::propagateDLLAttrToBaseClassTemplate( /// /// If the function is both a default constructor and a copy / move constructor /// (due to having a default argument for the first parameter), this picks -/// CXXDefaultConstructor. +/// CXXSpecialMemberKind::DefaultConstructor. /// /// FIXME: Check that case is properly handled by all callers. 
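A number of hunks above rename isUnnamedBitfield() to isUnnamedBitField(); the language rule behind all of those call sites is that an unnamed bit-field is not a member at all, so it is ignored both by aggregate initialization and when counting bindings for a structured binding. Illustrative C++17 snippet with made-up names:

  struct P {
    int a;
    int : 4;   // unnamed bit-field: not a member, not counted, not bound
    int b;
  };

  auto [x, y] = P{1, 2};   // OK: exactly two named fields to bind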
Sema::DefaultedFunctionKind @@ -6814,23 +6818,23 @@ Sema::getDefaultedFunctionKind(const FunctionDecl *FD) { if (auto *MD = dyn_cast(FD)) { if (const CXXConstructorDecl *Ctor = dyn_cast(FD)) { if (Ctor->isDefaultConstructor()) - return Sema::CXXDefaultConstructor; + return CXXSpecialMemberKind::DefaultConstructor; if (Ctor->isCopyConstructor()) - return Sema::CXXCopyConstructor; + return CXXSpecialMemberKind::CopyConstructor; if (Ctor->isMoveConstructor()) - return Sema::CXXMoveConstructor; + return CXXSpecialMemberKind::MoveConstructor; } if (MD->isCopyAssignmentOperator()) - return Sema::CXXCopyAssignment; + return CXXSpecialMemberKind::CopyAssignment; if (MD->isMoveAssignmentOperator()) - return Sema::CXXMoveAssignment; + return CXXSpecialMemberKind::MoveAssignment; if (isa(FD)) - return Sema::CXXDestructor; + return CXXSpecialMemberKind::Destructor; } switch (FD->getDeclName().getCXXOverloadedOperator()) { @@ -6870,26 +6874,26 @@ static void DefineDefaultedFunction(Sema &S, FunctionDecl *FD, return S.DefineDefaultedComparison(DefaultLoc, FD, DFK.asComparison()); switch (DFK.asSpecialMember()) { - case Sema::CXXDefaultConstructor: + case CXXSpecialMemberKind::DefaultConstructor: S.DefineImplicitDefaultConstructor(DefaultLoc, cast(FD)); break; - case Sema::CXXCopyConstructor: + case CXXSpecialMemberKind::CopyConstructor: S.DefineImplicitCopyConstructor(DefaultLoc, cast(FD)); break; - case Sema::CXXCopyAssignment: + case CXXSpecialMemberKind::CopyAssignment: S.DefineImplicitCopyAssignment(DefaultLoc, cast(FD)); break; - case Sema::CXXDestructor: + case CXXSpecialMemberKind::Destructor: S.DefineImplicitDestructor(DefaultLoc, cast(FD)); break; - case Sema::CXXMoveConstructor: + case CXXSpecialMemberKind::MoveConstructor: S.DefineImplicitMoveConstructor(DefaultLoc, cast(FD)); break; - case Sema::CXXMoveAssignment: + case CXXSpecialMemberKind::MoveAssignment: S.DefineImplicitMoveAssignment(DefaultLoc, cast(FD)); break; - case Sema::CXXInvalid: + case CXXSpecialMemberKind::Invalid: llvm_unreachable("Invalid special member."); } } @@ -7054,7 +7058,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { !Record->isLambda()) { bool Complained = false; for (const auto *F : Record->fields()) { - if (F->hasInClassInitializer() || F->isUnnamedBitfield()) + if (F->hasInClassInitializer() || F->isUnnamedBitField()) continue; if (F->getType()->isReferenceType() || @@ -7209,9 +7213,9 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { // For an explicitly defaulted or deleted special member, we defer // determining triviality until the class is complete. That time is now! - CXXSpecialMember CSM = getSpecialMember(M); + CXXSpecialMemberKind CSM = getSpecialMember(M); if (!M->isImplicit() && !M->isUserProvided()) { - if (CSM != CXXInvalid) { + if (CSM != CXXSpecialMemberKind::Invalid) { M->setTrivial(SpecialMemberIsTrivial(M, CSM)); // Inform the class that we've finished declaring this member. Record->finishedDefaultedOrDeletedMember(M); @@ -7224,8 +7228,10 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { // Set triviality for the purpose of calls if this is a user-provided // copy/move constructor or destructor. 
- if ((CSM == CXXCopyConstructor || CSM == CXXMoveConstructor || - CSM == CXXDestructor) && M->isUserProvided()) { + if ((CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::MoveConstructor || + CSM == CXXSpecialMemberKind::Destructor) && + M->isUserProvided()) { M->setTrivialForCall(HasTrivialABI); Record->setTrivialForCallFlags(M); } @@ -7234,8 +7240,9 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { M->hasAttr()) { if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) && M->isTrivial() && - (CSM == CXXDefaultConstructor || CSM == CXXCopyConstructor || - CSM == CXXDestructor)) + (CSM == CXXSpecialMemberKind::DefaultConstructor || + CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::Destructor)) M->dropAttr(); if (M->hasAttr()) { @@ -7247,8 +7254,8 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { // Define defaulted constexpr virtual functions that override a base class // function right away. // FIXME: We can defer doing this until the vtable is marked as used. - if (CSM != CXXInvalid && !M->isDeleted() && M->isDefaulted() && - M->isConstexpr() && M->size_overridden_methods()) + if (CSM != CXXSpecialMemberKind::Invalid && !M->isDeleted() && + M->isDefaulted() && M->isConstexpr() && M->size_overridden_methods()) DefineDefaultedFunction(*this, M, M->getLocation()); if (!Incomplete) @@ -7370,15 +7377,18 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { /// \param ConstRHS True if this is a copy operation with a const object /// on its RHS, that is, if the argument to the outer special member /// function is 'const' and this is not a field marked 'mutable'. -static Sema::SpecialMemberOverloadResult lookupCallFromSpecialMember( - Sema &S, CXXRecordDecl *Class, Sema::CXXSpecialMember CSM, - unsigned FieldQuals, bool ConstRHS) { +static Sema::SpecialMemberOverloadResult +lookupCallFromSpecialMember(Sema &S, CXXRecordDecl *Class, + CXXSpecialMemberKind CSM, unsigned FieldQuals, + bool ConstRHS) { unsigned LHSQuals = 0; - if (CSM == Sema::CXXCopyAssignment || CSM == Sema::CXXMoveAssignment) + if (CSM == CXXSpecialMemberKind::CopyAssignment || + CSM == CXXSpecialMemberKind::MoveAssignment) LHSQuals = FieldQuals; unsigned RHSQuals = FieldQuals; - if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor) + if (CSM == CXXSpecialMemberKind::DefaultConstructor || + CSM == CXXSpecialMemberKind::Destructor) RHSQuals = 0; else if (ConstRHS) RHSQuals |= Qualifiers::Const; @@ -7474,12 +7484,10 @@ class Sema::InheritedConstructorInfo { /// Is the special member function which would be selected to perform the /// specified operation on the specified class type a constexpr constructor? -static bool -specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl, - Sema::CXXSpecialMember CSM, unsigned Quals, - bool ConstRHS, - CXXConstructorDecl *InheritedCtor = nullptr, - Sema::InheritedConstructorInfo *Inherited = nullptr) { +static bool specialMemberIsConstexpr( + Sema &S, CXXRecordDecl *ClassDecl, CXXSpecialMemberKind CSM, unsigned Quals, + bool ConstRHS, CXXConstructorDecl *InheritedCtor = nullptr, + Sema::InheritedConstructorInfo *Inherited = nullptr) { // Suppress duplicate constraint checking here, in case a constraint check // caused us to decide to do this. Any truely recursive checks will get // caught during these checks anyway. 
@@ -7488,16 +7496,16 @@ specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl, // If we're inheriting a constructor, see if we need to call it for this base // class. if (InheritedCtor) { - assert(CSM == Sema::CXXDefaultConstructor); + assert(CSM == CXXSpecialMemberKind::DefaultConstructor); auto BaseCtor = Inherited->findConstructorForBase(ClassDecl, InheritedCtor).first; if (BaseCtor) return BaseCtor->isConstexpr(); } - if (CSM == Sema::CXXDefaultConstructor) + if (CSM == CXXSpecialMemberKind::DefaultConstructor) return ClassDecl->hasConstexprDefaultConstructor(); - if (CSM == Sema::CXXDestructor) + if (CSM == CXXSpecialMemberKind::Destructor) return ClassDecl->hasConstexprDestructor(); Sema::SpecialMemberOverloadResult SMOR = @@ -7512,8 +7520,8 @@ specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl, /// Determine whether the specified special member function would be constexpr /// if it were implicitly defined. static bool defaultedSpecialMemberIsConstexpr( - Sema &S, CXXRecordDecl *ClassDecl, Sema::CXXSpecialMember CSM, - bool ConstArg, CXXConstructorDecl *InheritedCtor = nullptr, + Sema &S, CXXRecordDecl *ClassDecl, CXXSpecialMemberKind CSM, bool ConstArg, + CXXConstructorDecl *InheritedCtor = nullptr, Sema::InheritedConstructorInfo *Inherited = nullptr) { if (!S.getLangOpts().CPlusPlus11) return false; @@ -7522,7 +7530,7 @@ static bool defaultedSpecialMemberIsConstexpr( // In the definition of a constexpr constructor [...] bool Ctor = true; switch (CSM) { - case Sema::CXXDefaultConstructor: + case CXXSpecialMemberKind::DefaultConstructor: if (Inherited) break; // Since default constructor lookup is essentially trivial (and cannot @@ -7533,23 +7541,23 @@ static bool defaultedSpecialMemberIsConstexpr( // constructor is constexpr to determine whether the type is a literal type. return ClassDecl->defaultedDefaultConstructorIsConstexpr(); - case Sema::CXXCopyConstructor: - case Sema::CXXMoveConstructor: + case CXXSpecialMemberKind::CopyConstructor: + case CXXSpecialMemberKind::MoveConstructor: // For copy or move constructors, we need to perform overload resolution. break; - case Sema::CXXCopyAssignment: - case Sema::CXXMoveAssignment: + case CXXSpecialMemberKind::CopyAssignment: + case CXXSpecialMemberKind::MoveAssignment: if (!S.getLangOpts().CPlusPlus14) return false; // In C++1y, we need to perform overload resolution. Ctor = false; break; - case Sema::CXXDestructor: + case CXXSpecialMemberKind::Destructor: return ClassDecl->defaultedDestructorIsConstexpr(); - case Sema::CXXInvalid: + case CXXSpecialMemberKind::Invalid: return false; } @@ -7561,7 +7569,7 @@ static bool defaultedSpecialMemberIsConstexpr( // will be initialized (if the constructor isn't deleted), we just don't know // which one. if (Ctor && ClassDecl->isUnion()) - return CSM == Sema::CXXDefaultConstructor + return CSM == CXXSpecialMemberKind::DefaultConstructor ? 
ClassDecl->hasInClassInitializer() || !ClassDecl->hasVariantMembers() : true; @@ -7602,7 +7610,8 @@ static bool defaultedSpecialMemberIsConstexpr( for (const auto *F : ClassDecl->fields()) { if (F->isInvalidDecl()) continue; - if (CSM == Sema::CXXDefaultConstructor && F->hasInClassInitializer()) + if (CSM == CXXSpecialMemberKind::DefaultConstructor && + F->hasInClassInitializer()) continue; QualType BaseType = S.Context.getBaseElementType(F->getType()); if (const RecordType *RecordTy = BaseType->getAs()) { @@ -7611,7 +7620,7 @@ static bool defaultedSpecialMemberIsConstexpr( BaseType.getCVRQualifiers(), ConstArg && !F->isMutable())) return false; - } else if (CSM == Sema::CXXDefaultConstructor) { + } else if (CSM == CXXSpecialMemberKind::DefaultConstructor) { return false; } } @@ -7642,9 +7651,10 @@ struct ComputingExceptionSpec { } static Sema::ImplicitExceptionSpecification -ComputeDefaultedSpecialMemberExceptionSpec( - Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM, - Sema::InheritedConstructorInfo *ICI); +ComputeDefaultedSpecialMemberExceptionSpec(Sema &S, SourceLocation Loc, + CXXMethodDecl *MD, + CXXSpecialMemberKind CSM, + Sema::InheritedConstructorInfo *ICI); static Sema::ImplicitExceptionSpecification ComputeDefaultedComparisonExceptionSpec(Sema &S, SourceLocation Loc, @@ -7668,7 +7678,7 @@ computeImplicitExceptionSpec(Sema &S, SourceLocation Loc, FunctionDecl *FD) { Sema::InheritedConstructorInfo ICI( S, Loc, CD->getInheritedConstructor().getShadowDecl()); return ComputeDefaultedSpecialMemberExceptionSpec( - S, Loc, CD, Sema::CXXDefaultConstructor, &ICI); + S, Loc, CD, CXXSpecialMemberKind::DefaultConstructor, &ICI); } static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S, @@ -7720,11 +7730,11 @@ void Sema::CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *FD) { } bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, - CXXSpecialMember CSM, + CXXSpecialMemberKind CSM, SourceLocation DefaultLoc) { CXXRecordDecl *RD = MD->getParent(); - assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid && + assert(MD->isExplicitlyDefaulted() && CSM != CXXSpecialMemberKind::Invalid && "not an explicitly-defaulted special member"); // Defer all checking for special members of a dependent type. @@ -7750,21 +7760,22 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus20 && First; bool ShouldDeleteForTypeMismatch = false; unsigned ExpectedParams = 1; - if (CSM == CXXDefaultConstructor || CSM == CXXDestructor) + if (CSM == CXXSpecialMemberKind::DefaultConstructor || + CSM == CXXSpecialMemberKind::Destructor) ExpectedParams = 0; if (MD->getNumExplicitParams() != ExpectedParams) { // This checks for default arguments: a copy or move constructor with a // default argument is classified as a default constructor, and assignment // operations and destructors can't have default arguments. 
Diag(MD->getLocation(), diag::err_defaulted_special_member_params) - << CSM << MD->getSourceRange(); + << llvm::to_underlying(CSM) << MD->getSourceRange(); HadError = true; } else if (MD->isVariadic()) { if (DeleteOnTypeMismatch) ShouldDeleteForTypeMismatch = true; else { Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic) - << CSM << MD->getSourceRange(); + << llvm::to_underlying(CSM) << MD->getSourceRange(); HadError = true; } } @@ -7772,13 +7783,14 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, const FunctionProtoType *Type = MD->getType()->castAs(); bool CanHaveConstParam = false; - if (CSM == CXXCopyConstructor) + if (CSM == CXXSpecialMemberKind::CopyConstructor) CanHaveConstParam = RD->implicitCopyConstructorHasConstParam(); - else if (CSM == CXXCopyAssignment) + else if (CSM == CXXSpecialMemberKind::CopyAssignment) CanHaveConstParam = RD->implicitCopyAssignmentHasConstParam(); QualType ReturnType = Context.VoidTy; - if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) { + if (CSM == CXXSpecialMemberKind::CopyAssignment || + CSM == CXXSpecialMemberKind::MoveAssignment) { // Check for return type matching. ReturnType = Type->getReturnType(); QualType ThisType = MD->getFunctionObjectParameterType(); @@ -7792,7 +7804,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, if (!Context.hasSameType(ReturnType, ExpectedReturnType)) { Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type) - << (CSM == CXXMoveAssignment) << ExpectedReturnType; + << (CSM == CXXSpecialMemberKind::MoveAssignment) + << ExpectedReturnType; HadError = true; } @@ -7802,7 +7815,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, ShouldDeleteForTypeMismatch = true; else { Diag(MD->getLocation(), diag::err_defaulted_special_member_quals) - << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14; + << (CSM == CXXSpecialMemberKind::MoveAssignment) + << getLangOpts().CPlusPlus14; HadError = true; } } @@ -7820,7 +7834,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, else { Diag(MD->getLocation(), diag::err_defaulted_special_member_explicit_object_mismatch) - << (CSM == CXXMoveAssignment) << RD << MD->getSourceRange(); + << (CSM == CXXSpecialMemberKind::MoveAssignment) << RD + << MD->getSourceRange(); HadError = true; } } @@ -7842,7 +7857,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, ShouldDeleteForTypeMismatch = true; else { Diag(MD->getLocation(), - diag::err_defaulted_special_member_volatile_param) << CSM; + diag::err_defaulted_special_member_volatile_param) + << llvm::to_underlying(CSM); HadError = true; } } @@ -7850,23 +7866,25 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, if (HasConstParam && !CanHaveConstParam) { if (DeleteOnTypeMismatch) ShouldDeleteForTypeMismatch = true; - else if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) { + else if (CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::CopyAssignment) { Diag(MD->getLocation(), diag::err_defaulted_special_member_copy_const_param) - << (CSM == CXXCopyAssignment); + << (CSM == CXXSpecialMemberKind::CopyAssignment); // FIXME: Explain why this special member can't be const. 
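The parameter checks above require an explicitly defaulted special member to have the shape the implicitly declared one would have (reference parameter, matching const-ness, no extra parameters). For example, with an illustrative type:

  struct S {
    S &operator=(const S &) = default;   // OK: matches the implicit signature
  };

  struct T {
    // T &operator=(T) = default;        // rejected above: the parameter of a
    //                                   // defaulted copy assignment must be a reference
    T &operator=(const T &) = default;
  };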
HadError = true; } else { Diag(MD->getLocation(), diag::err_defaulted_special_member_move_const_param) - << (CSM == CXXMoveAssignment); + << (CSM == CXXSpecialMemberKind::MoveAssignment); HadError = true; } } } else if (ExpectedParams) { // A copy assignment operator can take its argument by value, but a // defaulted one cannot. - assert(CSM == CXXCopyAssignment && "unexpected non-ref argument"); + assert(CSM == CXXSpecialMemberKind::CopyAssignment && + "unexpected non-ref argument"); Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref); HadError = true; } @@ -7901,12 +7919,12 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, if (!MD->isConsteval() && RD->getNumVBases()) { Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr_with_vb) - << CSM; + << llvm::to_underlying(CSM); for (const auto &I : RD->vbases()) Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here); } else { Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr) - << CSM << MD->isConsteval(); + << llvm::to_underlying(CSM) << MD->isConsteval(); } HadError = true; // FIXME: Explain why the special member can't be constexpr. @@ -7939,9 +7957,11 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, if (First) { SetDeclDeleted(MD, MD->getLocation()); if (!inTemplateInstantiation() && !HadError) { - Diag(MD->getLocation(), diag::warn_defaulted_method_deleted) << CSM; + Diag(MD->getLocation(), diag::warn_defaulted_method_deleted) + << llvm::to_underlying(CSM); if (ShouldDeleteForTypeMismatch) { - Diag(MD->getLocation(), diag::note_deleted_type_mismatch) << CSM; + Diag(MD->getLocation(), diag::note_deleted_type_mismatch) + << llvm::to_underlying(CSM); } else if (ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/ true) && DefaultLoc.isValid()) { @@ -7951,13 +7971,15 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, } if (ShouldDeleteForTypeMismatch && !HadError) { Diag(MD->getLocation(), - diag::warn_cxx17_compat_defaulted_method_type_mismatch) << CSM; + diag::warn_cxx17_compat_defaulted_method_type_mismatch) + << llvm::to_underlying(CSM); } } else { // C++11 [dcl.fct.def.default]p4: // [For a] user-provided explicitly-defaulted function [...] if such a // function is implicitly defined as deleted, the program is ill-formed. - Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM; + Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) + << llvm::to_underlying(CSM); assert(!ShouldDeleteForTypeMismatch && "deleted non-first decl"); ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/true); HadError = true; @@ -7989,7 +8011,7 @@ class DefaultedComparisonVisitor { DefaultedComparisonVisitor(Sema &S, CXXRecordDecl *RD, FunctionDecl *FD, DefaultedComparisonKind DCK) : S(S), RD(RD), FD(FD), DCK(DCK) { - if (auto *Info = FD->getDefaultedFunctionInfo()) { + if (auto *Info = FD->getDefalutedOrDeletedInfo()) { // FIXME: Change CreateOverloadedBinOp to take an ArrayRef instead of an // UnresolvedSet to avoid this copy. Fns.assign(Info->getUnqualifiedLookups().begin(), @@ -8043,7 +8065,7 @@ class DefaultedComparisonVisitor { for (FieldDecl *Field : Record->fields()) { // C++23 [class.bit]p2: // Unnamed bit-fields are not members ... - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; // Recursively expand anonymous structs. 
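The defaulted-comparison visitor above walks bases and then fields, skipping unnamed bit-fields exactly as the quoted [class.bit] wording says. In source terms (C++20, illustrative type):

  #include <compare>

  struct Q {
    int a;
    int : 2;   // unnamed bit-field: skipped by the defaulted comparison
    int b;
    auto operator<=>(const Q &) const = default;   // also injects a defaulted ==
  };

  static_assert(Q{1, 2} == Q{1, 2});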
if (Field->isAnonymousStructOrUnion()) { @@ -8857,8 +8879,9 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD, UnresolvedSet<32> Operators; lookupOperatorsForDefaultedComparison(*this, S, Operators, FD->getOverloadedOperator()); - FD->setDefaultedFunctionInfo(FunctionDecl::DefaultedFunctionInfo::Create( - Context, Operators.pairs())); + FD->setDefaultedOrDeletedInfo( + FunctionDecl::DefaultedOrDeletedFunctionInfo::Create( + Context, Operators.pairs())); } // C++2a [class.compare.default]p1: @@ -9298,28 +9321,28 @@ template struct SpecialMemberVisitor { Sema &S; CXXMethodDecl *MD; - Sema::CXXSpecialMember CSM; + CXXSpecialMemberKind CSM; Sema::InheritedConstructorInfo *ICI; // Properties of the special member, computed for convenience. bool IsConstructor = false, IsAssignment = false, ConstArg = false; - SpecialMemberVisitor(Sema &S, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM, + SpecialMemberVisitor(Sema &S, CXXMethodDecl *MD, CXXSpecialMemberKind CSM, Sema::InheritedConstructorInfo *ICI) : S(S), MD(MD), CSM(CSM), ICI(ICI) { switch (CSM) { - case Sema::CXXDefaultConstructor: - case Sema::CXXCopyConstructor: - case Sema::CXXMoveConstructor: + case CXXSpecialMemberKind::DefaultConstructor: + case CXXSpecialMemberKind::CopyConstructor: + case CXXSpecialMemberKind::MoveConstructor: IsConstructor = true; break; - case Sema::CXXCopyAssignment: - case Sema::CXXMoveAssignment: + case CXXSpecialMemberKind::CopyAssignment: + case CXXSpecialMemberKind::MoveAssignment: IsAssignment = true; break; - case Sema::CXXDestructor: + case CXXSpecialMemberKind::Destructor: break; - case Sema::CXXInvalid: + case CXXSpecialMemberKind::Invalid: llvm_unreachable("invalid special member kind"); } @@ -9334,7 +9357,8 @@ struct SpecialMemberVisitor { /// Is this a "move" special member? bool isMove() const { - return CSM == Sema::CXXMoveConstructor || CSM == Sema::CXXMoveAssignment; + return CSM == CXXSpecialMemberKind::MoveConstructor || + CSM == CXXSpecialMemberKind::MoveAssignment; } /// Look up the corresponding special member in the given class. @@ -9349,7 +9373,7 @@ struct SpecialMemberVisitor { Sema::SpecialMemberOverloadResult lookupInheritedCtor(CXXRecordDecl *Class) { if (!ICI) return {}; - assert(CSM == Sema::CXXDefaultConstructor); + assert(CSM == CXXSpecialMemberKind::DefaultConstructor); auto *BaseCtor = cast(MD)->getInheritedConstructor().getConstructor(); if (auto *MD = ICI->findConstructorForBase(Class, BaseCtor).first) @@ -9400,7 +9424,7 @@ struct SpecialMemberVisitor { return true; for (auto *F : RD->fields()) - if (!F->isInvalidDecl() && !F->isUnnamedBitfield() && + if (!F->isInvalidDecl() && !F->isUnnamedBitField() && getDerived().visitField(F)) return true; @@ -9419,15 +9443,15 @@ struct SpecialMemberDeletionInfo bool AllFieldsAreConst; SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD, - Sema::CXXSpecialMember CSM, + CXXSpecialMemberKind CSM, Sema::InheritedConstructorInfo *ICI, bool Diagnose) : SpecialMemberVisitor(S, MD, CSM, ICI), Diagnose(Diagnose), Loc(MD->getLocation()), AllFieldsAreConst(true) {} bool inUnion() const { return MD->getParent()->isUnion(); } - Sema::CXXSpecialMember getEffectiveCSM() { - return ICI ? Sema::CXXInvalid : CSM; + CXXSpecialMemberKind getEffectiveCSM() { + return ICI ? CXXSpecialMemberKind::Invalid : CSM; } bool shouldDeleteForVariantObjCPtrMember(FieldDecl *FD, QualType FieldType); @@ -9493,7 +9517,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall( // must be accessible and non-deleted, but need not be trivial. 
Such a // destructor is never actually called, but is semantically checked as // if it were. - if (CSM == Sema::CXXDefaultConstructor) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor) { // [class.default.ctor]p2: // A defaulted default constructor for class X is defined as deleted if // - X is a union that has a variant member with a non-trivial default @@ -9514,15 +9538,16 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall( if (Field) { S.Diag(Field->getLocation(), diag::note_deleted_special_member_class_subobject) - << getEffectiveCSM() << MD->getParent() << /*IsField*/true - << Field << DiagKind << IsDtorCallInCtor << /*IsObjCPtr*/false; + << llvm::to_underlying(getEffectiveCSM()) << MD->getParent() + << /*IsField*/ true << Field << DiagKind << IsDtorCallInCtor + << /*IsObjCPtr*/ false; } else { CXXBaseSpecifier *Base = Subobj.get(); S.Diag(Base->getBeginLoc(), diag::note_deleted_special_member_class_subobject) - << getEffectiveCSM() << MD->getParent() << /*IsField*/ false - << Base->getType() << DiagKind << IsDtorCallInCtor - << /*IsObjCPtr*/false; + << llvm::to_underlying(getEffectiveCSM()) << MD->getParent() + << /*IsField*/ false << Base->getType() << DiagKind + << IsDtorCallInCtor << /*IsObjCPtr*/ false; } if (DiagKind == 1) @@ -9554,8 +9579,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject( // C++11 [class.dtor]p5: // -- any direct or virtual base class [...] has a type with a destructor // that is deleted or inaccessible - if (!(CSM == Sema::CXXDefaultConstructor && - Field && Field->hasInClassInitializer()) && + if (!(CSM == CXXSpecialMemberKind::DefaultConstructor && Field && + Field->hasInClassInitializer()) && shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals, IsMutable), false)) return true; @@ -9565,8 +9590,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject( // type with a destructor that is deleted or inaccessible if (IsConstructor) { Sema::SpecialMemberOverloadResult SMOR = - S.LookupSpecialMember(Class, Sema::CXXDestructor, - false, false, false, false, false); + S.LookupSpecialMember(Class, CXXSpecialMemberKind::Destructor, false, + false, false, false, false); if (shouldDeleteForSubobjectCall(Subobj, SMOR, true)) return true; } @@ -9584,15 +9609,16 @@ bool SpecialMemberDeletionInfo::shouldDeleteForVariantObjCPtrMember( // Don't make the defaulted default constructor defined as deleted if the // member has an in-class initializer. 
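The subobject checks above implement the usual C++11 deletion rules: if a base class or non-static data member has a deleted or inaccessible special member that would be needed, the enclosing class's defaulted member is itself defined as deleted. A compact illustration with made-up types:

  struct NoDtor {
    ~NoDtor() = delete;
  };

  struct Holder {
    NoDtor m;
  };

  // Holder h;   // rejected: Holder's defaulted destructor (and with it the
  //             // defaulted default constructor) is defined as deleted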
- if (CSM == Sema::CXXDefaultConstructor && FD->hasInClassInitializer()) + if (CSM == CXXSpecialMemberKind::DefaultConstructor && + FD->hasInClassInitializer()) return false; if (Diagnose) { auto *ParentClass = cast(FD->getParent()); - S.Diag(FD->getLocation(), - diag::note_deleted_special_member_class_subobject) - << getEffectiveCSM() << ParentClass << /*IsField*/true - << FD << 4 << /*IsDtorCallInCtor*/false << /*IsObjCPtr*/true; + S.Diag(FD->getLocation(), diag::note_deleted_special_member_class_subobject) + << llvm::to_underlying(getEffectiveCSM()) << ParentClass + << /*IsField*/ true << FD << 4 << /*IsDtorCallInCtor*/ false + << /*IsObjCPtr*/ true; } return true; @@ -9617,9 +9643,9 @@ bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) { if (BaseCtor->isDeleted() && Diagnose) { S.Diag(Base->getBeginLoc(), diag::note_deleted_special_member_class_subobject) - << getEffectiveCSM() << MD->getParent() << /*IsField*/ false - << Base->getType() << /*Deleted*/ 1 << /*IsDtorCallInCtor*/ false - << /*IsObjCPtr*/false; + << llvm::to_underlying(getEffectiveCSM()) << MD->getParent() + << /*IsField*/ false << Base->getType() << /*Deleted*/ 1 + << /*IsDtorCallInCtor*/ false << /*IsObjCPtr*/ false; S.NoteDeletedFunction(BaseCtor); } return BaseCtor->isDeleted(); @@ -9636,7 +9662,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { if (inUnion() && shouldDeleteForVariantObjCPtrMember(FD, FieldType)) return true; - if (CSM == Sema::CXXDefaultConstructor) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor) { // For a default constructor, all references must be initialized in-class // and, if a union, it must have a non-const member. if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) { @@ -9659,7 +9685,7 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { if (inUnion() && !FieldType.isConstQualified()) AllFieldsAreConst = false; - } else if (CSM == Sema::CXXCopyConstructor) { + } else if (CSM == CXXSpecialMemberKind::CopyConstructor) { // For a copy constructor, data members must not be of rvalue reference // type. if (FieldType->isRValueReferenceType()) { @@ -9710,8 +9736,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { } // At least one member in each anonymous union must be non-const - if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst && - !FieldRecord->field_empty()) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor && + AllVariantFieldsAreConst && !FieldRecord->field_empty()) { if (Diagnose) S.Diag(FieldRecord->getLocation(), diag::note_deleted_default_ctor_all_const) @@ -9739,10 +9765,11 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) { bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() { // This is a silly definition, because it gives an empty union a deleted // default constructor. Don't do that. 
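shouldDeleteForField encodes the per-field deletion rules for a defaulted default constructor; two of the cases visible above, written out as source (illustrative types, rejected declarations left commented out):

  struct R {
    int &ref;       // reference member with no in-class initializer
  };
  // R r;           // rejected: the defaulted default constructor is deleted

  union U {
    const int k;    // every variant member is const-qualified
  };
  // U u;           // rejected for the same reason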
- if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor && inUnion() && + AllFieldsAreConst) { bool AnyFields = false; for (auto *F : MD->getParent()->fields()) - if ((AnyFields = !F->isUnnamedBitfield())) + if ((AnyFields = !F->isUnnamedBitField())) break; if (!AnyFields) return false; @@ -9758,7 +9785,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() { /// Determine whether a defaulted special member function should be defined as /// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11, /// C++11 [class.copy]p23, and C++11 [class.dtor]p5. -bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, +bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, + CXXSpecialMemberKind CSM, InheritedConstructorInfo *ICI, bool Diagnose) { if (MD->isInvalidDecl()) @@ -9775,7 +9803,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, // assignment operator. // C++2a adds back these operators if the lambda has no lambda-capture. if (RD->isLambda() && !RD->lambdaIsDefaultConstructibleAndAssignable() && - (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) { + (CSM == CXXSpecialMemberKind::DefaultConstructor || + CSM == CXXSpecialMemberKind::CopyAssignment)) { if (Diagnose) Diag(RD->getLocation(), diag::note_lambda_decl); return true; @@ -9784,16 +9813,16 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, // For an anonymous struct or union, the copy and assignment special members // will never be used, so skip the check. For an anonymous union declared at // namespace scope, the constructor and destructor are used. - if (CSM != CXXDefaultConstructor && CSM != CXXDestructor && - RD->isAnonymousStructOrUnion()) + if (CSM != CXXSpecialMemberKind::DefaultConstructor && + CSM != CXXSpecialMemberKind::Destructor && RD->isAnonymousStructOrUnion()) return false; // C++11 [class.copy]p7, p18: // If the class definition declares a move constructor or move assignment // operator, an implicitly declared copy constructor or copy assignment // operator is defined as deleted. - if (MD->isImplicit() && - (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) { + if (MD->isImplicit() && (CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::CopyAssignment)) { CXXMethodDecl *UserDeclaredMove = nullptr; // In Microsoft mode up to MSVC 2013, a user-declared move only causes the @@ -9804,7 +9833,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, !getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015); if (RD->hasUserDeclaredMoveConstructor() && - (!DeletesOnlyMatchingCopy || CSM == CXXCopyConstructor)) { + (!DeletesOnlyMatchingCopy || + CSM == CXXSpecialMemberKind::CopyConstructor)) { if (!Diagnose) return true; // Find any user-declared move constructor. @@ -9816,7 +9846,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, } assert(UserDeclaredMove); } else if (RD->hasUserDeclaredMoveAssignment() && - (!DeletesOnlyMatchingCopy || CSM == CXXCopyAssignment)) { + (!DeletesOnlyMatchingCopy || + CSM == CXXSpecialMemberKind::CopyAssignment)) { if (!Diagnose) return true; // Find any user-declared move assignment operator. 
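Two of the class-level rules handled above, shown directly in source; the lambda case is the C++20 relaxation the comment calls "C++2a adds back these operators if the lambda has no lambda-capture" (illustrative names):

  struct M {
    M() = default;
    M(M &&) = default;   // user-declared move constructor
  };

  void demo() {
    M a;
    // M b(a);           // rejected: the implicit copy constructor is deleted

    auto lam = [] { return 0; };
    decltype(lam) lam2;  // OK since C++20: captureless lambdas are default-constructible
    lam2 = lam;          // ...and copy-assignable
  }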
@@ -9832,8 +9863,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, if (UserDeclaredMove) { Diag(UserDeclaredMove->getLocation(), diag::note_deleted_copy_user_declared_move) - << (CSM == CXXCopyAssignment) << RD - << UserDeclaredMove->isMoveAssignmentOperator(); + << (CSM == CXXSpecialMemberKind::CopyAssignment) << RD + << UserDeclaredMove->isMoveAssignmentOperator(); return true; } } @@ -9844,7 +9875,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, // C++11 [class.dtor]p5: // -- for a virtual destructor, lookup of the non-array deallocation function // results in an ambiguity or in a function that is deleted or inaccessible - if (CSM == CXXDestructor && MD->isVirtual()) { + if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) { FunctionDecl *OperatorDelete = nullptr; DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Delete); @@ -9876,15 +9907,15 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, // failed. // For inherited constructors (non-null ICI), CSM may be passed so that MD // is treated as certain special member, which may not reflect what special - // member MD really is. However inferCUDATargetForImplicitSpecialMember + // member MD really is. However inferTargetForImplicitSpecialMember // expects CSM to match MD, therefore recalculate CSM. assert(ICI || CSM == getSpecialMember(MD)); auto RealCSM = CSM; if (ICI) RealCSM = getSpecialMember(MD); - return inferCUDATargetForImplicitSpecialMember(RD, RealCSM, MD, - SMI.ConstArg, Diagnose); + return CUDA().inferTargetForImplicitSpecialMember(RD, RealCSM, MD, + SMI.ConstArg, Diagnose); } return false; @@ -9918,7 +9949,7 @@ void Sema::DiagnoseDeletedDefaultedFunction(FunctionDecl *FD) { /// If \p ForCall is true, look at CXXRecord::HasTrivialSpecialMembersForCall to /// determine whether the special member is trivial. 
static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, - Sema::CXXSpecialMember CSM, unsigned Quals, + CXXSpecialMemberKind CSM, unsigned Quals, bool ConstRHS, Sema::TrivialABIHandling TAH, CXXMethodDecl **Selected) { @@ -9926,10 +9957,10 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, *Selected = nullptr; switch (CSM) { - case Sema::CXXInvalid: + case CXXSpecialMemberKind::Invalid: llvm_unreachable("not a special member"); - case Sema::CXXDefaultConstructor: + case CXXSpecialMemberKind::DefaultConstructor: // C++11 [class.ctor]p5: // A default constructor is trivial if: // - all the [direct subobjects] have trivial default constructors @@ -9958,7 +9989,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, return false; - case Sema::CXXDestructor: + case CXXSpecialMemberKind::Destructor: // C++11 [class.dtor]p5: // A destructor is trivial if: // - all the direct [subobjects] have trivial destructors @@ -9975,7 +10006,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, return false; - case Sema::CXXCopyConstructor: + case CXXSpecialMemberKind::CopyConstructor: // C++11 [class.copy]p12: // A copy constructor is trivial if: // - the constructor selected to copy each direct [subobject] is trivial @@ -9996,7 +10027,7 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, // struct B { mutable A a; }; goto NeedOverloadResolution; - case Sema::CXXCopyAssignment: + case CXXSpecialMemberKind::CopyAssignment: // C++11 [class.copy]p25: // A copy assignment operator is trivial if: // - the assignment operator selected to copy each direct [subobject] is @@ -10011,8 +10042,8 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, // treat that as a language defect. goto NeedOverloadResolution; - case Sema::CXXMoveConstructor: - case Sema::CXXMoveAssignment: + case CXXSpecialMemberKind::MoveConstructor: + case CXXSpecialMemberKind::MoveAssignment: NeedOverloadResolution: Sema::SpecialMemberOverloadResult SMOR = lookupCallFromSpecialMember(S, RD, CSM, Quals, ConstRHS); @@ -10036,7 +10067,8 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD, *Selected = SMOR.getMethod(); if (TAH == Sema::TAH_ConsiderTrivialABI && - (CSM == Sema::CXXCopyConstructor || CSM == Sema::CXXMoveConstructor)) + (CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::MoveConstructor)) return SMOR.getMethod()->isTrivialForCall(); return SMOR.getMethod()->isTrivial(); } @@ -10074,9 +10106,10 @@ enum TrivialSubobjectKind { /// Check whether the special member selected for a given type would be trivial. 
static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc, QualType SubType, bool ConstRHS, - Sema::CXXSpecialMember CSM, + CXXSpecialMemberKind CSM, TrivialSubobjectKind Kind, - Sema::TrivialABIHandling TAH, bool Diagnose) { + Sema::TrivialABIHandling TAH, + bool Diagnose) { CXXRecordDecl *SubRD = SubType->getAsCXXRecordDecl(); if (!SubRD) return true; @@ -10090,27 +10123,28 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc, if (ConstRHS) SubType.addConst(); - if (!Selected && CSM == Sema::CXXDefaultConstructor) { + if (!Selected && CSM == CXXSpecialMemberKind::DefaultConstructor) { S.Diag(SubobjLoc, diag::note_nontrivial_no_def_ctor) << Kind << SubType.getUnqualifiedType(); if (CXXConstructorDecl *CD = findUserDeclaredCtor(SubRD)) S.Diag(CD->getLocation(), diag::note_user_declared_ctor); } else if (!Selected) S.Diag(SubobjLoc, diag::note_nontrivial_no_copy) - << Kind << SubType.getUnqualifiedType() << CSM << SubType; + << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM) + << SubType; else if (Selected->isUserProvided()) { if (Kind == TSK_CompleteObject) S.Diag(Selected->getLocation(), diag::note_nontrivial_user_provided) - << Kind << SubType.getUnqualifiedType() << CSM; + << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM); else { S.Diag(SubobjLoc, diag::note_nontrivial_user_provided) - << Kind << SubType.getUnqualifiedType() << CSM; + << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM); S.Diag(Selected->getLocation(), diag::note_declared_at); } } else { if (Kind != TSK_CompleteObject) S.Diag(SubobjLoc, diag::note_nontrivial_subobject) - << Kind << SubType.getUnqualifiedType() << CSM; + << Kind << SubType.getUnqualifiedType() << llvm::to_underlying(CSM); // Explain why the defaulted or deleted special member isn't trivial. S.SpecialMemberIsTrivial(Selected, CSM, Sema::TAH_IgnoreTrivialABI, @@ -10124,12 +10158,11 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc, /// Check whether the members of a class type allow a special member to be /// trivial. static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD, - Sema::CXXSpecialMember CSM, - bool ConstArg, + CXXSpecialMemberKind CSM, bool ConstArg, Sema::TrivialABIHandling TAH, bool Diagnose) { for (const auto *FI : RD->fields()) { - if (FI->isInvalidDecl() || FI->isUnnamedBitfield()) + if (FI->isInvalidDecl() || FI->isUnnamedBitField()) continue; QualType FieldType = S.Context.getBaseElementType(FI->getType()); @@ -10146,7 +10179,8 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD, // A default constructor is trivial if [...] // -- no non-static data member of its class has a // brace-or-equal-initializer - if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor && + FI->hasInClassInitializer()) { if (Diagnose) S.Diag(FI->getLocation(), diag::note_nontrivial_default_member_init) << FI; @@ -10175,10 +10209,12 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD, /// Diagnose why the specified class does not have a trivial special member of /// the given kind. 
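The triviality computation above matches properties that are observable through the standard type traits, which makes for a quick cross-check (illustrative types):

  #include <type_traits>

  struct Plain    { int x; };             // all special members trivial
  struct UserDtor { ~UserDtor() {} };     // user-provided destructor
  struct WithInit { int x = 0; };         // default member initializer

  static_assert(std::is_trivially_destructible<Plain>::value, "");
  static_assert(!std::is_trivially_destructible<UserDtor>::value, "");
  static_assert(!std::is_trivially_default_constructible<WithInit>::value, "");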
-void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) { +void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, + CXXSpecialMemberKind CSM) { QualType Ty = Context.getRecordType(RD); - bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment); + bool ConstArg = (CSM == CXXSpecialMemberKind::CopyConstructor || + CSM == CXXSpecialMemberKind::CopyAssignment); checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM, TSK_CompleteObject, TAH_IgnoreTrivialABI, /*Diagnose*/true); @@ -10187,9 +10223,10 @@ void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) { /// Determine whether a defaulted or deleted special member function is trivial, /// as specified in C++11 [class.ctor]p5, C++11 [class.copy]p12, /// C++11 [class.copy]p25, and C++11 [class.dtor]p5. -bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, +bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMemberKind CSM, TrivialABIHandling TAH, bool Diagnose) { - assert(!MD->isUserProvided() && CSM != CXXInvalid && "not special enough"); + assert(!MD->isUserProvided() && CSM != CXXSpecialMemberKind::Invalid && + "not special enough"); CXXRecordDecl *RD = MD->getParent(); @@ -10199,13 +10236,13 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, // A [special member] is trivial if [...] its parameter-type-list is // equivalent to the parameter-type-list of an implicit declaration [...] switch (CSM) { - case CXXDefaultConstructor: - case CXXDestructor: + case CXXSpecialMemberKind::DefaultConstructor: + case CXXSpecialMemberKind::Destructor: // Trivial default constructors and destructors cannot have parameters. break; - case CXXCopyConstructor: - case CXXCopyAssignment: { + case CXXSpecialMemberKind::CopyConstructor: + case CXXSpecialMemberKind::CopyAssignment: { const ParmVarDecl *Param0 = MD->getNonObjectParameter(0); const ReferenceType *RT = Param0->getType()->getAs(); @@ -10234,8 +10271,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, break; } - case CXXMoveConstructor: - case CXXMoveAssignment: { + case CXXSpecialMemberKind::MoveConstructor: + case CXXSpecialMemberKind::MoveAssignment: { // Trivial move operations always have non-cv-qualified parameters. const ParmVarDecl *Param0 = MD->getNonObjectParameter(0); const RValueReferenceType *RT = @@ -10250,7 +10287,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, break; } - case CXXInvalid: + case CXXSpecialMemberKind::Invalid: llvm_unreachable("not a special member"); } @@ -10299,7 +10336,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, // C++11 [class.dtor]p5: // A destructor is trivial if [...] // -- the destructor is not virtual - if (CSM == CXXDestructor && MD->isVirtual()) { + if (CSM == CXXSpecialMemberKind::Destructor && MD->isVirtual()) { if (Diagnose) Diag(MD->getLocation(), diag::note_nontrivial_virtual_dtor) << RD; return false; @@ -10308,7 +10345,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25: // A [special member] for class X is trivial if [...] 
// -- class X has no virtual functions and no virtual base classes - if (CSM != CXXDestructor && MD->getParent()->isDynamicClass()) { + if (CSM != CXXSpecialMemberKind::Destructor && + MD->getParent()->isDynamicClass()) { if (!Diagnose) return false; @@ -11705,7 +11743,7 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope, // look through using directives, just look for any ordinary names // as if by qualified name lookup. LookupResult R(*this, II, IdentLoc, LookupOrdinaryName, - ForExternalRedeclaration); + RedeclarationKind::ForExternalRedeclaration); LookupQualifiedName(R, CurContext->getRedeclContext()); NamedDecl *PrevDecl = R.isSingleResult() ? R.getRepresentativeDecl() : nullptr; @@ -12906,7 +12944,7 @@ NamedDecl *Sema::BuildUsingDeclaration( // Do the redeclaration lookup in the current scope. LookupResult Previous(*this, UsingName, LookupUsingDeclName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); Previous.setHideTags(false); if (S) { LookupName(Previous, S); @@ -13149,7 +13187,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, /// In class scope, check if this is a duplicate, for better a diagnostic. DeclarationNameInfo UsingEnumName(ED->getDeclName(), NameLoc); LookupResult Previous(*this, UsingEnumName, LookupUsingDeclName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S); @@ -13182,7 +13220,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS, UsingShadowDecl *PrevDecl = nullptr; DeclarationNameInfo DNI(EC->getDeclName(), EC->getLocation()); LookupResult Previous(*this, DNI, LookupOrdinaryName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S); FilterUsingLookup(S, Previous); @@ -13577,7 +13615,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS, LookupResult Previous(*this, NameInfo, LookupOrdinaryName, TemplateParamLists.size() ? forRedeclarationInCurContext() - : ForVisibleRedeclaration); + : RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S); // Warn about shadowing the name of a template parameter. @@ -13727,7 +13765,7 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc, // Check if we have a previous declaration with the same name. LookupResult PrevR(*this, Alias, AliasLoc, LookupOrdinaryName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(PrevR, S); // Check we're not shadowing a template parameter. 
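The hunks above follow one mechanical pattern: the unscoped `Sema::CXXSpecialMember` enumerators become members of the scoped `CXXSpecialMemberKind` enum, and values streamed into diagnostics are wrapped in `llvm::to_underlying` (the include for `llvm/ADT/STLForwardCompat.h` is added further down in SemaExpr.cpp), since scoped enumerators no longer convert implicitly to integers. A minimal standalone sketch of that pattern, with a local `to_underlying` helper standing in for `llvm::to_underlying` and `emitDiag` as a purely illustrative name rather than a clang API:

```cpp
#include <iostream>
#include <type_traits>

enum class CXXSpecialMemberKind { DefaultConstructor, CopyConstructor, Destructor };

// Local stand-in for llvm::to_underlying (or std::to_underlying in C++23).
template <typename E>
constexpr std::underlying_type_t<E> to_underlying(E Value) {
  return static_cast<std::underlying_type_t<E>>(Value);
}

// Hypothetical diagnostic sink: a scoped enumerator no longer converts to int
// implicitly, so the value must be converted before it is streamed.
void emitDiag(CXXSpecialMemberKind CSM) {
  std::cout << "special member kind = " << to_underlying(CSM) << '\n';
}

int main() {
  emitDiag(CXXSpecialMemberKind::CopyConstructor); // prints "special member kind = 1"
}
```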
@@ -13787,7 +13825,7 @@ struct SpecialMemberExceptionSpecInfo Sema::ImplicitExceptionSpecification ExceptSpec; SpecialMemberExceptionSpecInfo(Sema &S, CXXMethodDecl *MD, - Sema::CXXSpecialMember CSM, + CXXSpecialMemberKind CSM, Sema::InheritedConstructorInfo *ICI, SourceLocation Loc) : SpecialMemberVisitor(S, MD, CSM, ICI), Loc(Loc), ExceptSpec(S) {} @@ -13820,7 +13858,8 @@ bool SpecialMemberExceptionSpecInfo::visitBase(CXXBaseSpecifier *Base) { } bool SpecialMemberExceptionSpecInfo::visitField(FieldDecl *FD) { - if (CSM == Sema::CXXDefaultConstructor && FD->hasInClassInitializer()) { + if (CSM == CXXSpecialMemberKind::DefaultConstructor && + FD->hasInClassInitializer()) { Expr *E = FD->getInClassInitializer(); if (!E) // FIXME: It's a little wasteful to build and throw away a @@ -13879,7 +13918,7 @@ ExplicitSpecifier Sema::ActOnExplicitBoolSpecifier(Expr *ExplicitExpr) { static Sema::ImplicitExceptionSpecification ComputeDefaultedSpecialMemberExceptionSpec( - Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM, + Sema &S, SourceLocation Loc, CXXMethodDecl *MD, CXXSpecialMemberKind CSM, Sema::InheritedConstructorInfo *ICI) { ComputingExceptionSpec CES(S, MD, Loc); @@ -13929,7 +13968,7 @@ struct DeclaringSpecialMember { Sema::ContextRAII SavedContext; bool WasAlreadyBeingDeclared; - DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, Sema::CXXSpecialMember CSM) + DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, CXXSpecialMemberKind CSM) : S(S), D(RD, CSM), SavedContext(S, RD) { WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D).second; if (WasAlreadyBeingDeclared) @@ -13972,7 +14011,7 @@ void Sema::CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD) { // implicit special members with this name. DeclarationName Name = FD->getDeclName(); LookupResult R(*this, Name, SourceLocation(), LookupOrdinaryName, - ForExternalRedeclaration); + RedeclarationKind::ForExternalRedeclaration); for (auto *D : FD->getParent()->lookup(Name)) if (auto *Acceptable = R.getAcceptableDecl(D)) R.addDecl(Acceptable); @@ -14019,13 +14058,13 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor( assert(ClassDecl->needsImplicitDefaultConstructor() && "Should not build implicit default constructor!"); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXDefaultConstructor); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::DefaultConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXDefaultConstructor, - false); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::DefaultConstructor, false); // Create the actual constructor declaration. CanQualType ClassType @@ -14047,10 +14086,10 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor( setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, std::nullopt); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor, - DefaultCon, - /* ConstRHS */ false, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::DefaultConstructor, DefaultCon, + /* ConstRHS */ false, + /* Diagnose */ false); // We don't need to use SpecialMemberIsTrivial here; triviality for default // constructors is easy to compute. 
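Call sites such as `CUDA().inferTargetForImplicitSpecialMember(...)` here, and `OpenMP().ActOnOMPArraySectionExpr(...)` and `SYCL().DiagIfDeviceCode(...)` further down in SemaExpr.cpp, show the other recurring change: language-specific entry points move off the monolithic `Sema` class and are reached through per-language accessors backed by sub-objects (e.g. `SemaCUDA`). A rough standalone sketch of that shape, using invented names `SemaLike`/`CUDAPart`; the real `Sema`/`SemaCUDA` classes carry far more state:

```cpp
#include <iostream>

class SemaLike; // forward declaration so the sub-object can hold a back-reference

class CUDAPart {
  SemaLike &S; // the real SemaCUDA keeps a reference back to Sema in this way
public:
  explicit CUDAPart(SemaLike &S) : S(S) {}
  bool CheckCall(const char *Callee) {
    (void)S; // a real implementation would consult shared Sema state here
    std::cout << "CUDA-specific check for " << Callee << '\n';
    return true;
  }
};

class SemaLike {
  CUDAPart CUDAImpl{*this}; // language-specific logic lives in a sub-object
public:
  CUDAPart &CUDA() { return CUDAImpl; } // accessor used at call sites
};

int main() {
  SemaLike S;
  S.CUDA().CheckCall("kernel"); // mirrors call sites like CUDA().CheckCall(Loc, FD)
}
```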
@@ -14062,7 +14101,8 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor( Scope *S = getScopeForContext(ClassDecl); CheckImplicitSpecialMemberDeclaration(S, DefaultCon); - if (ShouldDeleteSpecialMember(DefaultCon, CXXDefaultConstructor)) + if (ShouldDeleteSpecialMember(DefaultCon, + CXXSpecialMemberKind::DefaultConstructor)) SetDeclDeleted(DefaultCon, ClassLoc); if (S) @@ -14154,10 +14194,10 @@ Sema::findInheritingConstructor(SourceLocation Loc, // from which it was inherited. InheritedConstructorInfo ICI(*this, Loc, Shadow); - bool Constexpr = - BaseCtor->isConstexpr() && - defaultedSpecialMemberIsConstexpr(*this, Derived, CXXDefaultConstructor, - false, BaseCtor, &ICI); + bool Constexpr = BaseCtor->isConstexpr() && + defaultedSpecialMemberIsConstexpr( + *this, Derived, CXXSpecialMemberKind::DefaultConstructor, + false, BaseCtor, &ICI); CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create( Context, Derived, UsingLoc, NameInfo, TInfo->getType(), TInfo, @@ -14201,7 +14241,8 @@ Sema::findInheritingConstructor(SourceLocation Loc, DerivedCtor->setParams(ParamDecls); Derived->addDecl(DerivedCtor); - if (ShouldDeleteSpecialMember(DerivedCtor, CXXDefaultConstructor, &ICI)) + if (ShouldDeleteSpecialMember(DerivedCtor, + CXXSpecialMemberKind::DefaultConstructor, &ICI)) SetDeclDeleted(DerivedCtor, UsingLoc); return DerivedCtor; @@ -14210,8 +14251,9 @@ Sema::findInheritingConstructor(SourceLocation Loc, void Sema::NoteDeletedInheritingConstructor(CXXConstructorDecl *Ctor) { InheritedConstructorInfo ICI(*this, Ctor->getLocation(), Ctor->getInheritedConstructor().getShadowDecl()); - ShouldDeleteSpecialMember(Ctor, CXXDefaultConstructor, &ICI, - /*Diagnose*/true); + ShouldDeleteSpecialMember(Ctor, CXXSpecialMemberKind::DefaultConstructor, + &ICI, + /*Diagnose*/ true); } void Sema::DefineInheritingConstructor(SourceLocation CurrentLocation, @@ -14302,13 +14344,13 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) { // inline public member of its class. assert(ClassDecl->needsImplicitDestructor()); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXDestructor); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::Destructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXDestructor, - false); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::Destructor, false); // Create the actual destructor declaration. CanQualType ClassType @@ -14330,10 +14372,10 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) { setupImplicitSpecialMemberType(Destructor, Context.VoidTy, std::nullopt); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor, - Destructor, - /* ConstRHS */ false, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::Destructor, Destructor, + /* ConstRHS */ false, + /* Diagnose */ false); // We don't need to use SpecialMemberIsTrivial here; triviality for // destructors is easy to compute. @@ -14351,7 +14393,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) { // the definition of the class, because its validity depends on the alignment // of the class. We'll check this from ActOnFields once the class is complete. 
if (ClassDecl->isCompleteDefinition() && - ShouldDeleteSpecialMember(Destructor, CXXDestructor)) + ShouldDeleteSpecialMember(Destructor, CXXSpecialMemberKind::Destructor)) SetDeclDeleted(Destructor, ClassLoc); // Introduce this destructor into its scope. @@ -14932,7 +14974,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { // operators taking an object instead of a reference are allowed. assert(ClassDecl->needsImplicitCopyAssignment()); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyAssignment); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::CopyAssignment); if (DSM.isAlreadyBeingDeclared()) return nullptr; @@ -14949,9 +14992,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { ArgType = Context.getLValueReferenceType(ArgType); - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXCopyAssignment, - Const); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::CopyAssignment, Const); // An implicitly-declared copy assignment operator is an inline public // member of its class. @@ -14972,10 +15014,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment, - CopyAssignment, - /* ConstRHS */ Const, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::CopyAssignment, CopyAssignment, + /* ConstRHS */ Const, + /* Diagnose */ false); // Add the parameter to the operator. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment, @@ -14986,9 +15028,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { CopyAssignment->setParams(FromParam); CopyAssignment->setTrivial( - ClassDecl->needsOverloadResolutionForCopyAssignment() - ? SpecialMemberIsTrivial(CopyAssignment, CXXCopyAssignment) - : ClassDecl->hasTrivialCopyAssignment()); + ClassDecl->needsOverloadResolutionForCopyAssignment() + ? SpecialMemberIsTrivial(CopyAssignment, + CXXSpecialMemberKind::CopyAssignment) + : ClassDecl->hasTrivialCopyAssignment()); // Note that we have added this copy-assignment operator. ++getASTContext().NumImplicitCopyAssignmentOperatorsDeclared; @@ -14996,7 +15039,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { Scope *S = getScopeForContext(ClassDecl); CheckImplicitSpecialMemberDeclaration(S, CopyAssignment); - if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment)) { + if (ShouldDeleteSpecialMember(CopyAssignment, + CXXSpecialMemberKind::CopyAssignment)) { ClassDecl->setImplicitCopyAssignmentIsDeleted(); SetDeclDeleted(CopyAssignment, ClassLoc); } @@ -15185,7 +15229,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, for (auto *Field : ClassDecl->fields()) { // FIXME: We should form some kind of AST representation for the implied // memcpy in a union copy operation. 
- if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) + if (Field->isUnnamedBitField() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { @@ -15283,7 +15327,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) { assert(ClassDecl->needsImplicitMoveAssignment()); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveAssignment); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::MoveAssignment); if (DSM.isAlreadyBeingDeclared()) return nullptr; @@ -15299,9 +15344,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) { QualType RetType = Context.getLValueReferenceType(ArgType); ArgType = Context.getRValueReferenceType(ArgType); - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXMoveAssignment, - false); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::MoveAssignment, false); // An implicitly-declared move assignment operator is an inline public // member of its class. @@ -15322,10 +15366,10 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) { setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment, - MoveAssignment, - /* ConstRHS */ false, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::MoveAssignment, MoveAssignment, + /* ConstRHS */ false, + /* Diagnose */ false); // Add the parameter to the operator. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment, @@ -15336,9 +15380,10 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) { MoveAssignment->setParams(FromParam); MoveAssignment->setTrivial( - ClassDecl->needsOverloadResolutionForMoveAssignment() - ? SpecialMemberIsTrivial(MoveAssignment, CXXMoveAssignment) - : ClassDecl->hasTrivialMoveAssignment()); + ClassDecl->needsOverloadResolutionForMoveAssignment() + ? SpecialMemberIsTrivial(MoveAssignment, + CXXSpecialMemberKind::MoveAssignment) + : ClassDecl->hasTrivialMoveAssignment()); // Note that we have added this copy-assignment operator. ++getASTContext().NumImplicitMoveAssignmentOperatorsDeclared; @@ -15346,7 +15391,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) { Scope *S = getScopeForContext(ClassDecl); CheckImplicitSpecialMemberDeclaration(S, MoveAssignment); - if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) { + if (ShouldDeleteSpecialMember(MoveAssignment, + CXXSpecialMemberKind::MoveAssignment)) { ClassDecl->setImplicitMoveAssignmentIsDeleted(); SetDeclDeleted(MoveAssignment, ClassLoc); } @@ -15395,10 +15441,10 @@ static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class, // If we're not actually going to call a move assignment for this base, // or the selected move assignment is trivial, skip it. 
Sema::SpecialMemberOverloadResult SMOR = - S.LookupSpecialMember(Base, Sema::CXXMoveAssignment, - /*ConstArg*/false, /*VolatileArg*/false, - /*RValueThis*/true, /*ConstThis*/false, - /*VolatileThis*/false); + S.LookupSpecialMember(Base, CXXSpecialMemberKind::MoveAssignment, + /*ConstArg*/ false, /*VolatileArg*/ false, + /*RValueThis*/ true, /*ConstThis*/ false, + /*VolatileThis*/ false); if (!SMOR.getMethod() || SMOR.getMethod()->isTrivial() || !SMOR.getMethod()->isMoveAssignmentOperator()) continue; @@ -15568,7 +15614,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, for (auto *Field : ClassDecl->fields()) { // FIXME: We should form some kind of AST representation for the implied // memcpy in a union copy operation. - if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) + if (Field->isUnnamedBitField() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { @@ -15675,7 +15721,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( // constructor, one is declared implicitly. assert(ClassDecl->needsImplicitCopyConstructor()); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyConstructor); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::CopyConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; @@ -15693,9 +15740,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( ArgType = Context.getLValueReferenceType(ArgType); - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXCopyConstructor, - Const); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::CopyConstructor, Const); DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( @@ -15718,10 +15764,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor, - CopyConstructor, - /* ConstRHS */ Const, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::CopyConstructor, CopyConstructor, + /* ConstRHS */ Const, + /* Diagnose */ false); // During template instantiation of special member functions we need a // reliable TypeSourceInfo for the parameter types in order to allow functions @@ -15739,14 +15785,16 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( CopyConstructor->setTrivial( ClassDecl->needsOverloadResolutionForCopyConstructor() - ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor) + ? SpecialMemberIsTrivial(CopyConstructor, + CXXSpecialMemberKind::CopyConstructor) : ClassDecl->hasTrivialCopyConstructor()); CopyConstructor->setTrivialForCall( ClassDecl->hasAttr() || (ClassDecl->needsOverloadResolutionForCopyConstructor() - ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor, - TAH_ConsiderTrivialABI) + ? SpecialMemberIsTrivial(CopyConstructor, + CXXSpecialMemberKind::CopyConstructor, + TAH_ConsiderTrivialABI) : ClassDecl->hasTrivialCopyConstructorForCall())); // Note that we have declared this constructor. 
@@ -15755,7 +15803,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( Scope *S = getScopeForContext(ClassDecl); CheckImplicitSpecialMemberDeclaration(S, CopyConstructor); - if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor)) { + if (ShouldDeleteSpecialMember(CopyConstructor, + CXXSpecialMemberKind::CopyConstructor)) { ClassDecl->setImplicitCopyConstructorIsDeleted(); SetDeclDeleted(CopyConstructor, ClassLoc); } @@ -15820,7 +15869,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( CXXRecordDecl *ClassDecl) { assert(ClassDecl->needsImplicitMoveConstructor()); - DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveConstructor); + DeclaringSpecialMember DSM(*this, ClassDecl, + CXXSpecialMemberKind::MoveConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; @@ -15834,9 +15884,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( ArgType = Context.getAddrSpaceQualType(ClassType, AS); ArgType = Context.getRValueReferenceType(ArgType); - bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, - CXXMoveConstructor, - false); + bool Constexpr = defaultedSpecialMemberIsConstexpr( + *this, ClassDecl, CXXSpecialMemberKind::MoveConstructor, false); DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( @@ -15860,10 +15909,10 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType); if (getLangOpts().CUDA) - inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor, - MoveConstructor, - /* ConstRHS */ false, - /* Diagnose */ false); + CUDA().inferTargetForImplicitSpecialMember( + ClassDecl, CXXSpecialMemberKind::MoveConstructor, MoveConstructor, + /* ConstRHS */ false, + /* Diagnose */ false); // Add the parameter to the constructor. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor, @@ -15875,13 +15924,15 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( MoveConstructor->setTrivial( ClassDecl->needsOverloadResolutionForMoveConstructor() - ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor) + ? SpecialMemberIsTrivial(MoveConstructor, + CXXSpecialMemberKind::MoveConstructor) : ClassDecl->hasTrivialMoveConstructor()); MoveConstructor->setTrivialForCall( ClassDecl->hasAttr() || (ClassDecl->needsOverloadResolutionForMoveConstructor() - ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor, + ? 
SpecialMemberIsTrivial(MoveConstructor, + CXXSpecialMemberKind::MoveConstructor, TAH_ConsiderTrivialABI) : ClassDecl->hasTrivialMoveConstructorForCall())); @@ -15891,7 +15942,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( Scope *S = getScopeForContext(ClassDecl); CheckImplicitSpecialMemberDeclaration(S, MoveConstructor); - if (ShouldDeleteSpecialMember(MoveConstructor, CXXMoveConstructor)) { + if (ShouldDeleteSpecialMember(MoveConstructor, + CXXSpecialMemberKind::MoveConstructor)) { ClassDecl->setImplicitMoveConstructorIsDeleted(); SetDeclDeleted(MoveConstructor, ClassLoc); } @@ -16163,7 +16215,7 @@ ExprResult Sema::BuildCXXConstructExpr( DeclInitType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) && "given constructor for wrong type"); MarkFunctionReferenced(ConstructLoc, Constructor); - if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor)) + if (getLangOpts().CUDA && !CUDA().CheckCall(ConstructLoc, Constructor)) return ExprError(); return CheckForImmediateInvocation( @@ -16940,11 +16992,10 @@ Decl *Sema::ActOnEmptyDeclaration(Scope *S, /// Perform semantic analysis for the variable declaration that /// occurs within a C++ catch clause, returning the newly-created /// variable. -VarDecl *Sema::BuildExceptionDeclaration(Scope *S, - TypeSourceInfo *TInfo, +VarDecl *Sema::BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation Loc, - IdentifierInfo *Name) { + const IdentifierInfo *Name) { bool Invalid = false; QualType ExDeclType = TInfo->getType(); @@ -17089,10 +17140,10 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) { Invalid = true; } - IdentifierInfo *II = D.getIdentifier(); - if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(), - LookupOrdinaryName, - ForVisibleRedeclaration)) { + const IdentifierInfo *II = D.getIdentifier(); + if (NamedDecl *PrevDecl = + LookupSingleName(S, II, D.getIdentifierLoc(), LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration)) { // The scope should be freshly made just for us. There is just no way // it contains any previous declaration, except for function parameters in // a function-try-block's catch statement. @@ -17883,7 +17934,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D, DeclContext *DC; Scope *DCScope = S; LookupResult Previous(*this, NameInfo, LookupOrdinaryName, - ForExternalRedeclaration); + RedeclarationKind::ForExternalRedeclaration); bool isTemplateId = D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId; @@ -18138,7 +18189,8 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D, return ND; } -void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) { +void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc, + StringLiteral *Message) { AdjustDeclIfTemplate(Dcl); FunctionDecl *Fn = dyn_cast_or_null(Dcl); @@ -18187,7 +18239,7 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) { // C++11 [dcl.fct.def.delete]p4: // A deleted function is implicitly inline. 
Fn->setImplicitlyInline(); - Fn->setDeletedAsWritten(); + Fn->setDeletedAsWritten(true, Message); } void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) { @@ -18300,11 +18352,11 @@ void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) { } } -void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc, - FnBodyKind BodyKind) { +void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind, + StringLiteral *DeletedMessage) { switch (BodyKind) { case FnBodyKind::Delete: - SetDeclDeleted(D, Loc); + SetDeclDeleted(D, Loc, DeletedMessage); break; case FnBodyKind::Default: SetDeclDefaulted(D, Loc); @@ -18631,8 +18683,8 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, // Do not mark as used if compiling for the device outside of the target // region. if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice && - !isInOpenMPDeclareTargetContext() && - !isInOpenMPTargetExecutionDirective()) { + !OpenMP().isInOpenMPDeclareTargetContext() && + !OpenMP().isInOpenMPTargetExecutionDirective()) { if (!DefinitionRequired) MarkVirtualMembersReferenced(Loc, Class); return; @@ -19185,7 +19237,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr) { - IdentifierInfo *II = D.getIdentifier(); + const IdentifierInfo *II = D.getIdentifier(); if (!II) { Diag(DeclStart, diag::err_anonymous_property); return nullptr; @@ -19218,7 +19270,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record, // Check to see if this name was declared as a member previously NamedDecl *PrevDecl = nullptr; LookupResult Previous(*this, II, Loc, LookupMemberName, - ForVisibleRedeclaration); + RedeclarationKind::ForVisibleRedeclaration); LookupName(Previous, S); switch (Previous.getResultKind()) { case LookupResult::Found: diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp index 94a245f0f905f..74d6f0700b0e4 100644 --- a/clang/lib/Sema/SemaDeclObjC.cpp +++ b/clang/lib/Sema/SemaDeclObjC.cpp @@ -1818,9 +1818,9 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, } ObjCCategoryDecl *Sema::ActOnStartCategoryInterface( - SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, + SourceLocation AtInterfaceLoc, const IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, - IdentifierInfo *CategoryName, SourceLocation CategoryLoc, + const IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList) { @@ -1916,9 +1916,9 @@ ObjCCategoryDecl *Sema::ActOnStartCategoryInterface( /// category implementation declaration and build an ObjCCategoryImplDecl /// object. 
ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation( - SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, - SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, - const ParsedAttributesView &Attrs) { + SourceLocation AtCatImplLoc, const IdentifierInfo *ClassName, + SourceLocation ClassLoc, const IdentifierInfo *CatName, + SourceLocation CatLoc, const ParsedAttributesView &Attrs) { ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true); ObjCCategoryDecl *CatIDecl = nullptr; if (IDecl && IDecl->hasDefinition()) { @@ -1982,8 +1982,8 @@ ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation( } ObjCImplementationDecl *Sema::ActOnStartClassImplementation( - SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, - SourceLocation ClassLoc, IdentifierInfo *SuperClassname, + SourceLocation AtClassImplLoc, const IdentifierInfo *ClassName, + SourceLocation ClassLoc, const IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &Attrs) { ObjCInterfaceDecl *IDecl = nullptr; // Check for another declaration kind with the same name. @@ -2751,7 +2751,7 @@ static void CheckProtocolMethodDefs( // implemented in the class, we should not issue "Method definition not // found" warnings. // FIXME: Use a general GetUnarySelector method for this. - IdentifierInfo* II = &S.Context.Idents.get("forwardInvocation"); + const IdentifierInfo *II = &S.Context.Idents.get("forwardInvocation"); Selector fISelector = S.Context.Selectors.getSelector(1, &II); if (InsMap.count(fISelector)) // Is IDecl derived from 'NSProxy'? If so, no instance methods @@ -5105,8 +5105,8 @@ bool Sema::CheckObjCDeclScope(Decl *D) { /// Called whenever \@defs(ClassName) is encountered in the source. Inserts the /// instance variables of ClassName into Decls. void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, - IdentifierInfo *ClassName, - SmallVectorImpl &Decls) { + const IdentifierInfo *ClassName, + SmallVectorImpl &Decls) { // Check that ClassName is a valid class ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName, DeclStart); if (!Class) { @@ -5148,8 +5148,7 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, - bool Invalid) { + const IdentifierInfo *Id, bool Invalid) { // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage // duration shall not be qualified by an address-space qualifier." 
// Since all parameters have automatic store duration, they can not have diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index e37dda77a9db4..8faa1d9aeb40b 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -49,10 +49,13 @@ #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaFixItUtils.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaOpenMP.h" #include "clang/Sema/Template.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ConvertUTF.h" @@ -233,28 +236,28 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, bool IsRuntimeEvaluated = ExprEvalContexts.empty() || (!isUnevaluatedContext() && !isConstantEvaluatedContext()); - bool IsEsimdPrivateGlobal = isSYCLEsimdPrivateGlobal(VD); + bool IsEsimdPrivateGlobal = SYCL().isSYCLEsimdPrivateGlobal(VD); // Non-const statics are not allowed in SYCL except for ESIMD or with the // SYCLGlobalVar or SYCLGlobalVariableAllowed attribute. if (IsRuntimeEvaluated && !IsEsimdPrivateGlobal && !IsConst && VD->getStorageClass() == SC_Static && !VD->hasAttr() && - !isTypeDecoratedWithDeclAttribute( - VD->getType())) - SYCLDiagIfDeviceCode(*Locs.begin(), diag::err_sycl_restrict) - << Sema::KernelNonConstStaticDataVariable; + !SemaSYCL::isTypeDecoratedWithDeclAttribute< + SYCLGlobalVariableAllowedAttr>(VD->getType())) + SYCL().DiagIfDeviceCode(*Locs.begin(), diag::err_sycl_restrict) + << SemaSYCL::KernelNonConstStaticDataVariable; // Non-const globals are not allowed in SYCL except for ESIMD or with the // SYCLGlobalVar or SYCLGlobalVariableAllowed attribute. else if (IsRuntimeEvaluated && !IsEsimdPrivateGlobal && !IsConst && VD->hasGlobalStorage() && !VD->hasAttr() && - !isTypeDecoratedWithDeclAttribute( - VD->getType())) - SYCLDiagIfDeviceCode(*Locs.begin(), diag::err_sycl_restrict) - << Sema::KernelGlobalVariable; + !SemaSYCL::isTypeDecoratedWithDeclAttribute< + SYCLGlobalVariableAllowedAttr>(VD->getType())) + SYCL().DiagIfDeviceCode(*Locs.begin(), diag::err_sycl_restrict) + << SemaSYCL::KernelGlobalVariable; // ESIMD globals cannot be used in a SYCL context. else if (IsRuntimeEvaluated && IsEsimdPrivateGlobal && VD->hasGlobalStorage()) - SYCLDiagIfDeviceCode(*Locs.begin(), + SYCL().DiagIfDeviceCode(*Locs.begin(), diag::err_esimd_global_in_sycl_context, Sema::DeviceDiagnosticReason::Sycl); } else if (auto *FDecl = dyn_cast(D)) { @@ -288,7 +291,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, !Id->getName().starts_with("__devicelib_ConvertFToBF16INTEL") && !Id->getName().starts_with("__assert_fail") && !isMsvcMathFn(Id->getName())) { - SYCLDiagIfDeviceCode( + SYCL().DiagIfDeviceCode( *Locs.begin(), diag::err_sycl_device_function_is_called_from_esimd, Sema::DeviceDiagnosticReason::Esimd); } @@ -339,8 +342,11 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, Diag(Loc, diag::err_deleted_inherited_ctor_use) << Ctor->getParent() << Ctor->getInheritedConstructor().getConstructor()->getParent(); - else - Diag(Loc, diag::err_deleted_function_use); + else { + StringLiteral *Msg = FD->getDeletedMessage(); + Diag(Loc, diag::err_deleted_function_use) + << (Msg != nullptr) << (Msg ? 
Msg->getString() : StringRef()); + } NoteDeletedFunction(FD); return true; } @@ -375,7 +381,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, DeduceReturnType(FD, Loc)) return true; - if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD)) + if (getLangOpts().CUDA && !CUDA().CheckCall(Loc, FD)) return true; } @@ -423,9 +429,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef Locs, // at the same location. // [OpenMP 5.2] Also allow iterator declared variables. if (LangOpts.OpenMP && isa(D) && - !isOpenMPDeclareMapperVarDeclAllowed(cast(D))) { + !OpenMP().isOpenMPDeclareMapperVarDeclAllowed(cast(D))) { Diag(Loc, diag::err_omp_declare_mapper_wrong_var) - << getOpenMPDeclareMapperVarName(); + << OpenMP().getOpenMPDeclareMapperVarName(); Diag(D->getLocation(), diag::note_entity_declared_at) << D; return true; } @@ -2330,7 +2336,7 @@ NonOdrUseReason Sema::getNonOdrUseReasonInCurrentContext(ValueDecl *D) { // be loaded from the captured. if (VarDecl *VD = dyn_cast(D)) { if (VD->getType()->isReferenceType() && - !(getLangOpts().OpenMP && isOpenMPCapturedDecl(D)) && + !(getLangOpts().OpenMP && OpenMP().isOpenMPCapturedDecl(D)) && !isCapturingReferenceToHostVarInCUDADeviceLambda(*this, VD) && VD->isUsableInConstantExpressions(Context)) return NOUR_Constant; @@ -3510,11 +3516,10 @@ static bool ShouldLookupResultBeMultiVersionOverload(const LookupResult &R) { ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, - bool AcceptInvalidDecl, - bool NeedUnresolved) { + bool AcceptInvalidDecl) { // If this is a single, fully-resolved result and we don't need ADL, // just build an ordinary singleton decl ref. - if (!NeedUnresolved && !NeedsADL && R.isSingleResult() && + if (!NeedsADL && R.isSingleResult() && !R.getAsSingle() && !ShouldLookupResultBeMultiVersionOverload(R)) return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(), @@ -3863,65 +3868,6 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc, SL); } -ExprResult Sema::BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, - TypeSourceInfo *TSI) { - return SYCLUniqueStableNameExpr::Create(Context, OpLoc, LParen, RParen, TSI); -} - -ExprResult Sema::ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, - ParsedType ParsedTy) { - TypeSourceInfo *TSI = nullptr; - QualType Ty = GetTypeFromParser(ParsedTy, &TSI); - - if (Ty.isNull()) - return ExprError(); - if (!TSI) - TSI = Context.getTrivialTypeSourceInfo(Ty, LParen); - - return BuildSYCLUniqueStableNameExpr(OpLoc, LParen, RParen, TSI); -} - -ExprResult Sema::BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, Expr *E) { - if (!E->isInstantiationDependent()) { - // Special handling to get us better error messages for a member variable. 
- if (auto *ME = dyn_cast(E->IgnoreUnlessSpelledInSource())) { - if (isa(ME->getMemberDecl())) - Diag(E->getExprLoc(), diag::err_unique_stable_id_global_storage); - else - Diag(E->getExprLoc(), diag::err_unique_stable_id_expected_var); - return ExprError(); - } - - auto *DRE = dyn_cast(E->IgnoreUnlessSpelledInSource()); - - if (!DRE || !isa_and_nonnull(DRE->getDecl())) { - Diag(E->getExprLoc(), diag::err_unique_stable_id_expected_var); - return ExprError(); - } - - auto *Var = cast(DRE->getDecl()); - - if (!Var->hasGlobalStorage()) { - Diag(E->getExprLoc(), diag::err_unique_stable_id_global_storage); - return ExprError(); - } - } - - return SYCLUniqueStableIdExpr::Create(Context, OpLoc, LParen, RParen, E); -} - -ExprResult Sema::ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc, - SourceLocation LParen, - SourceLocation RParen, Expr *E) { - return BuildSYCLUniqueStableIdExpr(OpLoc, LParen, RParen, E); -} - ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) { return BuildPredefinedExpr(Loc, getPredefinedExprKind(Kind)); } @@ -5203,9 +5149,10 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, if (base && !base->getType().isNull() && base->hasPlaceholderType(BuiltinType::OMPArraySection)) - return ActOnOMPArraySectionExpr(base, lbLoc, ArgExprs.front(), SourceLocation(), - SourceLocation(), /*Length*/ nullptr, - /*Stride=*/nullptr, rbLoc); + return OpenMP().ActOnOMPArraySectionExpr(base, lbLoc, ArgExprs.front(), + SourceLocation(), SourceLocation(), + /*Length*/ nullptr, + /*Stride=*/nullptr, rbLoc); // Since this might be a postfix expression, get rid of ParenListExprs. if (isa(base)) { @@ -5477,558 +5424,6 @@ void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) { } } -ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, - Expr *LowerBound, - SourceLocation ColonLocFirst, - SourceLocation ColonLocSecond, - Expr *Length, Expr *Stride, - SourceLocation RBLoc) { - if (Base->hasPlaceholderType() && - !Base->hasPlaceholderType(BuiltinType::OMPArraySection)) { - ExprResult Result = CheckPlaceholderExpr(Base); - if (Result.isInvalid()) - return ExprError(); - Base = Result.get(); - } - if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) { - ExprResult Result = CheckPlaceholderExpr(LowerBound); - if (Result.isInvalid()) - return ExprError(); - Result = DefaultLvalueConversion(Result.get()); - if (Result.isInvalid()) - return ExprError(); - LowerBound = Result.get(); - } - if (Length && Length->getType()->isNonOverloadPlaceholderType()) { - ExprResult Result = CheckPlaceholderExpr(Length); - if (Result.isInvalid()) - return ExprError(); - Result = DefaultLvalueConversion(Result.get()); - if (Result.isInvalid()) - return ExprError(); - Length = Result.get(); - } - if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) { - ExprResult Result = CheckPlaceholderExpr(Stride); - if (Result.isInvalid()) - return ExprError(); - Result = DefaultLvalueConversion(Result.get()); - if (Result.isInvalid()) - return ExprError(); - Stride = Result.get(); - } - - // Build an unanalyzed expression if either operand is type-dependent. 
- if (Base->isTypeDependent() || - (LowerBound && - (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) || - (Length && (Length->isTypeDependent() || Length->isValueDependent())) || - (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) { - return new (Context) OMPArraySectionExpr( - Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue, - OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); - } - - // Perform default conversions. - QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base); - QualType ResultTy; - if (OriginalTy->isAnyPointerType()) { - ResultTy = OriginalTy->getPointeeType(); - } else if (OriginalTy->isArrayType()) { - ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType(); - } else { - return ExprError( - Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value) - << Base->getSourceRange()); - } - // C99 6.5.2.1p1 - if (LowerBound) { - auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(), - LowerBound); - if (Res.isInvalid()) - return ExprError(Diag(LowerBound->getExprLoc(), - diag::err_omp_typecheck_section_not_integer) - << 0 << LowerBound->getSourceRange()); - LowerBound = Res.get(); - - if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || - LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) - Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char) - << 0 << LowerBound->getSourceRange(); - } - if (Length) { - auto Res = - PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length); - if (Res.isInvalid()) - return ExprError(Diag(Length->getExprLoc(), - diag::err_omp_typecheck_section_not_integer) - << 1 << Length->getSourceRange()); - Length = Res.get(); - - if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || - Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) - Diag(Length->getExprLoc(), diag::warn_omp_section_is_char) - << 1 << Length->getSourceRange(); - } - if (Stride) { - ExprResult Res = - PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride); - if (Res.isInvalid()) - return ExprError(Diag(Stride->getExprLoc(), - diag::err_omp_typecheck_section_not_integer) - << 1 << Stride->getSourceRange()); - Stride = Res.get(); - - if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || - Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) - Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char) - << 1 << Stride->getSourceRange(); - } - - // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly, - // C++ [expr.sub]p1: The type "T" shall be a completely-defined object - // type. Note that functions are not objects, and that (in C99 parlance) - // incomplete types are not object types. - if (ResultTy->isFunctionType()) { - Diag(Base->getExprLoc(), diag::err_omp_section_function_type) - << ResultTy << Base->getSourceRange(); - return ExprError(); - } - - if (RequireCompleteType(Base->getExprLoc(), ResultTy, - diag::err_omp_section_incomplete_type, Base)) - return ExprError(); - - if (LowerBound && !OriginalTy->isAnyPointerType()) { - Expr::EvalResult Result; - if (LowerBound->EvaluateAsInt(Result, Context)) { - // OpenMP 5.0, [2.1.5 Array Sections] - // The array section must be a subset of the original array. 
- llvm::APSInt LowerBoundValue = Result.Val.getInt(); - if (LowerBoundValue.isNegative()) { - Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array) - << LowerBound->getSourceRange(); - return ExprError(); - } - } - } - - if (Length) { - Expr::EvalResult Result; - if (Length->EvaluateAsInt(Result, Context)) { - // OpenMP 5.0, [2.1.5 Array Sections] - // The length must evaluate to non-negative integers. - llvm::APSInt LengthValue = Result.Val.getInt(); - if (LengthValue.isNegative()) { - Diag(Length->getExprLoc(), diag::err_omp_section_length_negative) - << toString(LengthValue, /*Radix=*/10, /*Signed=*/true) - << Length->getSourceRange(); - return ExprError(); - } - } - } else if (ColonLocFirst.isValid() && - (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() && - !OriginalTy->isVariableArrayType()))) { - // OpenMP 5.0, [2.1.5 Array Sections] - // When the size of the array dimension is not known, the length must be - // specified explicitly. - Diag(ColonLocFirst, diag::err_omp_section_length_undefined) - << (!OriginalTy.isNull() && OriginalTy->isArrayType()); - return ExprError(); - } - - if (Stride) { - Expr::EvalResult Result; - if (Stride->EvaluateAsInt(Result, Context)) { - // OpenMP 5.0, [2.1.5 Array Sections] - // The stride must evaluate to a positive integer. - llvm::APSInt StrideValue = Result.Val.getInt(); - if (!StrideValue.isStrictlyPositive()) { - Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive) - << toString(StrideValue, /*Radix=*/10, /*Signed=*/true) - << Stride->getSourceRange(); - return ExprError(); - } - } - } - - if (!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) { - ExprResult Result = DefaultFunctionArrayLvalueConversion(Base); - if (Result.isInvalid()) - return ExprError(); - Base = Result.get(); - } - return new (Context) OMPArraySectionExpr( - Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue, - OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); -} - -ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, - SourceLocation RParenLoc, - ArrayRef Dims, - ArrayRef Brackets) { - if (Base->hasPlaceholderType()) { - ExprResult Result = CheckPlaceholderExpr(Base); - if (Result.isInvalid()) - return ExprError(); - Result = DefaultLvalueConversion(Result.get()); - if (Result.isInvalid()) - return ExprError(); - Base = Result.get(); - } - QualType BaseTy = Base->getType(); - // Delay analysis of the types/expressions if instantiation/specialization is - // required. 
- if (!BaseTy->isPointerType() && Base->isTypeDependent()) - return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base, - LParenLoc, RParenLoc, Dims, Brackets); - if (!BaseTy->isPointerType() || - (!Base->isTypeDependent() && - BaseTy->getPointeeType()->isIncompleteType())) - return ExprError(Diag(Base->getExprLoc(), - diag::err_omp_non_pointer_type_array_shaping_base) - << Base->getSourceRange()); - - SmallVector NewDims; - bool ErrorFound = false; - for (Expr *Dim : Dims) { - if (Dim->hasPlaceholderType()) { - ExprResult Result = CheckPlaceholderExpr(Dim); - if (Result.isInvalid()) { - ErrorFound = true; - continue; - } - Result = DefaultLvalueConversion(Result.get()); - if (Result.isInvalid()) { - ErrorFound = true; - continue; - } - Dim = Result.get(); - } - if (!Dim->isTypeDependent()) { - ExprResult Result = - PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim); - if (Result.isInvalid()) { - ErrorFound = true; - Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer) - << Dim->getSourceRange(); - continue; - } - Dim = Result.get(); - Expr::EvalResult EvResult; - if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) { - // OpenMP 5.0, [2.1.4 Array Shaping] - // Each si is an integral type expression that must evaluate to a - // positive integer. - llvm::APSInt Value = EvResult.Val.getInt(); - if (!Value.isStrictlyPositive()) { - Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive) - << toString(Value, /*Radix=*/10, /*Signed=*/true) - << Dim->getSourceRange(); - ErrorFound = true; - continue; - } - } - } - NewDims.push_back(Dim); - } - if (ErrorFound) - return ExprError(); - return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base, - LParenLoc, RParenLoc, NewDims, Brackets); -} - -ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, - SourceLocation LLoc, SourceLocation RLoc, - ArrayRef Data) { - SmallVector ID; - bool IsCorrect = true; - for (const OMPIteratorData &D : Data) { - TypeSourceInfo *TInfo = nullptr; - SourceLocation StartLoc; - QualType DeclTy; - if (!D.Type.getAsOpaquePtr()) { - // OpenMP 5.0, 2.1.6 Iterators - // In an iterator-specifier, if the iterator-type is not specified then - // the type of that iterator is of int type. - DeclTy = Context.IntTy; - StartLoc = D.DeclIdentLoc; - } else { - DeclTy = GetTypeFromParser(D.Type, &TInfo); - StartLoc = TInfo->getTypeLoc().getBeginLoc(); - } - - bool IsDeclTyDependent = DeclTy->isDependentType() || - DeclTy->containsUnexpandedParameterPack() || - DeclTy->isInstantiationDependentType(); - if (!IsDeclTyDependent) { - if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) { - // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ - // The iterator-type must be an integral or pointer type. - Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) - << DeclTy; - IsCorrect = false; - continue; - } - if (DeclTy.isConstant(Context)) { - // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ - // The iterator-type must not be const qualified. - Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) - << DeclTy; - IsCorrect = false; - continue; - } - } - - // Iterator declaration. - assert(D.DeclIdent && "Identifier expected."); - // Always try to create iterator declarator to avoid extra error messages - // about unknown declarations use. 
- auto *VD = VarDecl::Create(Context, CurContext, StartLoc, D.DeclIdentLoc, - D.DeclIdent, DeclTy, TInfo, SC_None); - VD->setImplicit(); - if (S) { - // Check for conflicting previous declaration. - DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc); - LookupResult Previous(*this, NameInfo, LookupOrdinaryName, - ForVisibleRedeclaration); - Previous.suppressDiagnostics(); - LookupName(Previous, S); - - FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage=*/false, - /*AllowInlineNamespace=*/false); - if (!Previous.empty()) { - NamedDecl *Old = Previous.getRepresentativeDecl(); - Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName(); - Diag(Old->getLocation(), diag::note_previous_definition); - } else { - PushOnScopeChains(VD, S); - } - } else { - CurContext->addDecl(VD); - } - - /// Act on the iterator variable declaration. - ActOnOpenMPIteratorVarDecl(VD); - - Expr *Begin = D.Range.Begin; - if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) { - ExprResult BeginRes = - PerformImplicitConversion(Begin, DeclTy, AA_Converting); - Begin = BeginRes.get(); - } - Expr *End = D.Range.End; - if (!IsDeclTyDependent && End && !End->isTypeDependent()) { - ExprResult EndRes = PerformImplicitConversion(End, DeclTy, AA_Converting); - End = EndRes.get(); - } - Expr *Step = D.Range.Step; - if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) { - if (!Step->getType()->isIntegralType(Context)) { - Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral) - << Step << Step->getSourceRange(); - IsCorrect = false; - continue; - } - std::optional Result = - Step->getIntegerConstantExpr(Context); - // OpenMP 5.0, 2.1.6 Iterators, Restrictions - // If the step expression of a range-specification equals zero, the - // behavior is unspecified. - if (Result && Result->isZero()) { - Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero) - << Step << Step->getSourceRange(); - IsCorrect = false; - continue; - } - } - if (!Begin || !End || !IsCorrect) { - IsCorrect = false; - continue; - } - OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back(); - IDElem.IteratorDecl = VD; - IDElem.AssignmentLoc = D.AssignLoc; - IDElem.Range.Begin = Begin; - IDElem.Range.End = End; - IDElem.Range.Step = Step; - IDElem.ColonLoc = D.ColonLoc; - IDElem.SecondColonLoc = D.SecColonLoc; - } - if (!IsCorrect) { - // Invalidate all created iterator declarations if error is found. - for (const OMPIteratorExpr::IteratorDefinition &D : ID) { - if (Decl *ID = D.IteratorDecl) - ID->setInvalidDecl(); - } - return ExprError(); - } - SmallVector Helpers; - if (!CurContext->isDependentContext()) { - // Build number of ityeration for each iteration range. - // Ni = ((Stepi > 0) ? 
((Endi + Stepi -1 - Begini)/Stepi) : - // ((Begini-Stepi-1-Endi) / -Stepi); - for (OMPIteratorExpr::IteratorDefinition &D : ID) { - // (Endi - Begini) - ExprResult Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, D.Range.End, - D.Range.Begin); - if(!Res.isUsable()) { - IsCorrect = false; - continue; - } - ExprResult St, St1; - if (D.Range.Step) { - St = D.Range.Step; - // (Endi - Begini) + Stepi - Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), St.get()); - if (!Res.isUsable()) { - IsCorrect = false; - continue; - } - // (Endi - Begini) + Stepi - 1 - Res = - CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res.get(), - ActOnIntegerConstant(D.AssignmentLoc, 1).get()); - if (!Res.isUsable()) { - IsCorrect = false; - continue; - } - // ((Endi - Begini) + Stepi - 1) / Stepi - Res = CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), St.get()); - if (!Res.isUsable()) { - IsCorrect = false; - continue; - } - St1 = CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, D.Range.Step); - // (Begini - Endi) - ExprResult Res1 = CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, - D.Range.Begin, D.Range.End); - if (!Res1.isUsable()) { - IsCorrect = false; - continue; - } - // (Begini - Endi) - Stepi - Res1 = - CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), St1.get()); - if (!Res1.isUsable()) { - IsCorrect = false; - continue; - } - // (Begini - Endi) - Stepi - 1 - Res1 = - CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, Res1.get(), - ActOnIntegerConstant(D.AssignmentLoc, 1).get()); - if (!Res1.isUsable()) { - IsCorrect = false; - continue; - } - // ((Begini - Endi) - Stepi - 1) / (-Stepi) - Res1 = - CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), St1.get()); - if (!Res1.isUsable()) { - IsCorrect = false; - continue; - } - // Stepi > 0. - ExprResult CmpRes = - CreateBuiltinBinOp(D.AssignmentLoc, BO_GT, D.Range.Step, - ActOnIntegerConstant(D.AssignmentLoc, 0).get()); - if (!CmpRes.isUsable()) { - IsCorrect = false; - continue; - } - Res = ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, CmpRes.get(), - Res.get(), Res1.get()); - if (!Res.isUsable()) { - IsCorrect = false; - continue; - } - } - Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false); - if (!Res.isUsable()) { - IsCorrect = false; - continue; - } - - // Build counter update. - // Build counter. - auto *CounterVD = - VarDecl::Create(Context, CurContext, D.IteratorDecl->getBeginLoc(), - D.IteratorDecl->getBeginLoc(), nullptr, - Res.get()->getType(), nullptr, SC_None); - CounterVD->setImplicit(); - ExprResult RefRes = - BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue, - D.IteratorDecl->getBeginLoc()); - // Build counter update. 
- // I = Begini + counter * Stepi; - ExprResult UpdateRes; - if (D.Range.Step) { - UpdateRes = CreateBuiltinBinOp( - D.AssignmentLoc, BO_Mul, - DefaultLvalueConversion(RefRes.get()).get(), St.get()); - } else { - UpdateRes = DefaultLvalueConversion(RefRes.get()); - } - if (!UpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, D.Range.Begin, - UpdateRes.get()); - if (!UpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - ExprResult VDRes = - BuildDeclRefExpr(cast(D.IteratorDecl), - cast(D.IteratorDecl)->getType(), VK_LValue, - D.IteratorDecl->getBeginLoc()); - UpdateRes = CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, VDRes.get(), - UpdateRes.get()); - if (!UpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - UpdateRes = - ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true); - if (!UpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - ExprResult CounterUpdateRes = - CreateBuiltinUnaryOp(D.AssignmentLoc, UO_PreInc, RefRes.get()); - if (!CounterUpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - CounterUpdateRes = - ActOnFinishFullExpr(CounterUpdateRes.get(), /*DiscardedValue=*/true); - if (!CounterUpdateRes.isUsable()) { - IsCorrect = false; - continue; - } - OMPIteratorHelperData &HD = Helpers.emplace_back(); - HD.CounterVD = CounterVD; - HD.Upper = Res.get(); - HD.Update = UpdateRes.get(); - HD.CounterUpdate = CounterUpdateRes.get(); - } - } else { - Helpers.assign(ID.size(), {}); - } - if (!IsCorrect) { - // Invalidate all created iterator declarations if error is found. - for (const OMPIteratorExpr::IteratorDefinition &D : ID) { - if (Decl *ID = D.IteratorDecl) - ID->setInvalidDecl(); - } - return ExprError(); - } - return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc, - LLoc, RLoc, ID, Helpers); -} - ExprResult Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc) { @@ -6437,7 +5832,6 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc, // Pass down lifetime extending flag, and collect temporaries in // CreateMaterializeTemporaryExpr when we rewrite the call argument. 
keepInLifetimeExtendingContext(); - keepInMaterializeTemporaryObjectContext(); EnsureImmediateInvocationInDefaultArgs Immediate(*this); ExprResult Res; runWithSufficientStackSpace(CallLoc, [&] { @@ -7319,8 +6713,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc, } if (LangOpts.OpenMP) - Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc, - ExecConfig); + Call = OpenMP().ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc, + ExecConfig); if (LangOpts.CPlusPlus) { if (const auto *CE = dyn_cast(Call.get())) DiagnosedUnqualifiedCallsToStdFunctions(*this, CE); @@ -7868,7 +7262,8 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, } if (CXXMethodDecl *Method = dyn_cast_or_null(FDecl)) - if (Method->isImplicitObjectMemberFunction()) + if (!isa(CurContext) && + Method->isImplicitObjectMemberFunction()) return ExprError(Diag(LParenLoc, diag::err_member_call_without_object) << Fn->getSourceRange() << 0); @@ -8009,7 +7404,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, if (!LiteralExpr->isTypeDependent() && !LiteralExpr->isValueDependent() && !literalType->isDependentType()) // C99 6.5.2.5p3 - if (CheckForConstantInitializer(LiteralExpr, literalType)) + if (CheckForConstantInitializer(LiteralExpr)) return ExprError(); } else if (literalType.getAddressSpace() != LangAS::opencl_private && literalType.getAddressSpace() != LangAS::Default) { @@ -10855,8 +10250,9 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS, // diagnostics and just checking for errors, e.g., during overload // resolution, return Incompatible to indicate the failure. if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && - CheckObjCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion, - Diagnose, DiagnoseCFAudited) != ACR_okay) { + CheckObjCConversion(SourceRange(), Ty, E, + CheckedConversionKind::Implicit, Diagnose, + DiagnoseCFAudited) != ACR_okay) { if (!Diagnose) return Incompatible; } @@ -13577,14 +12973,15 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS, Expr *E = LHS.get(); if (getLangOpts().ObjCAutoRefCount) CheckObjCConversion(SourceRange(), RHSType, E, - CCK_ImplicitConversion); + CheckedConversionKind::Implicit); LHS = ImpCastExprToType(E, RHSType, RPT ? CK_BitCast :CK_CPointerToObjCPointerCast); } else { Expr *E = RHS.get(); if (getLangOpts().ObjCAutoRefCount) - CheckObjCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion, + CheckObjCConversion(SourceRange(), LHSType, E, + CheckedConversionKind::Implicit, /*Diagnose=*/true, /*DiagnoseCFAudited=*/false, Opc); RHS = ImpCastExprToType(E, LHSType, @@ -15007,8 +14404,8 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op, return QualType(); } else if (ResType->isAnyComplexType()) { // C99 does not support ++/-- on complex types, we allow as an extension. - S.Diag(OpLoc, diag::ext_integer_increment_complex) - << ResType << Op->getSourceRange(); + S.Diag(OpLoc, diag::ext_increment_complex) + << IsInc << Op->getSourceRange(); } else if (ResType->isPlaceholderType()) { ExprResult PR = S.CheckPlaceholderExpr(Op); if (PR.isInvalid()) return QualType(); @@ -17455,8 +16852,9 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc, // CUDA device code does not support varargs. 
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { if (const FunctionDecl *F = dyn_cast(CurContext)) { - CUDAFunctionTarget T = IdentifyCUDATarget(F); - if (T == CFT_Global || T == CFT_Device || T == CFT_HostDevice) + CUDAFunctionTarget T = CUDA().IdentifyTarget(F); + if (T == CUDAFunctionTarget::Global || T == CUDAFunctionTarget::Device || + T == CUDAFunctionTarget::HostDevice) return ExprError(Diag(E->getBeginLoc(), diag::err_va_arg_in_device)); } } @@ -18806,9 +18204,9 @@ void Sema::PopExpressionEvaluationContext() { // Append the collected materialized temporaries into previous context before // exit if the previous also is a lifetime extending context. auto &PrevRecord = ExprEvalContexts[ExprEvalContexts.size() - 2]; - if (getLangOpts().CPlusPlus23 && isInLifetimeExtendingContext() && - PrevRecord.InLifetimeExtendingContext && !ExprEvalContexts.empty()) { - auto &PrevRecord = ExprEvalContexts[ExprEvalContexts.size() - 2]; + if (getLangOpts().CPlusPlus23 && Rec.InLifetimeExtendingContext && + PrevRecord.InLifetimeExtendingContext && + !Rec.ForRangeLifetimeExtendTemps.empty()) { PrevRecord.ForRangeLifetimeExtendTemps.append( Rec.ForRangeLifetimeExtendTemps); } @@ -19108,7 +18506,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, checkSpecializationReachability(Loc, Func); if (getLangOpts().CUDA) - CheckCUDACall(Loc, Func); + CUDA().CheckCall(Loc, Func); // If we need a definition, try to create one. if (NeedDefinition && !Func->getBody()) { @@ -19255,7 +18653,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, // side. Therefore keep trying until it is recorded. if (LangOpts.OffloadImplicitHostDeviceTemplates && LangOpts.CUDAIsDevice && !getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Func)) - CUDARecordImplicitHostDeviceFuncUsedByDevice(Func); + CUDA().RecordImplicitHostDeviceFuncUsedByDevice(Func); // If this is the first "real" use, act on that. if (OdrUse == OdrUseContext::Used && !Func->isUsed(/*CheckUsedAttr=*/false)) { @@ -19320,7 +18718,7 @@ MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef, } QualType CaptureType, DeclRefType; if (SemaRef.LangOpts.OpenMP) - SemaRef.tryCaptureOpenMPLambdas(V); + SemaRef.OpenMP().tryCaptureOpenMPLambdas(V); SemaRef.tryCaptureVariable(V, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/ true, CaptureType, @@ -19328,26 +18726,28 @@ MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef, if (SemaRef.LangOpts.CUDA && Var->hasGlobalStorage()) { auto *FD = dyn_cast_or_null(SemaRef.CurContext); - auto VarTarget = SemaRef.IdentifyCUDATarget(Var); - auto UserTarget = SemaRef.IdentifyCUDATarget(FD); - if (VarTarget == Sema::CVT_Host && - (UserTarget == Sema::CFT_Device || UserTarget == Sema::CFT_HostDevice || - UserTarget == Sema::CFT_Global)) { + auto VarTarget = SemaRef.CUDA().IdentifyTarget(Var); + auto UserTarget = SemaRef.CUDA().IdentifyTarget(FD); + if (VarTarget == SemaCUDA::CVT_Host && + (UserTarget == CUDAFunctionTarget::Device || + UserTarget == CUDAFunctionTarget::HostDevice || + UserTarget == CUDAFunctionTarget::Global)) { // Diagnose ODR-use of host global variables in device functions. // Reference of device global variables in host functions is allowed // through shadow variables therefore it is not diagnosed. 
if (SemaRef.LangOpts.CUDAIsDevice && !SemaRef.LangOpts.HIPStdPar) { SemaRef.targetDiag(Loc, diag::err_ref_bad_target) - << /*host*/ 2 << /*variable*/ 1 << Var << UserTarget; + << /*host*/ 2 << /*variable*/ 1 << Var + << llvm::to_underlying(UserTarget); SemaRef.targetDiag(Var->getLocation(), Var->getType().isConstQualified() ? diag::note_cuda_const_var_unpromoted : diag::note_cuda_host_var); } - } else if (VarTarget == Sema::CVT_Device && + } else if (VarTarget == SemaCUDA::CVT_Device && !Var->hasAttr() && - (UserTarget == Sema::CFT_Host || - UserTarget == Sema::CFT_HostDevice)) { + (UserTarget == CUDAFunctionTarget::Host || + UserTarget == CUDAFunctionTarget::HostDevice)) { // Record a CUDA/HIP device side variable if it is ODR-used // by host code. This is done conservatively, when the variable is // referenced in any of the following contexts: @@ -19599,7 +18999,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, ValueDecl *Var, const bool HasBlocksAttr = Var->hasAttr(); if (HasBlocksAttr || CaptureType->isReferenceType() || - (S.getLangOpts().OpenMP && S.isOpenMPCapturedDecl(Var))) { + (S.getLangOpts().OpenMP && S.OpenMP().isOpenMPCapturedDecl(Var))) { // Block capture by reference does not change the capture or // declaration reference types. ByRef = true; @@ -19629,7 +19029,7 @@ static bool captureInCapturedRegion( ByRef = (Kind == Sema::TryCapture_ExplicitByRef); } else if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) { // Using an LValue reference type is consistent with Lambdas (see below). - if (S.isOpenMPCapturedDecl(Var)) { + if (S.OpenMP().isOpenMPCapturedDecl(Var)) { bool HasConst = DeclRefType.isConstQualified(); DeclRefType = DeclRefType.getUnqualifiedType(); // Don't lose diagnostics about assignments to const. @@ -19637,11 +19037,11 @@ static bool captureInCapturedRegion( DeclRefType.addConst(); } // Do not capture firstprivates in tasks. - if (S.isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel) != - OMPC_unknown) + if (S.OpenMP().isOpenMPPrivateDecl(Var, RSI->OpenMPLevel, + RSI->OpenMPCaptureLevel) != OMPC_unknown) return true; - ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel, - RSI->OpenMPCaptureLevel); + ByRef = S.OpenMP().isOpenMPCapturedByRef(Var, RSI->OpenMPLevel, + RSI->OpenMPCaptureLevel); } if (ByRef) @@ -19902,9 +19302,9 @@ bool Sema::tryCaptureVariable( // Capture global variables if it is required to use private copy of this // variable. bool IsGlobal = !VD->hasLocalStorage(); - if (IsGlobal && - !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true, - MaxFunctionScopesIndex))) + if (IsGlobal && !(LangOpts.OpenMP && + OpenMP().isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true, + MaxFunctionScopesIndex))) return true; if (isa(Var)) @@ -20022,7 +19422,7 @@ bool Sema::tryCaptureVariable( } return true; } - OpenMPClauseKind IsOpenMPPrivateDecl = isOpenMPPrivateDecl( + OpenMPClauseKind IsOpenMPPrivateDecl = OpenMP().isOpenMPPrivateDecl( Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); // If the variable is private (i.e. 
not captured) and has variably // modified type, we still need to capture the type for correct @@ -20033,7 +19433,8 @@ bool Sema::tryCaptureVariable( QualType QTy = Var->getType(); if (ParmVarDecl *PVD = dyn_cast_or_null(Var)) QTy = PVD->getOriginalType(); - for (int I = 1, E = getNumberOfConstructScopes(RSI->OpenMPLevel); + for (int I = 1, + E = OpenMP().getNumberOfConstructScopes(RSI->OpenMPLevel); I < E; ++I) { auto *OuterRSI = cast( FunctionScopes[FunctionScopesIndex - I]); @@ -20045,18 +19446,19 @@ bool Sema::tryCaptureVariable( } bool IsTargetCap = IsOpenMPPrivateDecl != OMPC_private && - isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel, - RSI->OpenMPCaptureLevel); + OpenMP().isOpenMPTargetCapturedDecl(Var, RSI->OpenMPLevel, + RSI->OpenMPCaptureLevel); // Do not capture global if it is not privatized in outer regions. bool IsGlobalCap = - IsGlobal && isOpenMPGlobalCapturedDecl(Var, RSI->OpenMPLevel, - RSI->OpenMPCaptureLevel); + IsGlobal && OpenMP().isOpenMPGlobalCapturedDecl( + Var, RSI->OpenMPLevel, RSI->OpenMPCaptureLevel); // When we detect target captures we are looking from inside the // target region, therefore we need to propagate the capture from the // enclosing region. Therefore, the capture is not initially nested. if (IsTargetCap) - adjustOpenMPTargetScopeIndex(FunctionScopesIndex, RSI->OpenMPLevel); + OpenMP().adjustOpenMPTargetScopeIndex(FunctionScopesIndex, + RSI->OpenMPLevel); if (IsTargetCap || IsOpenMPPrivateDecl == OMPC_private || (IsGlobal && !IsGlobalCap)) { @@ -20878,8 +20280,8 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E, bool MightBeOdrUse, llvm::DenseMap &RefsMinusAssignments) { - if (SemaRef.isInOpenMPDeclareTargetContext()) - SemaRef.checkDeclIsAllowedInOpenMPTarget(E, D); + if (SemaRef.OpenMP().isInOpenMPDeclareTargetContext()) + SemaRef.OpenMP().checkDeclIsAllowedInOpenMPTarget(E, D); if (VarDecl *Var = dyn_cast(D)) { DoMarkVarDeclReferenced(SemaRef, Loc, Var, E, RefsMinusAssignments); diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index cc0b86716447b..8642fe142c113 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -38,12 +38,14 @@ #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaLambda.h" #include "clang/Sema/Template.h" #include "clang/Sema/TemplateDeduction.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/TypeSize.h" @@ -57,7 +59,7 @@ using namespace sema; /// name of the corresponding type. ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, - IdentifierInfo &Name) { + const IdentifierInfo &Name) { NestedNameSpecifier *NNS = SS.getScopeRep(); // Convert the nested-name-specifier into a type. 
@@ -89,10 +91,9 @@ ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS, Context.getTrivialTypeSourceInfo(Type, NameLoc)); } -ParsedType Sema::getConstructorName(IdentifierInfo &II, - SourceLocation NameLoc, - Scope *S, CXXScopeSpec &SS, - bool EnteringContext) { +ParsedType Sema::getConstructorName(const IdentifierInfo &II, + SourceLocation NameLoc, Scope *S, + CXXScopeSpec &SS, bool EnteringContext) { CXXRecordDecl *CurClass = getCurrentClass(S, &SS); assert(CurClass && &II == CurClass->getIdentifier() && "not a constructor name"); @@ -140,9 +141,9 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II, return ParsedType::make(T); } -ParsedType Sema::getDestructorName(IdentifierInfo &II, SourceLocation NameLoc, - Scope *S, CXXScopeSpec &SS, - ParsedType ObjectTypePtr, +ParsedType Sema::getDestructorName(const IdentifierInfo &II, + SourceLocation NameLoc, Scope *S, + CXXScopeSpec &SS, ParsedType ObjectTypePtr, bool EnteringContext) { // Determine where to perform name lookup. @@ -500,7 +501,7 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS, // // double operator""_Bq(long double); // OK: not a reserved identifier // double operator"" _Bq(long double); // ill-formed, no diagnostic required - IdentifierInfo *II = Name.Identifier; + const IdentifierInfo *II = Name.Identifier; ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts()); SourceLocation Loc = Name.getEndLoc(); if (!PP.getSourceManager().isInSystemHeader(Loc)) { @@ -884,13 +885,13 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, // Exceptions aren't allowed in CUDA device code. if (getLangOpts().CUDA) - CUDADiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions) - << "throw" << CurrentCUDATarget(); + CUDA().DiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions) + << "throw" << llvm::to_underlying(CUDA().CurrentTarget()); // Exceptions aren't allowed in SYCL device code. if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(OpLoc, diag::err_sycl_restrict) - << Sema::KernelUseExceptions; + SYCL().DiagIfDeviceCode(OpLoc, diag::err_sycl_restrict) + << SemaSYCL::KernelUseExceptions; if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw"; @@ -1420,42 +1421,26 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit, } ExprResult Sema::ActOnCXXThis(SourceLocation Loc) { - // C++20 [expr.prim.this]p1: - // The keyword this names a pointer to the object for which an - // implicit object member function is invoked or a non-static - // data member's initializer is evaluated. + /// C++ 9.3.2: In the body of a non-static member function, the keyword this + /// is a non-lvalue expression whose value is the address of the object for + /// which the function is called. 
QualType ThisTy = getCurrentThisType(); - if (CheckCXXThisType(Loc, ThisTy)) - return ExprError(); + if (ThisTy.isNull()) { + DeclContext *DC = getFunctionLevelDeclContext(); - return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false); -} + if (const auto *Method = dyn_cast(DC); + Method && Method->isExplicitObjectMemberFunction()) { + return Diag(Loc, diag::err_invalid_this_use) << 1; + } -bool Sema::CheckCXXThisType(SourceLocation Loc, QualType Type) { - if (!Type.isNull()) - return false; + if (isLambdaCallWithExplicitObjectParameter(CurContext)) + return Diag(Loc, diag::err_invalid_this_use) << 1; - // C++20 [expr.prim.this]p3: - // If a declaration declares a member function or member function template - // of a class X, the expression this is a prvalue of type - // "pointer to cv-qualifier-seq X" wherever X is the current class between - // the optional cv-qualifier-seq and the end of the function-definition, - // member-declarator, or declarator. It shall not appear within the - // declaration of either a static member function or an explicit object - // member function of the current class (although its type and value - // category are defined within such member functions as they are within - // an implicit object member function). - DeclContext *DC = getFunctionLevelDeclContext(); - if (const auto *Method = dyn_cast(DC); - Method && Method->isExplicitObjectMemberFunction()) { - Diag(Loc, diag::err_invalid_this_use) << 1; - } else if (isLambdaCallWithExplicitObjectParameter(CurContext)) { - Diag(Loc, diag::err_invalid_this_use) << 1; - } else { - Diag(Loc, diag::err_invalid_this_use) << 0; + return Diag(Loc, diag::err_invalid_this_use) << 0; } - return true; + + return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false); } Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type, @@ -1713,17 +1698,17 @@ bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) { // [CUDA] Ignore this function, if we can't call it. const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); if (getLangOpts().CUDA) { - auto CallPreference = IdentifyCUDAPreference(Caller, Method); + auto CallPreference = CUDA().IdentifyPreference(Caller, Method); // If it's not callable at all, it's not the right function. - if (CallPreference < CFP_WrongSide) + if (CallPreference < SemaCUDA::CFP_WrongSide) return false; - if (CallPreference == CFP_WrongSide) { + if (CallPreference == SemaCUDA::CFP_WrongSide) { // Maybe. We have to check if there are better alternatives. DeclContext::lookup_result R = Method->getDeclContext()->lookup(Method->getDeclName()); for (const auto *D : R) { if (const auto *FD = dyn_cast(D)) { - if (IdentifyCUDAPreference(Caller, FD) > CFP_WrongSide) + if (CUDA().IdentifyPreference(Caller, FD) > SemaCUDA::CFP_WrongSide) return false; } } @@ -1742,7 +1727,7 @@ bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) { return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) { assert(FD->getNumParams() == 1 && "Only single-operand functions should be in PreventedBy"); - return IdentifyCUDAPreference(Caller, FD) >= CFP_HostDevice; + return CUDA().IdentifyPreference(Caller, FD) >= SemaCUDA::CFP_HostDevice; }); } @@ -1779,7 +1764,7 @@ namespace { UsualDeallocFnInfo(Sema &S, DeclAccessPair Found) : Found(Found), FD(dyn_cast(Found->getUnderlyingDecl())), Destroying(false), HasSizeT(false), HasAlignValT(false), - CUDAPref(Sema::CFP_Native) { + CUDAPref(SemaCUDA::CFP_Native) { // A function template declaration is never a usual deallocation function. 
if (!FD) return; @@ -1805,7 +1790,7 @@ namespace { // In CUDA, determine how much we'd like / dislike to call this. if (S.getLangOpts().CUDA) - CUDAPref = S.IdentifyCUDAPreference( + CUDAPref = S.CUDA().IdentifyPreference( S.getCurFunctionDecl(/*AllowLambda=*/true), FD); } @@ -1836,7 +1821,7 @@ namespace { DeclAccessPair Found; FunctionDecl *FD; bool Destroying, HasSizeT, HasAlignValT; - Sema::CUDAFunctionPreference CUDAPref; + SemaCUDA::CUDAFunctionPreference CUDAPref; }; } @@ -1860,7 +1845,7 @@ static UsualDeallocFnInfo resolveDeallocationOverload( for (auto I = R.begin(), E = R.end(); I != E; ++I) { UsualDeallocFnInfo Info(S, I.getPair()); if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) || - Info.CUDAPref == Sema::CFP_Never) + Info.CUDAPref == SemaCUDA::CFP_Never) continue; if (!Best) { @@ -2535,8 +2520,8 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, MarkFunctionReferenced(StartLoc, OperatorNew); if (getLangOpts().SYCLIsDevice && OperatorNew->isReplaceableGlobalAllocationFunction()) - SYCLDiagIfDeviceCode(StartLoc, diag::err_sycl_restrict) - << KernelAllocateStorage; + SYCL().DiagIfDeviceCode(StartLoc, diag::err_sycl_restrict) + << SemaSYCL::KernelAllocateStorage; } if (OperatorDelete) { if (DiagnoseUseOfDecl(OperatorDelete, StartLoc)) @@ -2724,13 +2709,9 @@ static bool resolveAllocationOverload( return true; case OR_Deleted: { - if (Diagnose) { - Candidates.NoteCandidates( - PartialDiagnosticAt(R.getNameLoc(), - S.PDiag(diag::err_ovl_deleted_call) - << R.getLookupName() << Range), - S, OCD_AllCandidates, Args); - } + if (Diagnose) + S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(), + Candidates, Best->Function, Args); return true; } } @@ -2965,8 +2946,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, } if (getLangOpts().CUDA) - EraseUnwantedCUDAMatches(getCurFunctionDecl(/*AllowLambda=*/true), - Matches); + CUDA().EraseUnwantedMatches(getCurFunctionDecl(/*AllowLambda=*/true), + Matches); } else { // C++1y [expr.new]p22: // For a non-placement allocation function, the normal deallocation @@ -3384,7 +3365,9 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, // FIXME: DiagnoseUseOfDecl? if (Operator->isDeleted()) { if (Diagnose) { - Diag(StartLoc, diag::err_deleted_function_use); + StringLiteral *Msg = Operator->getDeletedMessage(); + Diag(StartLoc, diag::err_deleted_function_use) + << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef()); NoteDeletedFunction(Operator); } return true; @@ -3988,14 +3971,11 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall, S, OCD_AmbiguousCandidates, Args); return true; - case OR_Deleted: { - Candidates.NoteCandidates( - PartialDiagnosticAt(R.getNameLoc(), S.PDiag(diag::err_ovl_deleted_call) - << R.getLookupName() << Range), - S, OCD_AllCandidates, Args); + case OR_Deleted: + S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(), + Candidates, Best->Function, Args); return true; } - } llvm_unreachable("Unreachable, bad result from BestViableFunction"); } @@ -4279,7 +4259,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, CheckedConversionKind CCK) { // C++ [over.match.oper]p7: [...] operands of class type are converted [...] 
- if (CCK == CCK_ForBuiltinOverloadedOp && !From->getType()->isRecordType()) + if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp && + !From->getType()->isRecordType()) return From; switch (ICS.getKind()) { @@ -4340,7 +4321,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, // C++ [over.match.oper]p7: // [...] the second standard conversion sequence of a user-defined // conversion sequence is not applied. - if (CCK == CCK_ForBuiltinOverloadedOp) + if (CCK == CheckedConversionKind::ForBuiltinOverloadedOp) return From; return PerformImplicitConversion(From, ToType, ICS.UserDefined.After, @@ -4381,7 +4362,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK) { - bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast); + bool CStyle = (CCK == CheckedConversionKind::CStyleCast || + CCK == CheckedConversionKind::FunctionalCast); // Overall FIXME: we are recomputing too many types here and doing far too // much extra work. What this means is that we need to keep track of more @@ -5020,6 +5002,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, return From; } +/// Checks that type T is not a VLA. +/// +/// @returns @c true if @p T is VLA and a diagnostic was emitted, +/// @c false otherwise. +static bool DiagnoseVLAInCXXTypeTrait(Sema &S, const TypeSourceInfo *T, + clang::tok::TokenKind TypeTraitID) { + if (!T->getType()->isVariableArrayType()) + return false; + + S.Diag(T->getTypeLoc().getBeginLoc(), diag::err_vla_unsupported) + << 1 << TypeTraitID; + return true; +} + /// Check the completeness of a type in a unary type trait. /// /// If the particular type trait requires a complete type, tries to complete @@ -5196,7 +5192,9 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op, } static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, - SourceLocation KeyLoc, QualType T) { + SourceLocation KeyLoc, + TypeSourceInfo *TInfo) { + QualType T = TInfo->getType(); assert(!T->isDependentType() && "Cannot evaluate traits of dependent type"); ASTContext &C = Self.Context; @@ -5213,21 +5211,13 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, case UTT_IsArray: return T->isArrayType(); case UTT_IsBoundedArray: - if (!T->isVariableArrayType()) { - return T->isArrayType() && !T->isIncompleteArrayType(); - } - - Self.Diag(KeyLoc, diag::err_vla_unsupported) - << 1 << tok::kw___is_bounded_array; - return false; + if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_bounded_array)) + return false; + return T->isArrayType() && !T->isIncompleteArrayType(); case UTT_IsUnboundedArray: - if (!T->isVariableArrayType()) { - return T->isIncompleteArrayType(); - } - - Self.Diag(KeyLoc, diag::err_vla_unsupported) - << 1 << tok::kw___is_unbounded_array; - return false; + if (DiagnoseVLAInCXXTypeTrait(Self, TInfo, tok::kw___is_unbounded_array)) + return false; + return T->isIncompleteArrayType(); case UTT_IsPointer: return T->isAnyPointerType(); case UTT_IsNullPointer: @@ -5639,7 +5629,7 @@ static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind, return false; if (Kind <= UTT_Last) - return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType()); + return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]); // Evaluate ReferenceBindsToTemporary and ReferenceConstructsFromTemporary // alongside the IsConstructible traits to avoid duplication. 
@@ -6101,13 +6091,24 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, const TypeSourceI Self.RequireCompleteType(Rhs->getTypeLoc().getBeginLoc(), RhsT, diag::err_incomplete_type); - if (LhsT->isVariableArrayType()) - Self.Diag(Lhs->getTypeLoc().getBeginLoc(), diag::err_vla_unsupported) - << 1 << tok::kw___is_layout_compatible; - if (RhsT->isVariableArrayType()) - Self.Diag(Rhs->getTypeLoc().getBeginLoc(), diag::err_vla_unsupported) - << 1 << tok::kw___is_layout_compatible; + DiagnoseVLAInCXXTypeTrait(Self, Lhs, tok::kw___is_layout_compatible); + DiagnoseVLAInCXXTypeTrait(Self, Rhs, tok::kw___is_layout_compatible); + return Self.IsLayoutCompatible(LhsT, RhsT); + } + case BTT_IsPointerInterconvertibleBaseOf: { + if (LhsT->isStructureOrClassType() && RhsT->isStructureOrClassType() && + !Self.getASTContext().hasSameUnqualifiedType(LhsT, RhsT)) { + Self.RequireCompleteType(Rhs->getTypeLoc().getBeginLoc(), RhsT, + diag::err_incomplete_type); + } + + DiagnoseVLAInCXXTypeTrait(Self, Lhs, + tok::kw___is_pointer_interconvertible_base_of); + DiagnoseVLAInCXXTypeTrait(Self, Rhs, + tok::kw___is_pointer_interconvertible_base_of); + + return Self.IsPointerInterconvertibleBaseOf(Lhs, Rhs); } default: llvm_unreachable("not a BTT"); } @@ -8439,7 +8440,7 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) { // unnecessary temporary objects. If we skip this step, IR generation is // able to synthesize the storage for itself in the aggregate case, and // adding the extra node to the AST is just clutter. - if (isInMaterializeTemporaryObjectContext() && getLangOpts().CPlusPlus17 && + if (isInLifetimeExtendingContext() && getLangOpts().CPlusPlus17 && E->isPRValue() && !E->getType()->isVoidType()) { ExprResult Res = TemporaryMaterializationConversion(E); if (Res.isInvalid()) @@ -9161,7 +9162,7 @@ Sema::CheckMicrosoftIfExistsSymbol(Scope *S, // Do the redeclaration lookup in the current scope. LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName, - Sema::NotForRedeclaration); + RedeclarationKind::NotForRedeclaration); LookupParsedName(R, S, &SS); R.suppressDiagnostics(); @@ -9203,10 +9204,9 @@ concepts::Requirement *Sema::ActOnSimpleRequirement(Expr *E) { /*ReturnTypeRequirement=*/{}); } -concepts::Requirement * -Sema::ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS, - SourceLocation NameLoc, IdentifierInfo *TypeName, - TemplateIdAnnotation *TemplateId) { +concepts::Requirement *Sema::ActOnTypeRequirement( + SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, + const IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId) { assert(((!TypeName && TemplateId) || (TypeName && !TemplateId)) && "Exactly one of TypeName and TemplateId must be specified."); TypeSourceInfo *TSI = nullptr; diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp index 8cd2288d279cc..c79128bc8f39e 100644 --- a/clang/lib/Sema/SemaExprMember.cpp +++ b/clang/lib/Sema/SemaExprMember.cpp @@ -9,7 +9,6 @@ // This file implements semantic analysis member access expressions. 
// //===----------------------------------------------------------------------===// -#include "clang/Sema/Overload.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" @@ -18,9 +17,11 @@ #include "clang/AST/ExprObjC.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" +#include "clang/Sema/Overload.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaOpenMP.h" using namespace clang; using namespace sema; @@ -61,10 +62,6 @@ enum IMAKind { /// The reference is a contextually-permitted abstract member reference. IMA_Abstract, - /// Whether the context is static is dependent on the enclosing template (i.e. - /// in a dependent class scope explicit specialization). - IMA_Dependent, - /// The reference may be to an unresolved using declaration and the /// context is not an instance method. IMA_Unresolved_StaticOrExplicitContext, @@ -95,18 +92,10 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef, DeclContext *DC = SemaRef.getFunctionLevelDeclContext(); - bool couldInstantiateToStatic = false; - bool isStaticOrExplicitContext = SemaRef.CXXThisTypeOverride.isNull(); - - if (auto *MD = dyn_cast(DC)) { - if (MD->isImplicitObjectMemberFunction()) { - isStaticOrExplicitContext = false; - // A dependent class scope function template explicit specialization - // that is neither declared 'static' nor with an explicit object - // parameter could instantiate to a static or non-static member function. - couldInstantiateToStatic = MD->getDependentSpecializationInfo(); - } - } + bool isStaticOrExplicitContext = + SemaRef.CXXThisTypeOverride.isNull() && + (!isa(DC) || cast(DC)->isStatic() || + cast(DC)->isExplicitObjectMemberFunction()); if (R.isUnresolvableResult()) return isStaticOrExplicitContext ? IMA_Unresolved_StaticOrExplicitContext @@ -135,9 +124,6 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef, if (Classes.empty()) return IMA_Static; - if (couldInstantiateToStatic) - return IMA_Dependent; - // C++11 [expr.prim.general]p12: // An id-expression that denotes a non-static data member or non-static // member function of a class can only be used: @@ -283,30 +269,27 @@ ExprResult Sema::BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE) { - switch (IMAKind Classification = ClassifyImplicitMemberAccess(*this, R)) { + switch (ClassifyImplicitMemberAccess(*this, R)) { case IMA_Instance: + return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true, S); + case IMA_Mixed: case IMA_Mixed_Unrelated: case IMA_Unresolved: - return BuildImplicitMemberExpr( - SS, TemplateKWLoc, R, TemplateArgs, - /*IsKnownInstance=*/Classification == IMA_Instance, S); + return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false, + S); + case IMA_Field_Uneval_Context: Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use) << R.getLookupNameInfo().getName(); [[fallthrough]]; case IMA_Static: case IMA_Abstract: - case IMA_Dependent: case IMA_Mixed_StaticOrExplicitContext: case IMA_Unresolved_StaticOrExplicitContext: if (TemplateArgs || TemplateKWLoc.isValid()) - return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*RequiresADL=*/false, - TemplateArgs); - return AsULE ? 
AsULE - : BuildDeclarationNameExpr( - SS, R, /*NeedsADL=*/false, /*AcceptInvalidDecl=*/false, - /*NeedUnresolved=*/Classification == IMA_Dependent); + return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs); + return AsULE ? AsULE : BuildDeclarationNameExpr(SS, R, false); case IMA_Error_StaticOrExplicitContext: case IMA_Error_Unrelated: @@ -745,7 +728,7 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R, Sema &SemaRef; DeclarationNameInfo NameInfo; Sema::LookupNameKind LookupKind; - Sema::RedeclarationKind Redecl; + RedeclarationKind Redecl; }; QueryState Q = {R.getSema(), R.getLookupNameInfo(), R.getLookupKind(), R.redeclarationKind()}; @@ -1918,9 +1901,9 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, if (getLangOpts().OpenMP && IsArrow && !CurContext->isDependentContext() && isa(Base.get()->IgnoreParenImpCasts())) { - if (auto *PrivateCopy = isOpenMPCapturedDecl(Field)) { - return getOpenMPCapturedExpr(PrivateCopy, VK, OK, - MemberNameInfo.getLoc()); + if (auto *PrivateCopy = OpenMP().isOpenMPCapturedDecl(Field)) { + return OpenMP().getOpenMPCapturedExpr(PrivateCopy, VK, OK, + MemberNameInfo.getLoc()); } } diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp index a8853f634c9cc..b13a9d426983b 100644 --- a/clang/lib/Sema/SemaExprObjC.cpp +++ b/clang/lib/Sema/SemaExprObjC.cpp @@ -663,10 +663,8 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) { } if (!ValueWithBytesObjCTypeMethod) { - IdentifierInfo *II[] = { - &Context.Idents.get("valueWithBytes"), - &Context.Idents.get("objCType") - }; + const IdentifierInfo *II[] = {&Context.Idents.get("valueWithBytes"), + &Context.Idents.get("objCType")}; Selector ValueWithBytesObjCType = Context.Selectors.getSelector(2, II); // Look for the appropriate method within NSValue. @@ -2155,13 +2153,12 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, return ExprError(); } -ExprResult Sema:: -ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, - IdentifierInfo &propertyName, - SourceLocation receiverNameLoc, - SourceLocation propertyNameLoc) { +ExprResult Sema::ActOnClassPropertyRefExpr(const IdentifierInfo &receiverName, + const IdentifierInfo &propertyName, + SourceLocation receiverNameLoc, + SourceLocation propertyNameLoc) { - IdentifierInfo *receiverNamePtr = &receiverName; + const IdentifierInfo *receiverNamePtr = &receiverName; ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(receiverNamePtr, receiverNameLoc); @@ -3748,22 +3745,22 @@ bool Sema::isKnownName(StringRef name) { template static void addFixitForObjCARCConversion( - Sema &S, DiagBuilderT &DiagB, Sema::CheckedConversionKind CCK, + Sema &S, DiagBuilderT &DiagB, CheckedConversionKind CCK, SourceLocation afterLParen, QualType castType, Expr *castExpr, Expr *realCast, const char *bridgeKeyword, const char *CFBridgeName) { // We handle C-style and implicit casts here. 
switch (CCK) { - case Sema::CCK_ImplicitConversion: - case Sema::CCK_ForBuiltinOverloadedOp: - case Sema::CCK_CStyleCast: - case Sema::CCK_OtherCast: + case CheckedConversionKind::Implicit: + case CheckedConversionKind::ForBuiltinOverloadedOp: + case CheckedConversionKind::CStyleCast: + case CheckedConversionKind::OtherCast: break; - case Sema::CCK_FunctionalCast: + case CheckedConversionKind::FunctionalCast: return; } if (CFBridgeName) { - if (CCK == Sema::CCK_OtherCast) { + if (CCK == CheckedConversionKind::OtherCast) { if (const CXXNamedCastExpr *NCE = dyn_cast(realCast)) { SourceRange range(NCE->getOperatorLoc(), NCE->getAngleBrackets().getEnd()); @@ -3808,9 +3805,9 @@ static void addFixitForObjCARCConversion( return; } - if (CCK == Sema::CCK_CStyleCast) { + if (CCK == CheckedConversionKind::CStyleCast) { DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword)); - } else if (CCK == Sema::CCK_OtherCast) { + } else if (CCK == CheckedConversionKind::OtherCast) { if (const CXXNamedCastExpr *NCE = dyn_cast(realCast)) { std::string castCode = "("; castCode += bridgeKeyword; @@ -3869,12 +3866,12 @@ static ObjCBridgeRelatedAttr *ObjCBridgeRelatedAttrFromType(QualType T, return nullptr; } -static void -diagnoseObjCARCConversion(Sema &S, SourceRange castRange, - QualType castType, ARCConversionTypeClass castACTC, - Expr *castExpr, Expr *realCast, - ARCConversionTypeClass exprACTC, - Sema::CheckedConversionKind CCK) { +static void diagnoseObjCARCConversion(Sema &S, SourceRange castRange, + QualType castType, + ARCConversionTypeClass castACTC, + Expr *castExpr, Expr *realCast, + ARCConversionTypeClass exprACTC, + CheckedConversionKind CCK) { SourceLocation loc = (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc()); @@ -3930,7 +3927,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange, assert(CreateRule != ACC_bottom && "This cast should already be accepted."); if (CreateRule != ACC_plusOne) { - auto DiagB = (CCK != Sema::CCK_OtherCast) + auto DiagB = (CCK != CheckedConversionKind::OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge) : S.Diag(noteLoc, diag::note_arc_cstyle_bridge); @@ -3940,7 +3937,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange, } if (CreateRule != ACC_plusZero) { - auto DiagB = (CCK == Sema::CCK_OtherCast && !br) + auto DiagB = (CCK == CheckedConversionKind::OtherCast && !br) ? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer) << castExprType : S.Diag(br ? castExpr->getExprLoc() : noteLoc, @@ -3971,7 +3968,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange, assert(CreateRule != ACC_bottom && "This cast should already be accepted."); if (CreateRule != ACC_plusOne) { - auto DiagB = (CCK != Sema::CCK_OtherCast) + auto DiagB = (CCK != CheckedConversionKind::OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge) : S.Diag(noteLoc, diag::note_arc_cstyle_bridge); addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen, @@ -3980,7 +3977,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange, } if (CreateRule != ACC_plusZero) { - auto DiagB = (CCK == Sema::CCK_OtherCast && !br) + auto DiagB = (CCK == CheckedConversionKind::OtherCast && !br) ? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained) << castType : S.Diag(br ? castExpr->getExprLoc() : noteLoc, @@ -4406,7 +4403,8 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType, // Check for viability and report error if casting an rvalue to a // life-time qualifier. 
if (castACTC == ACTC_retainable && - (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) && + (CCK == CheckedConversionKind::CStyleCast || + CCK == CheckedConversionKind::OtherCast) && castType != castExprType) { const Type *DT = castType.getTypePtr(); QualType QDT = castType; @@ -4520,11 +4518,11 @@ void Sema::diagnoseARCUnbridgedCast(Expr *e) { if (CStyleCastExpr *cast = dyn_cast(realCast)) { castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc()); castType = cast->getTypeAsWritten(); - CCK = CCK_CStyleCast; + CCK = CheckedConversionKind::CStyleCast; } else if (ExplicitCastExpr *cast = dyn_cast(realCast)) { castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange(); castType = cast->getTypeAsWritten(); - CCK = CCK_OtherCast; + CCK = CheckedConversionKind::OtherCast; } else { llvm_unreachable("Unexpected ImplicitCastExpr"); } diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 681849d6e6c8a..bb9e37f18d370 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -9,17 +9,25 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/SemaHLSL.h" +#include "clang/Basic/DiagnosticSema.h" +#include "clang/Basic/LLVM.h" +#include "clang/Basic/TargetInfo.h" #include "clang/Sema/Sema.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/TargetParser/Triple.h" +#include using namespace clang; SemaHLSL::SemaHLSL(Sema &S) : SemaBase(S) {} -Decl *SemaHLSL::ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer, - SourceLocation KwLoc, - IdentifierInfo *Ident, - SourceLocation IdentLoc, - SourceLocation LBrace) { +Decl *SemaHLSL::ActOnStartBuffer(Scope *BufferScope, bool CBuffer, + SourceLocation KwLoc, IdentifierInfo *Ident, + SourceLocation IdentLoc, + SourceLocation LBrace) { // For anonymous namespace, take the location of the left brace. DeclContext *LexicalParent = SemaRef.getCurLexicalContext(); HLSLBufferDecl *Result = HLSLBufferDecl::Create( @@ -31,8 +39,174 @@ Decl *SemaHLSL::ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer, return Result; } -void SemaHLSL::ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace) { +void SemaHLSL::ActOnFinishBuffer(Decl *Dcl, SourceLocation RBrace) { auto *BufDecl = cast(Dcl); BufDecl->setRBraceLoc(RBrace); SemaRef.PopDeclContext(); } + +HLSLNumThreadsAttr *SemaHLSL::mergeNumThreadsAttr(Decl *D, + const AttributeCommonInfo &AL, + int X, int Y, int Z) { + if (HLSLNumThreadsAttr *NT = D->getAttr()) { + if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) { + Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL; + Diag(AL.getLoc(), diag::note_conflicting_attribute); + } + return nullptr; + } + return ::new (getASTContext()) + HLSLNumThreadsAttr(getASTContext(), AL, X, Y, Z); +} + +HLSLShaderAttr * +SemaHLSL::mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL, + HLSLShaderAttr::ShaderType ShaderType) { + if (HLSLShaderAttr *NT = D->getAttr()) { + if (NT->getType() != ShaderType) { + Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL; + Diag(AL.getLoc(), diag::note_conflicting_attribute); + } + return nullptr; + } + return HLSLShaderAttr::Create(getASTContext(), ShaderType, AL); +} + +HLSLParamModifierAttr * +SemaHLSL::mergeParamModifierAttr(Decl *D, const AttributeCommonInfo &AL, + HLSLParamModifierAttr::Spelling Spelling) { + // We can only merge an `in` attribute with an `out` attribute. 
All other + // combinations of duplicated attributes are ill-formed. + if (HLSLParamModifierAttr *PA = D->getAttr()) { + if ((PA->isIn() && Spelling == HLSLParamModifierAttr::Keyword_out) || + (PA->isOut() && Spelling == HLSLParamModifierAttr::Keyword_in)) { + D->dropAttr(); + SourceRange AdjustedRange = {PA->getLocation(), AL.getRange().getEnd()}; + return HLSLParamModifierAttr::Create( + getASTContext(), /*MergedSpelling=*/true, AdjustedRange, + HLSLParamModifierAttr::Keyword_inout); + } + Diag(AL.getLoc(), diag::err_hlsl_duplicate_parameter_modifier) << AL; + Diag(PA->getLocation(), diag::note_conflicting_attribute); + return nullptr; + } + return HLSLParamModifierAttr::Create(getASTContext(), AL); +} + +void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) { + auto &TargetInfo = getASTContext().getTargetInfo(); + + if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry) + return; + + StringRef Env = TargetInfo.getTriple().getEnvironmentName(); + HLSLShaderAttr::ShaderType ShaderType; + if (HLSLShaderAttr::ConvertStrToShaderType(Env, ShaderType)) { + if (const auto *Shader = FD->getAttr()) { + // The entry point is already annotated - check that it matches the + // triple. + if (Shader->getType() != ShaderType) { + Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch) + << Shader; + FD->setInvalidDecl(); + } + } else { + // Implicitly add the shader attribute if the entry function isn't + // explicitly annotated. + FD->addAttr(HLSLShaderAttr::CreateImplicit(getASTContext(), ShaderType, + FD->getBeginLoc())); + } + } else { + switch (TargetInfo.getTriple().getEnvironment()) { + case llvm::Triple::UnknownEnvironment: + case llvm::Triple::Library: + break; + default: + llvm_unreachable("Unhandled environment in triple"); + } + } +} + +void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) { + const auto *ShaderAttr = FD->getAttr(); + assert(ShaderAttr && "Entry point has no shader attribute"); + HLSLShaderAttr::ShaderType ST = ShaderAttr->getType(); + + switch (ST) { + case HLSLShaderAttr::Pixel: + case HLSLShaderAttr::Vertex: + case HLSLShaderAttr::Geometry: + case HLSLShaderAttr::Hull: + case HLSLShaderAttr::Domain: + case HLSLShaderAttr::RayGeneration: + case HLSLShaderAttr::Intersection: + case HLSLShaderAttr::AnyHit: + case HLSLShaderAttr::ClosestHit: + case HLSLShaderAttr::Miss: + case HLSLShaderAttr::Callable: + if (const auto *NT = FD->getAttr()) { + DiagnoseAttrStageMismatch(NT, ST, + {HLSLShaderAttr::Compute, + HLSLShaderAttr::Amplification, + HLSLShaderAttr::Mesh}); + FD->setInvalidDecl(); + } + break; + + case HLSLShaderAttr::Compute: + case HLSLShaderAttr::Amplification: + case HLSLShaderAttr::Mesh: + if (!FD->hasAttr()) { + Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads) + << HLSLShaderAttr::ConvertShaderTypeToStr(ST); + FD->setInvalidDecl(); + } + break; + } + + for (ParmVarDecl *Param : FD->parameters()) { + if (const auto *AnnotationAttr = Param->getAttr()) { + CheckSemanticAnnotation(FD, Param, AnnotationAttr); + } else { + // FIXME: Handle struct parameters where annotations are on struct fields. + // See: https://github.com/llvm/llvm-project/issues/57875 + Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation); + Diag(Param->getLocation(), diag::note_previous_decl) << Param; + FD->setInvalidDecl(); + } + } + // FIXME: Verify return type semantic annotation. 
+} + +void SemaHLSL::CheckSemanticAnnotation( + FunctionDecl *EntryPoint, const Decl *Param, + const HLSLAnnotationAttr *AnnotationAttr) { + auto *ShaderAttr = EntryPoint->getAttr(); + assert(ShaderAttr && "Entry point has no shader attribute"); + HLSLShaderAttr::ShaderType ST = ShaderAttr->getType(); + + switch (AnnotationAttr->getKind()) { + case attr::HLSLSV_DispatchThreadID: + case attr::HLSLSV_GroupIndex: + if (ST == HLSLShaderAttr::Compute) + return; + DiagnoseAttrStageMismatch(AnnotationAttr, ST, {HLSLShaderAttr::Compute}); + break; + default: + llvm_unreachable("Unknown HLSLAnnotationAttr"); + } +} + +void SemaHLSL::DiagnoseAttrStageMismatch( + const Attr *A, HLSLShaderAttr::ShaderType Stage, + std::initializer_list AllowedStages) { + SmallVector StageStrings; + llvm::transform(AllowedStages, std::back_inserter(StageStrings), + [](HLSLShaderAttr::ShaderType ST) { + return StringRef( + HLSLShaderAttr::ConvertShaderTypeToStr(ST)); + }); + Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage) + << A << HLSLShaderAttr::ConvertShaderTypeToStr(Stage) + << (AllowedStages.size() != 1) << join(StageStrings, ", "); +} diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp index e42fd9b5a21a7..771a56c1bc21b 100644 --- a/clang/lib/Sema/SemaInit.cpp +++ b/clang/lib/Sema/SemaInit.cpp @@ -31,6 +31,7 @@ #include "llvm/ADT/APInt.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/PointerIntPair.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" @@ -848,7 +849,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity, } for (auto *Field : RDecl->fields()) { - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; if (hadError) @@ -1026,7 +1027,7 @@ int InitListChecker::numStructUnionElements(QualType DeclType) { if (auto *CXXRD = dyn_cast(structDecl)) InitializableMembers += CXXRD->getNumBases(); for (const auto *Field : structDecl->fields()) - if (!Field->isUnnamedBitfield()) + if (!Field->isUnnamedBitField()) ++InitializableMembers; if (structDecl->isUnion()) @@ -2174,7 +2175,7 @@ void InitListChecker::CheckStructUnionTypes( // bitfield. for (RecordDecl::field_iterator FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) { - if (!Field->isUnnamedBitfield()) { + if (!Field->isUnnamedBitField()) { CheckEmptyInitializable( InitializedEntity::InitializeMember(*Field, &Entity), IList->getEndLoc()); @@ -2337,7 +2338,7 @@ void InitListChecker::CheckStructUnionTypes( if (Field->getType()->isIncompleteArrayType()) break; - if (Field->isUnnamedBitfield()) { + if (Field->isUnnamedBitField()) { // Don't initialize unnamed bitfields, e.g. "int : 20;" ++Field; continue; @@ -2397,7 +2398,7 @@ void InitListChecker::CheckStructUnionTypes( if (HasDesignatedInit && InitializedFields.count(*it)) continue; - if (!it->isUnnamedBitfield() && !it->hasInClassInitializer() && + if (!it->isUnnamedBitField() && !it->hasInClassInitializer() && !it->getType()->isIncompleteArrayType()) { auto Diag = HasDesignatedInit ? 
diag::warn_missing_designated_field_initializers @@ -2413,7 +2414,7 @@ void InitListChecker::CheckStructUnionTypes( if (!StructuredList && Field != FieldEnd && !RD->isUnion() && !Field->getType()->isIncompleteArrayType()) { for (; Field != FieldEnd && !hadError; ++Field) { - if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer()) + if (!Field->isUnnamedBitField() && !Field->hasInClassInitializer()) CheckEmptyInitializable( InitializedEntity::InitializeMember(*Field, &Entity), IList->getEndLoc()); @@ -2783,7 +2784,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity, unsigned FieldIndex = NumBases; for (auto *FI : RD->fields()) { - if (FI->isUnnamedBitfield()) + if (FI->isUnnamedBitField()) continue; if (declaresSameEntity(KnownField, FI)) { KnownField = FI; @@ -2857,7 +2858,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity, // Find the field that we just initialized. FieldDecl *PrevField = nullptr; for (auto FI = RD->field_begin(); FI != RD->field_end(); ++FI) { - if (FI->isUnnamedBitfield()) + if (FI->isUnnamedBitField()) continue; if (*NextField != RD->field_end() && declaresSameEntity(*FI, **NextField)) @@ -2975,7 +2976,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity, // If this the first designator, our caller will continue checking // the rest of this struct/class/union subobject. if (IsFirstDesignator) { - if (Field != RD->field_end() && Field->isUnnamedBitfield()) + if (Field != RD->field_end() && Field->isUnnamedBitField()) ++Field; if (NextField) @@ -5585,7 +5586,7 @@ static void TryOrBuildParenListInitialization( for (FieldDecl *FD : RD->fields()) { // Unnamed bitfields should not be initialized at all, either with an arg // or by default. - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; InitializedEntity SubEntity = @@ -7930,7 +7931,7 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path, for (const auto *I : RD->fields()) { if (Index >= ILE->getNumInits()) break; - if (I->isUnnamedBitfield()) + if (I->isUnnamedBitField()) continue; Expr *SubInit = ILE->getInit(Index); if (I->getType()->isReferenceType()) @@ -9057,11 +9058,11 @@ ExprResult InitializationSequence::Perform(Sema &S, } } - Sema::CheckedConversionKind CCK - = Kind.isCStyleCast()? Sema::CCK_CStyleCast - : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast - : Kind.isExplicitCast()? Sema::CCK_OtherCast - : Sema::CCK_ImplicitConversion; + CheckedConversionKind CCK = + Kind.isCStyleCast() ? CheckedConversionKind::CStyleCast + : Kind.isFunctionalCast() ? CheckedConversionKind::FunctionalCast + : Kind.isExplicitCast() ? 
CheckedConversionKind::OtherCast
+ : CheckedConversionKind::Implicit;
ExprResult CurInitExprRes =
S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS,
getAssignmentAction(Entity), CCK);
@@ -9542,7 +9543,7 @@ static bool DiagnoseUninitializedReference(Sema &S, SourceLocation Loc,
return false;
for (const auto *FI : RD->fields()) {
- if (FI->isUnnamedBitfield())
+ if (FI->isUnnamedBitField())
continue;
if (DiagnoseUninitializedReference(S, FI->getLocation(), FI->getType())) {
@@ -9773,12 +9774,15 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
}
case OR_Deleted: {
- S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
- << OnlyArg->getType() << DestType.getNonReferenceType()
- << Args[0]->getSourceRange();
OverloadCandidateSet::iterator Best;
OverloadingResult Ovl = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
+ S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
+ << OnlyArg->getType() << DestType.getNonReferenceType()
+ << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef())
+ << Args[0]->getSourceRange();
if (Ovl == OR_Deleted) {
S.NoteDeletedFunction(Best->Function);
} else {
@@ -10034,11 +10038,15 @@ bool InitializationSequence::Diagnose(Sema &S,
// implicit.
if (S.isImplicitlyDeleted(Best->Function))
S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init)
- << S.getSpecialMember(cast(Best->Function))
- << DestType << ArgsRange;
- else
- S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << llvm::to_underlying(
+ S.getSpecialMember(cast(Best->Function)))
<< DestType << ArgsRange;
+ else {
+ StringLiteral *Msg = Best->Function->getDeletedMessage();
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << DestType << (Msg != nullptr)
+ << (Msg ? Msg->getString() : StringRef()) << ArgsRange;
+ }
S.NoteDeletedFunction(Best->Function);
break;
@@ -11070,6 +11078,9 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
}
case OR_Deleted: {
+ // FIXME: There are no tests for this diagnostic, and it doesn't seem
+ // like we ever get here; attempts to trigger this seem to yield a
+ // generic 'call to deleted function' diagnostic instead.
Diag(Kind.getLocation(), diag::err_deduced_class_template_deleted)
<< TemplateName;
NoteDeletedFunction(Best->Function);
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index 73cd730af112f..ce861188550b1 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -9,17 +9,19 @@
// This file implements semantic analysis for C++ lambda expressions.
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/SemaLambda.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
#include "clang/Sema/SemaInternal.h"
-#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/SemaOpenMP.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include
@@ -1393,11 +1395,11 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// CUDA lambdas get implicit host and device attributes.
if (getLangOpts().CUDA)
- CUDASetLambdaAttrs(Method);
+ CUDA().SetLambdaAttrs(Method);
// OpenMP lambdas might get assumumption attributes.
if (LangOpts.OpenMP)
- ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+ OpenMP().ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
handleLambdaNumbering(Class, Method);
@@ -2149,7 +2151,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
CaptureInits.push_back(Init.get());
if (LangOpts.CUDA)
- CUDACheckLambdaCapture(CallOperator, From);
+ CUDA().CheckLambdaCapture(CallOperator, From);
}
Class->setCaptures(Context, Captures);
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index c0567f073a609..9355dcd1707c3 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -37,6 +37,7 @@
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/edit_distance.h"
@@ -3392,21 +3393,20 @@ void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
Functions.append(Operators.begin(), Operators.end());
}
-Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
- CXXSpecialMember SM,
- bool ConstArg,
- bool VolatileArg,
- bool RValueThis,
- bool ConstThis,
- bool VolatileThis) {
+Sema::SpecialMemberOverloadResult
+Sema::LookupSpecialMember(CXXRecordDecl *RD, CXXSpecialMemberKind SM,
+ bool ConstArg, bool VolatileArg, bool RValueThis,
+ bool ConstThis, bool VolatileThis) {
assert(CanDeclareSpecialMemberFunction(RD) &&
"doing special member lookup into record that isn't fully complete");
RD = RD->getDefinition();
if (RValueThis || ConstThis || VolatileThis)
- assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
+ assert((SM == CXXSpecialMemberKind::CopyAssignment ||
+ SM == CXXSpecialMemberKind::MoveAssignment) &&
"constructors and destructors always have unqualified lvalue this");
if (ConstArg || VolatileArg)
- assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
+ assert((SM != CXXSpecialMemberKind::DefaultConstructor &&
+ SM != CXXSpecialMemberKind::Destructor) &&
"parameter-less special members can't have qualified arguments");
// FIXME: Get the caller to pass in a location for the lookup.
@@ -3414,7 +3414,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, llvm::FoldingSetNodeID ID; ID.AddPointer(RD); - ID.AddInteger(SM); + ID.AddInteger(llvm::to_underlying(SM)); ID.AddInteger(ConstArg); ID.AddInteger(VolatileArg); ID.AddInteger(RValueThis); @@ -3433,7 +3433,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, Result = new (Result) SpecialMemberOverloadResultEntry(ID); SpecialMemberCache.InsertNode(Result, InsertPoint); - if (SM == CXXDestructor) { + if (SM == CXXSpecialMemberKind::Destructor) { if (RD->needsImplicitDestructor()) { runWithSufficientStackSpace(RD->getLocation(), [&] { DeclareImplicitDestructor(RD); @@ -3457,7 +3457,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, QualType ArgType = CanTy; ExprValueKind VK = VK_LValue; - if (SM == CXXDefaultConstructor) { + if (SM == CXXSpecialMemberKind::DefaultConstructor) { Name = Context.DeclarationNames.getCXXConstructorName(CanTy); NumArgs = 0; if (RD->needsImplicitDefaultConstructor()) { @@ -3466,7 +3466,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, }); } } else { - if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) { + if (SM == CXXSpecialMemberKind::CopyConstructor || + SM == CXXSpecialMemberKind::MoveConstructor) { Name = Context.DeclarationNames.getCXXConstructorName(CanTy); if (RD->needsImplicitCopyConstructor()) { runWithSufficientStackSpace(RD->getLocation(), [&] { @@ -3504,7 +3505,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, // Possibly an XValue is actually correct in the case of move, but // there is no semantic difference for class types in this restricted // case. - if (SM == CXXCopyConstructor || SM == CXXCopyAssignment) + if (SM == CXXSpecialMemberKind::CopyConstructor || + SM == CXXSpecialMemberKind::CopyAssignment) VK = VK_LValue; else VK = VK_PRValue; @@ -3512,7 +3514,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, OpaqueValueExpr FakeArg(LookupLoc, ArgType, VK); - if (SM != CXXDefaultConstructor) { + if (SM != CXXSpecialMemberKind::DefaultConstructor) { NumArgs = 1; Arg = &FakeArg; } @@ -3538,7 +3540,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, // type, rather than because there's some other declared constructor. // Every class has a copy/move constructor, copy/move assignment, and // destructor. 
- assert(SM == CXXDefaultConstructor && + assert(SM == CXXSpecialMemberKind::DefaultConstructor && "lookup for a constructor or assignment operator was empty"); Result->setMethod(nullptr); Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted); @@ -3556,7 +3558,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, DeclAccessPair Cand = DeclAccessPair::make(CandDecl, AS_public); auto CtorInfo = getConstructorInfo(Cand); if (CXXMethodDecl *M = dyn_cast(Cand->getUnderlyingDecl())) { - if (SM == CXXCopyAssignment || SM == CXXMoveAssignment) + if (SM == CXXSpecialMemberKind::CopyAssignment || + SM == CXXSpecialMemberKind::MoveAssignment) AddMethodCandidate(M, Cand, RD, ThisTy, Classification, llvm::ArrayRef(&Arg, NumArgs), OCS, true); else if (CtorInfo) @@ -3568,7 +3571,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, /*SuppressUserConversions*/ true); } else if (FunctionTemplateDecl *Tmpl = dyn_cast(Cand->getUnderlyingDecl())) { - if (SM == CXXCopyAssignment || SM == CXXMoveAssignment) + if (SM == CXXSpecialMemberKind::CopyAssignment || + SM == CXXSpecialMemberKind::MoveAssignment) AddMethodTemplateCandidate(Tmpl, Cand, RD, nullptr, ThisTy, Classification, llvm::ArrayRef(&Arg, NumArgs), OCS, true); @@ -3614,8 +3618,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD, /// Look up the default constructor for the given class. CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) { SpecialMemberOverloadResult Result = - LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false, - false, false); + LookupSpecialMember(Class, CXXSpecialMemberKind::DefaultConstructor, + false, false, false, false, false); return cast_or_null(Result.getMethod()); } @@ -3625,9 +3629,9 @@ CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals) { assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) && "non-const, non-volatile qualifiers for copy ctor arg"); - SpecialMemberOverloadResult Result = - LookupSpecialMember(Class, CXXCopyConstructor, Quals & Qualifiers::Const, - Quals & Qualifiers::Volatile, false, false, false); + SpecialMemberOverloadResult Result = LookupSpecialMember( + Class, CXXSpecialMemberKind::CopyConstructor, Quals & Qualifiers::Const, + Quals & Qualifiers::Volatile, false, false, false); return cast_or_null(Result.getMethod()); } @@ -3635,9 +3639,9 @@ CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class, /// Look up the moving constructor for the given class. 
CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals) { - SpecialMemberOverloadResult Result = - LookupSpecialMember(Class, CXXMoveConstructor, Quals & Qualifiers::Const, - Quals & Qualifiers::Volatile, false, false, false); + SpecialMemberOverloadResult Result = LookupSpecialMember( + Class, CXXSpecialMemberKind::MoveConstructor, Quals & Qualifiers::Const, + Quals & Qualifiers::Volatile, false, false, false); return cast_or_null(Result.getMethod()); } @@ -3669,11 +3673,10 @@ CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class, "non-const, non-volatile qualifiers for copy assignment arg"); assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) && "non-const, non-volatile qualifiers for copy assignment this"); - SpecialMemberOverloadResult Result = - LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const, - Quals & Qualifiers::Volatile, RValueThis, - ThisQuals & Qualifiers::Const, - ThisQuals & Qualifiers::Volatile); + SpecialMemberOverloadResult Result = LookupSpecialMember( + Class, CXXSpecialMemberKind::CopyAssignment, Quals & Qualifiers::Const, + Quals & Qualifiers::Volatile, RValueThis, ThisQuals & Qualifiers::Const, + ThisQuals & Qualifiers::Volatile); return Result.getMethod(); } @@ -3685,11 +3688,10 @@ CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class, unsigned ThisQuals) { assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) && "non-const, non-volatile qualifiers for copy assignment this"); - SpecialMemberOverloadResult Result = - LookupSpecialMember(Class, CXXMoveAssignment, Quals & Qualifiers::Const, - Quals & Qualifiers::Volatile, RValueThis, - ThisQuals & Qualifiers::Const, - ThisQuals & Qualifiers::Volatile); + SpecialMemberOverloadResult Result = LookupSpecialMember( + Class, CXXSpecialMemberKind::MoveAssignment, Quals & Qualifiers::Const, + Quals & Qualifiers::Volatile, RValueThis, ThisQuals & Qualifiers::Const, + ThisQuals & Qualifiers::Volatile); return Result.getMethod(); } @@ -3702,8 +3704,8 @@ CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class, /// \returns The destructor for this class. CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) { return cast_or_null( - LookupSpecialMember(Class, CXXDestructor, false, false, false, false, - false) + LookupSpecialMember(Class, CXXSpecialMemberKind::Destructor, false, false, + false, false, false) .getMethod()); } @@ -4498,7 +4500,8 @@ LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc, } // Not a GNU local label. - Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration); + Res = LookupSingleName(CurScope, II, Loc, LookupLabel, + RedeclarationKind::NotForRedeclaration); // If we found a label, check to see if it is in the same context as us. // When in a Block, we don't want to reuse a label in an enclosing function. 
if (Res && Res->getDeclContext() != CurContext) @@ -5938,7 +5941,8 @@ void Sema::clearDelayedTypo(TypoExpr *TE) { void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) { DeclarationNameInfo Name(II, IILoc); - LookupResult R(*this, Name, LookupAnyName, Sema::NotForRedeclaration); + LookupResult R(*this, Name, LookupAnyName, + RedeclarationKind::NotForRedeclaration); R.suppressDiagnostics(); R.setHideTags(false); LookupName(R, S); @@ -5948,3 +5952,13 @@ void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) { void Sema::ActOnPragmaDump(Expr *E) { E->dump(); } + +RedeclarationKind Sema::forRedeclarationInCurContext() const { + // A declaration with an owning module for linkage can never link against + // anything that is not visible. We don't need to check linkage here; if + // the context has internal linkage, redeclaration lookup won't find things + // from other TUs, and we can't safely compute linkage yet in general. + if (cast(CurContext)->getOwningModuleForLinkage(/*IgnoreLinkage*/ true)) + return RedeclarationKind::ForVisibleRedeclaration; + return RedeclarationKind::ForExternalRedeclaration; +} diff --git a/clang/lib/Sema/SemaModule.cpp b/clang/lib/Sema/SemaModule.cpp index 2ddf9d70263a0..67658c93ed3ba 100644 --- a/clang/lib/Sema/SemaModule.cpp +++ b/clang/lib/Sema/SemaModule.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTMutationListener.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/SemaInternal.h" @@ -475,6 +476,9 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, getASTContext().setCurrentNamedModule(Mod); + if (auto *Listener = getASTMutationListener()) + Listener->EnteringModulePurview(); + // We already potentially made an implicit import (in the case of a module // implementation unit importing its interface). Make this module visible // and return the import decl to be added to the current TU. diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp index f9e1ad0121e2a..222a65a13dd0b 100644 --- a/clang/lib/Sema/SemaObjCProperty.cpp +++ b/clang/lib/Sema/SemaObjCProperty.cpp @@ -419,7 +419,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl = cast(CurContext); // Diagnose if this property is already in continuation class. DeclContext *DC = CurContext; - IdentifierInfo *PropertyId = FD.D.getIdentifier(); + const IdentifierInfo *PropertyId = FD.D.getIdentifier(); ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface(); // We need to look in the @interface to see if the @property was @@ -571,7 +571,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, TypeSourceInfo *TInfo, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC){ - IdentifierInfo *PropertyId = FD.D.getIdentifier(); + const IdentifierInfo *PropertyId = FD.D.getIdentifier(); // Property defaults to 'assign' if it is readwrite, unless this is ARC // and the type is retainable. 
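A minimal, self-contained sketch (not part of the patch) of why the profiling code above changes from ID.AddInteger(SM) to ID.AddInteger(llvm::to_underlying(SM)): once CXXSpecialMemberKind is a scoped enum it no longer converts implicitly to an integer. The enumerators below are stand-ins rather than the real declaration, and the local to_underlying helper simply mirrors llvm::to_underlying (the C++23 std::to_underlying shape).

#include <cstdio>
#include <type_traits>

// Stand-in for clang's CXXSpecialMemberKind after the move to a scoped enum.
enum class CXXSpecialMemberKind { DefaultConstructor, CopyConstructor, Destructor };

// Equivalent of llvm::to_underlying / C++23 std::to_underlying.
template <typename Enum>
constexpr std::underlying_type_t<Enum> to_underlying(Enum E) {
  return static_cast<std::underlying_type_t<Enum>>(E);
}

int main() {
  CXXSpecialMemberKind SM = CXXSpecialMemberKind::Destructor;
  // ID.AddInteger(SM) no longer compiles with a scoped enum; the underlying
  // value must be extracted explicitly, as the patch does with
  // llvm::to_underlying(SM).
  std::printf("underlying value: %d\n", to_underlying(SM));
  return 0;
}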
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index 2ba1e49b5739d..59f65eaf47a6d 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -15,6 +15,7 @@ #include "clang/AST/StmtOpenACC.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Sema/Sema.h" +#include "llvm/Support/Casting.h" using namespace clang; @@ -39,12 +40,78 @@ bool diagnoseConstructAppertainment(SemaOpenACC &S, OpenACCDirectiveKind K, bool doesClauseApplyToDirective(OpenACCDirectiveKind DirectiveKind, OpenACCClauseKind ClauseKind) { - // FIXME: For each clause as we implement them, we can add the - // 'legalization' list here. + switch (ClauseKind) { + // FIXME: For each clause as we implement them, we can add the + // 'legalization' list here. + case OpenACCClauseKind::Default: + switch (DirectiveKind) { + case OpenACCDirectiveKind::Parallel: + case OpenACCDirectiveKind::Serial: + case OpenACCDirectiveKind::Kernels: + case OpenACCDirectiveKind::ParallelLoop: + case OpenACCDirectiveKind::SerialLoop: + case OpenACCDirectiveKind::KernelsLoop: + case OpenACCDirectiveKind::Data: + return true; + default: + return false; + } + case OpenACCClauseKind::If: + switch (DirectiveKind) { + case OpenACCDirectiveKind::Parallel: + case OpenACCDirectiveKind::Serial: + case OpenACCDirectiveKind::Kernels: + case OpenACCDirectiveKind::Data: + case OpenACCDirectiveKind::EnterData: + case OpenACCDirectiveKind::ExitData: + case OpenACCDirectiveKind::HostData: + case OpenACCDirectiveKind::Init: + case OpenACCDirectiveKind::Shutdown: + case OpenACCDirectiveKind::Set: + case OpenACCDirectiveKind::Update: + case OpenACCDirectiveKind::Wait: + case OpenACCDirectiveKind::ParallelLoop: + case OpenACCDirectiveKind::SerialLoop: + case OpenACCDirectiveKind::KernelsLoop: + return true; + default: + return false; + } + case OpenACCClauseKind::Self: + switch (DirectiveKind) { + case OpenACCDirectiveKind::Parallel: + case OpenACCDirectiveKind::Serial: + case OpenACCDirectiveKind::Kernels: + case OpenACCDirectiveKind::Update: + case OpenACCDirectiveKind::ParallelLoop: + case OpenACCDirectiveKind::SerialLoop: + case OpenACCDirectiveKind::KernelsLoop: + return true; + default: + return false; + } + default: + // Do nothing so we can go to the 'unimplemented' diagnostic instead. + return true; + } + llvm_unreachable("Invalid clause kind"); +} - // Do nothing so we can go to the 'unimplemented' diagnostic instead. - return true; +bool checkAlreadyHasClauseOfKind( + SemaOpenACC &S, ArrayRef ExistingClauses, + SemaOpenACC::OpenACCParsedClause &Clause) { + const auto *Itr = llvm::find_if(ExistingClauses, [&](const OpenACCClause *C) { + return C->getClauseKind() == Clause.getClauseKind(); + }); + if (Itr != ExistingClauses.end()) { + S.Diag(Clause.getBeginLoc(), diag::err_acc_duplicate_clause_disallowed) + << Clause.getDirectiveKind() << Clause.getClauseKind(); + S.Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here); + return true; + } + return false; } + } // namespace SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {} @@ -63,8 +130,97 @@ SemaOpenACC::ActOnClause(ArrayRef ExistingClauses, return nullptr; } - // TODO OpenACC: Switch over the clauses we implement here and 'create' - // them. + switch (Clause.getClauseKind()) { + case OpenACCClauseKind::Default: { + // Restrictions only properly implemented on 'compute' constructs, and + // 'compute' constructs are the only construct that can do anything with + // this yet, so skip/treat as unimplemented in this case. 
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind())) + break; + + // Don't add an invalid clause to the AST. + if (Clause.getDefaultClauseKind() == OpenACCDefaultClauseKind::Invalid) + return nullptr; + + // OpenACC 3.3, Section 2.5.4: + // At most one 'default' clause may appear, and it must have a value of + // either 'none' or 'present'. + // Second half of the sentence is diagnosed during parsing. + if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause)) + return nullptr; + + return OpenACCDefaultClause::Create( + getASTContext(), Clause.getDefaultClauseKind(), Clause.getBeginLoc(), + Clause.getLParenLoc(), Clause.getEndLoc()); + } + + case OpenACCClauseKind::If: { + // Restrictions only properly implemented on 'compute' constructs, and + // 'compute' constructs are the only construct that can do anything with + // this yet, so skip/treat as unimplemented in this case. + if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind())) + break; + + // There is no prose in the standard that says duplicates aren't allowed, + // but this diagnostic is present in other compilers, as well as makes + // sense. + if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause)) + return nullptr; + + // The parser has ensured that we have a proper condition expr, so there + // isn't really much to do here. + + // If the 'if' clause is true, it makes the 'self' clause have no effect, + // diagnose that here. + // TODO OpenACC: When we add these two to other constructs, we might not + // want to warn on this (for example, 'update'). + const auto *Itr = + llvm::find_if(ExistingClauses, llvm::IsaPred); + if (Itr != ExistingClauses.end()) { + Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict); + Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here); + } + + return OpenACCIfClause::Create( + getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(), + Clause.getConditionExpr(), Clause.getEndLoc()); + } + + case OpenACCClauseKind::Self: { + // Restrictions only properly implemented on 'compute' constructs, and + // 'compute' constructs are the only construct that can do anything with + // this yet, so skip/treat as unimplemented in this case. + if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind())) + break; + + // TODO OpenACC: When we implement this for 'update', this takes a + // 'var-list' instead of a condition expression, so semantics/handling has + // to happen differently here. + + // There is no prose in the standard that says duplicates aren't allowed, + // but this diagnostic is present in other compilers, as well as makes + // sense. + if (checkAlreadyHasClauseOfKind(*this, ExistingClauses, Clause)) + return nullptr; + + // If the 'if' clause is true, it makes the 'self' clause have no effect, + // diagnose that here. + // TODO OpenACC: When we add these two to other constructs, we might not + // want to warn on this (for example, 'update'). 
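A rough sketch of the duplicate-detection pattern used by checkAlreadyHasClauseOfKind in this file's changes, written with plain STL stand-ins instead of clang's clause types: scan the clauses already attached to the directive for one of the same kind, then diagnose the new clause and note the earlier one.

#include <algorithm>
#include <cstdio>
#include <vector>

// Simplified stand-ins for the clause kinds handled in this file's changes.
enum class OpenACCClauseKind { Default, If, Self };

struct ParsedClause { OpenACCClauseKind Kind; };

// Mirrors the shape of checkAlreadyHasClauseOfKind: find an existing clause of
// the same kind so the duplicate can be diagnosed and the original noted.
const ParsedClause *findExistingClause(const std::vector<ParsedClause> &Existing,
                                       OpenACCClauseKind Kind) {
  auto It = std::find_if(Existing.begin(), Existing.end(),
                         [&](const ParsedClause &C) { return C.Kind == Kind; });
  return It == Existing.end() ? nullptr : &*It;
}

int main() {
  std::vector<ParsedClause> Existing = {{OpenACCClauseKind::If}};
  // A second 'if' clause on the same compute construct would be rejected here;
  // a 'self' clause would instead only draw the if/self conflict warning.
  if (findExistingClause(Existing, OpenACCClauseKind::If))
    std::printf("duplicate 'if' clause: diagnose and drop the new clause\n");
  return 0;
}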
+ const auto *Itr = + llvm::find_if(ExistingClauses, llvm::IsaPred); + if (Itr != ExistingClauses.end()) { + Diag(Clause.getBeginLoc(), diag::warn_acc_if_self_conflict); + Diag((*Itr)->getBeginLoc(), diag::note_acc_previous_clause_here); + } + + return OpenACCSelfClause::Create( + getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(), + Clause.getConditionExpr(), Clause.getEndLoc()); + } + default: + break; + } Diag(Clause.getBeginLoc(), diag::warn_acc_clause_unimplemented) << Clause.getClauseKind(); diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index 256430f469f99..fb231e7da7d62 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -11,6 +11,7 @@ /// //===----------------------------------------------------------------------===// +#include "clang/Sema/SemaOpenMP.h" #include "TreeTransform.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" @@ -33,6 +34,7 @@ #include "clang/Sema/ParsedAttr.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/Sema.h" #include "clang/Sema/SemaInternal.h" #include "llvm/ADT/IndexedMap.h" #include "llvm/ADT/PointerEmbeddedInt.h" @@ -1808,9 +1810,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, return DVar; } const_iterator End = end(); - if (!SemaRef.isOpenMPCapturedByRef(D, - std::distance(ParentIterTarget, End), - /*OpenMPCaptureLevel=*/0)) { + if (!SemaRef.OpenMP().isOpenMPCapturedByRef( + D, std::distance(ParentIterTarget, End), + /*OpenMPCaptureLevel=*/0)) { DVar.RefExpr = buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(), IterTarget->ConstructLoc); @@ -2018,22 +2020,22 @@ bool DSAStackTy::hasDirective( return false; } -void Sema::InitDataSharingAttributesStack() { - VarDataSharingAttributesStack = new DSAStackTy(*this); +void SemaOpenMP::InitDataSharingAttributesStack() { + VarDataSharingAttributesStack = new DSAStackTy(SemaRef); } #define DSAStack static_cast(VarDataSharingAttributesStack) -void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); } +void SemaOpenMP::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); } -void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) { +void SemaOpenMP::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) { DSAStack->popFunction(OldFSI); } static bool isOpenMPDeviceDelayedContext(Sema &S) { assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice && "Expected OpenMP device compilation."); - return !S.isInOpenMPTargetExecutionDirective(); + return !S.OpenMP().isInOpenMPTargetExecutionDirective(); } namespace { @@ -2045,20 +2047,20 @@ enum class FunctionEmissionStatus { }; } // anonymous namespace -Sema::SemaDiagnosticBuilder -Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, - const FunctionDecl *FD) { - assert(LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice && +SemaBase::SemaDiagnosticBuilder +SemaOpenMP::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, + const FunctionDecl *FD) { + assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && "Expected OpenMP device compilation."); SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop; if (FD) { - FunctionEmissionStatus FES = getEmissionStatus(FD); + Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD); switch (FES) { - case FunctionEmissionStatus::Emitted: + case Sema::FunctionEmissionStatus::Emitted: Kind = SemaDiagnosticBuilder::K_Immediate; break; - case FunctionEmissionStatus::Unknown: + case 
Sema::FunctionEmissionStatus::Unknown: // TODO: We should always delay diagnostics here in case a target // region is in a function we do not emit. However, as the // current diagnostics are associated with the function containing @@ -2066,49 +2068,49 @@ Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, // on diagnostics for the target region itself. We need to anchor // the diagnostics with the new generated function *or* ensure we // emit diagnostics associated with the surrounding function. - Kind = isOpenMPDeviceDelayedContext(*this) + Kind = isOpenMPDeviceDelayedContext(SemaRef) ? SemaDiagnosticBuilder::K_Deferred : SemaDiagnosticBuilder::K_Immediate; break; - case FunctionEmissionStatus::TemplateDiscarded: - case FunctionEmissionStatus::OMPDiscarded: + case Sema::FunctionEmissionStatus::TemplateDiscarded: + case Sema::FunctionEmissionStatus::OMPDiscarded: Kind = SemaDiagnosticBuilder::K_Nop; break; - case FunctionEmissionStatus::CUDADiscarded: + case Sema::FunctionEmissionStatus::CUDADiscarded: llvm_unreachable("CUDADiscarded unexpected in OpenMP device compilation"); break; } } - return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this, + return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef, DeviceDiagnosticReason::OmpDevice); } -Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc, - unsigned DiagID, - const FunctionDecl *FD) { - assert(LangOpts.OpenMP && !LangOpts.OpenMPIsTargetDevice && +SemaBase::SemaDiagnosticBuilder +SemaOpenMP::diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID, + const FunctionDecl *FD) { + assert(getLangOpts().OpenMP && !getLangOpts().OpenMPIsTargetDevice && "Expected OpenMP host compilation."); SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop; if (FD) { - FunctionEmissionStatus FES = getEmissionStatus(FD); + Sema::FunctionEmissionStatus FES = SemaRef.getEmissionStatus(FD); switch (FES) { - case FunctionEmissionStatus::Emitted: + case Sema::FunctionEmissionStatus::Emitted: Kind = SemaDiagnosticBuilder::K_Immediate; break; - case FunctionEmissionStatus::Unknown: + case Sema::FunctionEmissionStatus::Unknown: Kind = SemaDiagnosticBuilder::K_Deferred; break; - case FunctionEmissionStatus::TemplateDiscarded: - case FunctionEmissionStatus::OMPDiscarded: - case FunctionEmissionStatus::CUDADiscarded: + case Sema::FunctionEmissionStatus::TemplateDiscarded: + case Sema::FunctionEmissionStatus::OMPDiscarded: + case Sema::FunctionEmissionStatus::CUDADiscarded: Kind = SemaDiagnosticBuilder::K_Nop; break; } } - return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this, + return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, SemaRef, DeviceDiagnosticReason::OmpHost); } @@ -2126,9 +2128,9 @@ getVariableCategoryFromDecl(const LangOptions &LO, const ValueDecl *VD) { return OMPC_DEFAULTMAP_aggregate; } -bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, - unsigned OpenMPCaptureLevel) const { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +bool SemaOpenMP::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, + unsigned OpenMPCaptureLevel) const { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); ASTContext &Ctx = getASTContext(); bool IsByRef = true; @@ -2254,7 +2256,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, !Ty->isAnyPointerType()) || !Ty->isScalarType() || DSAStack->isDefaultmapCapturedByRef( - Level, getVariableCategoryFromDecl(LangOpts, D)) || + Level, getVariableCategoryFromDecl(getLangOpts(), D)) || DSAStack->hasExplicitDSA( D, 
[](OpenMPClauseKind K, bool AppliedToPointee) { @@ -2305,17 +2307,17 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, return IsByRef; } -unsigned Sema::getOpenMPNestingLevel() const { +unsigned SemaOpenMP::getOpenMPNestingLevel() const { assert(getLangOpts().OpenMP); return DSAStack->getNestingLevel(); } -bool Sema::isInOpenMPTaskUntiedContext() const { +bool SemaOpenMP::isInOpenMPTaskUntiedContext() const { return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) && DSAStack->isUntiedRegion(); } -bool Sema::isInOpenMPTargetExecutionDirective() const { +bool SemaOpenMP::isInOpenMPTargetExecutionDirective() const { return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) && !DSAStack->isClauseParsingMode()) || DSAStack->hasDirective( @@ -2326,7 +2328,7 @@ bool Sema::isInOpenMPTargetExecutionDirective() const { false); } -bool Sema::isOpenMPRebuildMemberExpr(ValueDecl *D) { +bool SemaOpenMP::isOpenMPRebuildMemberExpr(ValueDecl *D) { // Only rebuild for Field. if (!dyn_cast(D)) return false; @@ -2349,9 +2351,9 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id, DeclContext *CurContext, bool AsExpression); -VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, - unsigned StopAt) { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +VarDecl *SemaOpenMP::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, + unsigned StopAt) { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); D = getCanonicalDecl(D); auto *VD = dyn_cast(D); @@ -2370,7 +2372,8 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, // 'target' we return true so that this global is also mapped to the device. // if (VD && !VD->hasLocalStorage() && - (getCurCapturedRegion() || getCurBlock() || getCurLambda())) { + (SemaRef.getCurCapturedRegion() || SemaRef.getCurBlock() || + SemaRef.getCurLambda())) { if (isInOpenMPTargetExecutionDirective()) { DSAStackTy::DSAVarData DVarTop = DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode()); @@ -2383,8 +2386,9 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, return nullptr; CapturedRegionScopeInfo *CSI = nullptr; for (FunctionScopeInfo *FSI : llvm::drop_begin( - llvm::reverse(FunctionScopes), - CheckScopeInfo ? (FunctionScopes.size() - (StopAt + 1)) : 0)) { + llvm::reverse(SemaRef.FunctionScopes), + CheckScopeInfo ? (SemaRef.FunctionScopes.size() - (StopAt + 1)) + : 0)) { if (!isa(FSI)) return nullptr; if (auto *RSI = dyn_cast(FSI)) @@ -2403,7 +2407,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, if (isInOpenMPDeclareTargetContext()) { // Try to mark variable as declare target if it is used in capturing // regions. 
- if (LangOpts.OpenMP <= 45 && + if (getLangOpts().OpenMP <= 45 && !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) checkDeclIsAllowedInOpenMPTarget(nullptr, VD); return nullptr; @@ -2413,7 +2417,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, if (CheckScopeInfo) { bool OpenMPFound = false; for (unsigned I = StopAt + 1; I > 0; --I) { - FunctionScopeInfo *FSI = FunctionScopes[I - 1]; + FunctionScopeInfo *FSI = SemaRef.FunctionScopes[I - 1]; if (!isa(FSI)) return nullptr; if (auto *RSI = dyn_cast(FSI)) @@ -2478,22 +2482,23 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD); if (VD) return VD; - if (getCurrentThisType().isNull()) + if (SemaRef.getCurrentThisType().isNull()) return nullptr; - Expr *ThisExpr = BuildCXXThisExpr(SourceLocation(), getCurrentThisType(), - /*IsImplicit=*/true); + Expr *ThisExpr = SemaRef.BuildCXXThisExpr(SourceLocation(), + SemaRef.getCurrentThisType(), + /*IsImplicit=*/true); const CXXScopeSpec CS = CXXScopeSpec(); - Expr *ME = BuildMemberExpr(ThisExpr, /*IsArrow=*/true, SourceLocation(), - NestedNameSpecifierLoc(), SourceLocation(), FD, - DeclAccessPair::make(FD, FD->getAccess()), - /*HadMultipleCandidates=*/false, - DeclarationNameInfo(), FD->getType(), - VK_LValue, OK_Ordinary); + Expr *ME = SemaRef.BuildMemberExpr( + ThisExpr, /*IsArrow=*/true, SourceLocation(), + NestedNameSpecifierLoc(), SourceLocation(), FD, + DeclAccessPair::make(FD, FD->getAccess()), + /*HadMultipleCandidates=*/false, DeclarationNameInfo(), FD->getType(), + VK_LValue, OK_Ordinary); OMPCapturedExprDecl *CD = buildCaptureDecl( - *this, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private, - CurContext->getParent(), /*AsExpression=*/false); + SemaRef, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private, + SemaRef.CurContext->getParent(), /*AsExpression=*/false); DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr( - *this, CD, CD->getType().getNonReferenceType(), SourceLocation()); + SemaRef, CD, CD->getType().getNonReferenceType(), SourceLocation()); VD = cast(VDPrivateRefExpr->getDecl()); DSAStack->addImplicitDefaultFirstprivateFD(FD, VD); return VD; @@ -2507,28 +2512,28 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, return nullptr; } -void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, - unsigned Level) const { +void SemaOpenMP::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, + unsigned Level) const { FunctionScopesIndex -= getOpenMPCaptureLevels(DSAStack->getDirective(Level)); } -void Sema::startOpenMPLoop() { - assert(LangOpts.OpenMP && "OpenMP must be enabled."); +void SemaOpenMP::startOpenMPLoop() { + assert(getLangOpts().OpenMP && "OpenMP must be enabled."); if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) DSAStack->loopInit(); } -void Sema::startOpenMPCXXRangeFor() { - assert(LangOpts.OpenMP && "OpenMP must be enabled."); +void SemaOpenMP::startOpenMPCXXRangeFor() { + assert(getLangOpts().OpenMP && "OpenMP must be enabled."); if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) { DSAStack->resetPossibleLoopCounter(); DSAStack->loopStart(); } } -OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, - unsigned CapLevel) const { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +OpenMPClauseKind SemaOpenMP::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, + unsigned CapLevel) const { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); if 
(DSAStack->getCurrentDirective() != OMPD_unknown && (!DSAStack->isClauseParsingMode() || DSAStack->getParentDirective() != OMPD_unknown)) { @@ -2548,7 +2553,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, } if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) { bool IsTriviallyCopyable = - D->getType().getNonReferenceType().isTriviallyCopyableType(Context) && + D->getType().getNonReferenceType().isTriviallyCopyableType( + getASTContext()) && !D->getType() .getNonReferenceType() .getCanonicalType() @@ -2622,9 +2628,9 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, : OMPC_unknown; } -void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, - unsigned Level) { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +void SemaOpenMP::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, + unsigned Level) { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); D = getCanonicalDecl(D); OpenMPClauseKind OMPC = OMPC_unknown; for (unsigned I = DSAStack->getNestingLevel() + 1; I > Level; --I) { @@ -2651,18 +2657,19 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, NewLevel)) { OMPC = OMPC_map; if (DSAStack->mustBeFirstprivateAtLevel( - NewLevel, getVariableCategoryFromDecl(LangOpts, D))) + NewLevel, getVariableCategoryFromDecl(getLangOpts(), D))) OMPC = OMPC_firstprivate; break; } } if (OMPC != OMPC_unknown) - FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, unsigned(OMPC))); + FD->addAttr( + OMPCaptureKindAttr::CreateImplicit(getASTContext(), unsigned(OMPC))); } -bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, - unsigned CaptureLevel) const { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +bool SemaOpenMP::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, + unsigned CaptureLevel) const { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); // Return true if the current level is no longer enclosed in a target region. SmallVector Regions; @@ -2674,9 +2681,9 @@ bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, Regions[CaptureLevel] != OMPD_task; } -bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, - unsigned CaptureLevel) const { - assert(LangOpts.OpenMP && "OpenMP is not allowed"); +bool SemaOpenMP::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, + unsigned CaptureLevel) const { + assert(getLangOpts().OpenMP && "OpenMP is not allowed"); // Return true if the current level is no longer enclosed in a target region. 
if (const auto *VD = dyn_cast(D)) { @@ -2704,37 +2711,37 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, return true; } -void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; } +void SemaOpenMP::DestroyDataSharingAttributesStack() { delete DSAStack; } -void Sema::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, - OMPTraitInfo &TI) { +void SemaOpenMP::ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, + OMPTraitInfo &TI) { OMPDeclareVariantScopes.push_back(OMPDeclareVariantScope(TI)); } -void Sema::ActOnOpenMPEndDeclareVariant() { +void SemaOpenMP::ActOnOpenMPEndDeclareVariant() { assert(isInOpenMPDeclareVariantScope() && "Not in OpenMP declare variant scope!"); OMPDeclareVariantScopes.pop_back(); } -void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, - const FunctionDecl *Callee, - SourceLocation Loc) { - assert(LangOpts.OpenMP && "Expected OpenMP compilation mode."); +void SemaOpenMP::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, + const FunctionDecl *Callee, + SourceLocation Loc) { + assert(getLangOpts().OpenMP && "Expected OpenMP compilation mode."); std::optional DevTy = OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl()); // Ignore host functions during device analyzis. - if (LangOpts.OpenMPIsTargetDevice && + if (getLangOpts().OpenMPIsTargetDevice && (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host)) return; // Ignore nohost functions during host analyzis. - if (!LangOpts.OpenMPIsTargetDevice && DevTy && + if (!getLangOpts().OpenMPIsTargetDevice && DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) return; const FunctionDecl *FD = Callee->getMostRecentDecl(); DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD); - if (LangOpts.OpenMPIsTargetDevice && DevTy && + if (getLangOpts().OpenMPIsTargetDevice && DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_Host) { // Diagnose host function called during device codegen. 
StringRef HostDevTy = @@ -2745,8 +2752,9 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, << HostDevTy; return; } - if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.OpenMPOffloadMandatory && - DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) { + if (!getLangOpts().OpenMPIsTargetDevice && + !getLangOpts().OpenMPOffloadMandatory && DevTy && + *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) { // In OpenMP 5.2 or later, if the function has a host variant then allow // that to be called instead auto &&HasHostAttr = [](const FunctionDecl *Callee) { @@ -2775,21 +2783,21 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, } } -void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind, - const DeclarationNameInfo &DirName, - Scope *CurScope, SourceLocation Loc) { +void SemaOpenMP::StartOpenMPDSABlock(OpenMPDirectiveKind DKind, + const DeclarationNameInfo &DirName, + Scope *CurScope, SourceLocation Loc) { DSAStack->push(DKind, DirName, CurScope, Loc); - PushExpressionEvaluationContext( - ExpressionEvaluationContext::PotentiallyEvaluated); + SemaRef.PushExpressionEvaluationContext( + Sema::ExpressionEvaluationContext::PotentiallyEvaluated); } -void Sema::StartOpenMPClause(OpenMPClauseKind K) { +void SemaOpenMP::StartOpenMPClause(OpenMPClauseKind K) { DSAStack->setClauseParsingMode(K); } -void Sema::EndOpenMPClause() { +void SemaOpenMP::EndOpenMPClause() { DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown); - CleanupVarDeclMarking(); + SemaRef.CleanupVarDeclMarking(); } static std::pair @@ -2873,7 +2881,7 @@ static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack, const DSAStackTy::DSAVarData &DVar, bool IsLoopIterVar = false); -void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { +void SemaOpenMP::EndOpenMPDSABlock(Stmt *CurDirective) { // OpenMP [2.14.3.5, Restrictions, C/C++, p.1] // A variable of class type (or array thereof) that appears in a lastprivate // clause requires an accessible, unambiguous default constructor for the @@ -2900,15 +2908,15 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { // variable is not added to IdResolver, so the code in the OpenMP // region uses original variable for proper diagnostics. VarDecl *VDPrivate = buildVarDecl( - *this, DE->getExprLoc(), Type.getUnqualifiedType(), + SemaRef, DE->getExprLoc(), Type.getUnqualifiedType(), VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE); - ActOnUninitializedDecl(VDPrivate); + SemaRef.ActOnUninitializedDecl(VDPrivate); if (VDPrivate->isInvalidDecl()) { PrivateCopies.push_back(nullptr); continue; } PrivateCopies.push_back(buildDeclRefExpr( - *this, VDPrivate, DE->getType(), DE->getExprLoc())); + SemaRef, VDPrivate, DE->getType(), DE->getExprLoc())); } else { // The variable is also a firstprivate, so initialization sequence // for private copy is generated already. @@ -2926,7 +2934,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) // It will be analyzed later. 
PrivateRefs.push_back(RefExpr); @@ -2979,7 +2987,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { diag::err_omp_allocator_used_in_clauses) << D.Allocator->getSourceRange(); if (DVar.RefExpr) - reportOriginalDsa(*this, DSAStack, VD, DVar); + reportOriginalDsa(SemaRef, DSAStack, VD, DVar); else Diag(MapExpr->getExprLoc(), diag::note_used_here) << MapExpr->getSourceRange(); @@ -2989,14 +2997,14 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { } } // Check allocate clauses. - if (!CurContext->isDependentContext()) - checkAllocateClauses(*this, DSAStack, D->clauses()); - checkReductionClauses(*this, DSAStack, D->clauses()); + if (!SemaRef.CurContext->isDependentContext()) + checkAllocateClauses(SemaRef, DSAStack, D->clauses()); + checkReductionClauses(SemaRef, DSAStack, D->clauses()); } DSAStack->pop(); - DiscardCleanupsInEvaluationContext(); - PopExpressionEvaluationContext(); + SemaRef.DiscardCleanupsInEvaluationContext(); + SemaRef.PopExpressionEvaluationContext(); } static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, @@ -3049,27 +3057,28 @@ class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback { } // namespace -ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, - CXXScopeSpec &ScopeSpec, - const DeclarationNameInfo &Id, - OpenMPDirectiveKind Kind) { - LookupResult Lookup(*this, Id, LookupOrdinaryName); - LookupParsedName(Lookup, CurScope, &ScopeSpec, true); +ExprResult SemaOpenMP::ActOnOpenMPIdExpression(Scope *CurScope, + CXXScopeSpec &ScopeSpec, + const DeclarationNameInfo &Id, + OpenMPDirectiveKind Kind) { + ASTContext &Context = getASTContext(); + LookupResult Lookup(SemaRef, Id, Sema::LookupOrdinaryName); + SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec, true); if (Lookup.isAmbiguous()) return ExprError(); VarDecl *VD; if (!Lookup.isSingleResult()) { - VarDeclFilterCCC CCC(*this); + VarDeclFilterCCC CCC(SemaRef); if (TypoCorrection Corrected = - CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC, - CTK_ErrorRecovery)) { - diagnoseTypo(Corrected, - PDiag(Lookup.empty() - ? diag::err_undeclared_var_use_suggest - : diag::err_omp_expected_var_arg_suggest) - << Id.getName()); + SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr, + CCC, Sema::CTK_ErrorRecovery)) { + SemaRef.diagnoseTypo( + Corrected, + SemaRef.PDiag(Lookup.empty() ? diag::err_undeclared_var_use_suggest + : diag::err_omp_expected_var_arg_suggest) + << Id.getName()); VD = Corrected.getCorrectionDeclAs(); } else { Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use @@ -3103,7 +3112,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, // A threadprivate directive for file-scope variables must appear outside // any definition or declaration. if (CanonicalVD->getDeclContext()->isTranslationUnit() && - !getCurLexicalContext()->isTranslationUnit()) { + !SemaRef.getCurLexicalContext()->isTranslationUnit()) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(Kind) << VD; bool IsDecl = @@ -3118,7 +3127,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, // in the class definition, in the same scope in which the member // variables are declared. 
if (CanonicalVD->isStaticDataMember() && - !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) { + !CanonicalVD->getDeclContext()->Equals(SemaRef.getCurLexicalContext())) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(Kind) << VD; bool IsDecl = @@ -3133,8 +3142,9 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, // outside any definition or declaration other than the namespace // definition itself. if (CanonicalVD->getDeclContext()->isNamespace() && - (!getCurLexicalContext()->isFileContext() || - !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) { + (!SemaRef.getCurLexicalContext()->isFileContext() || + !SemaRef.getCurLexicalContext()->Encloses( + CanonicalVD->getDeclContext()))) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(Kind) << VD; bool IsDecl = @@ -3148,7 +3158,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, // A threadprivate directive for static block-scope variables must appear // in the scope of the variable and not in a nested scope. if (CanonicalVD->isLocalVarDecl() && CurScope && - !isDeclInScope(ND, getCurLexicalContext(), CurScope)) { + !SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(), CurScope)) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(Kind) << VD; bool IsDecl = @@ -3176,11 +3186,11 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, Id.getLoc(), ExprType, VK_LValue); } -Sema::DeclGroupPtrTy -Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc, - ArrayRef VarList) { +SemaOpenMP::DeclGroupPtrTy +SemaOpenMP::ActOnOpenMPThreadprivateDirective(SourceLocation Loc, + ArrayRef VarList) { if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) { - CurContext->addDecl(D); + SemaRef.CurContext->addDecl(D); return DeclGroupPtrTy::make(DeclGroupRef(D)); } return nullptr; @@ -3217,7 +3227,9 @@ class LocalVarRefChecker final } // namespace OMPThreadPrivateDecl * -Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef VarList) { +SemaOpenMP::CheckOMPThreadPrivateDecl(SourceLocation Loc, + ArrayRef VarList) { + ASTContext &Context = getASTContext(); SmallVector Vars; for (Expr *RefExpr : VarList) { auto *DE = cast(RefExpr); @@ -3237,8 +3249,8 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef VarList) { // OpenMP [2.9.2, Restrictions, C/C++, p.10] // A threadprivate variable must not have an incomplete type. - if (RequireCompleteType(ILoc, VD->getType(), - diag::err_omp_threadprivate_incomplete_type)) { + if (SemaRef.RequireCompleteType( + ILoc, VD->getType(), diag::err_omp_threadprivate_incomplete_type)) { continue; } @@ -3276,7 +3288,7 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef VarList) { // Check if initial value of threadprivate variable reference variable with // local storage (it is not supported by runtime). 
if (const Expr *Init = VD->getAnyInitializer()) { - LocalVarRefChecker Checker(*this); + LocalVarRefChecker Checker(SemaRef); if (Checker.Visit(Init)) continue; } @@ -3290,8 +3302,8 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef VarList) { } OMPThreadPrivateDecl *D = nullptr; if (!Vars.empty()) { - D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc, - Vars); + D = OMPThreadPrivateDecl::Create(Context, SemaRef.getCurLexicalContext(), + Loc, Vars); D->setAccess(AS_public); } return D; @@ -3397,10 +3409,9 @@ applyOMPAllocateAttribute(Sema &S, VarDecl *VD, ML->DeclarationMarkedOpenMPAllocate(VD, A); } -Sema::DeclGroupPtrTy -Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, - ArrayRef Clauses, - DeclContext *Owner) { +SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPAllocateDirective( + SourceLocation Loc, ArrayRef VarList, ArrayRef Clauses, + DeclContext *Owner) { assert(Clauses.size() <= 2 && "Expected at most two clauses."); Expr *Alignment = nullptr; Expr *Allocator = nullptr; @@ -3409,9 +3420,9 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, // allocate directives that appear in a target region must specify an // allocator clause unless a requires directive with the dynamic_allocators // clause is present in the same compilation unit. - if (LangOpts.OpenMPIsTargetDevice && + if (getLangOpts().OpenMPIsTargetDevice && !DSAStack->hasRequiresDeclWithClause()) - targetDiag(Loc, diag::err_expected_allocator_clause); + SemaRef.targetDiag(Loc, diag::err_expected_allocator_clause); } else { for (const OMPClause *C : Clauses) if (const auto *AC = dyn_cast(C)) @@ -3422,7 +3433,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, llvm_unreachable("Unexpected clause on allocate directive"); } OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind = - getAllocatorKind(*this, DSAStack, Allocator); + getAllocatorKind(SemaRef, DSAStack, Allocator); SmallVector Vars; for (Expr *RefExpr : VarList) { auto *DE = cast(RefExpr); @@ -3437,7 +3448,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, // If the used several times in the allocate directive, the same allocator // must be used. - if (checkPreviousOMPAllocateAttribute(*this, DSAStack, RefExpr, VD, + if (checkPreviousOMPAllocateAttribute(SemaRef, DSAStack, RefExpr, VD, AllocatorKind, Allocator)) continue; @@ -3450,7 +3461,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, Diag(Allocator->getExprLoc(), diag::err_omp_expected_predefined_allocator) << Allocator->getSourceRange(); - bool IsDecl = VD->isThisDeclarationADefinition(Context) == + bool IsDecl = VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? 
diag::note_previous_decl : diag::note_defined_here) @@ -3460,45 +3471,46 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef VarList, } Vars.push_back(RefExpr); - applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment, + applyOMPAllocateAttribute(SemaRef, VD, AllocatorKind, Allocator, Alignment, DE->getSourceRange()); } if (Vars.empty()) return nullptr; if (!Owner) - Owner = getCurLexicalContext(); - auto *D = OMPAllocateDecl::Create(Context, Owner, Loc, Vars, Clauses); + Owner = SemaRef.getCurLexicalContext(); + auto *D = OMPAllocateDecl::Create(getASTContext(), Owner, Loc, Vars, Clauses); D->setAccess(AS_public); Owner->addDecl(D); return DeclGroupPtrTy::make(DeclGroupRef(D)); } -Sema::DeclGroupPtrTy -Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc, - ArrayRef ClauseList) { +SemaOpenMP::DeclGroupPtrTy +SemaOpenMP::ActOnOpenMPRequiresDirective(SourceLocation Loc, + ArrayRef ClauseList) { OMPRequiresDecl *D = nullptr; - if (!CurContext->isFileContext()) { + if (!SemaRef.CurContext->isFileContext()) { Diag(Loc, diag::err_omp_invalid_scope) << "requires"; } else { D = CheckOMPRequiresDecl(Loc, ClauseList); if (D) { - CurContext->addDecl(D); + SemaRef.CurContext->addDecl(D); DSAStack->addRequiresDecl(D); } } return DeclGroupPtrTy::make(DeclGroupRef(D)); } -void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc, - OpenMPDirectiveKind DKind, - ArrayRef Assumptions, - bool SkippedClauses) { +void SemaOpenMP::ActOnOpenMPAssumesDirective(SourceLocation Loc, + OpenMPDirectiveKind DKind, + ArrayRef Assumptions, + bool SkippedClauses) { if (!SkippedClauses && Assumptions.empty()) Diag(Loc, diag::err_omp_no_clause_for_directive) << llvm::omp::getAllAssumeClauseOptions() << llvm::omp::getOpenMPDirectiveName(DKind); - auto *AA = OMPAssumeAttr::Create(Context, llvm::join(Assumptions, ","), Loc); + auto *AA = + OMPAssumeAttr::Create(getASTContext(), llvm::join(Assumptions, ","), Loc); if (DKind == llvm::omp::Directive::OMPD_begin_assumes) { OMPAssumeScoped.push_back(AA); return; @@ -3517,7 +3529,7 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc, // declarations in included headers. To this end, we traverse all existing // declaration contexts and annotate function declarations here. SmallVector DeclContexts; - auto *Ctx = CurContext; + auto *Ctx = SemaRef.CurContext; while (Ctx->getLexicalParent()) Ctx = Ctx->getLexicalParent(); DeclContexts.push_back(Ctx); @@ -3541,13 +3553,14 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc, } } -void Sema::ActOnOpenMPEndAssumesDirective() { +void SemaOpenMP::ActOnOpenMPEndAssumesDirective() { assert(isInOpenMPAssumeScope() && "Not in OpenMP assumes scope!"); OMPAssumeScoped.pop_back(); } -OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc, - ArrayRef ClauseList) { +OMPRequiresDecl * +SemaOpenMP::CheckOMPRequiresDecl(SourceLocation Loc, + ArrayRef ClauseList) { /// For target specific clauses, the requires directive cannot be /// specified after the handling of any of the target regions in the /// current compilation unit. 
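The SemaOpenMP.cpp hunks follow one mechanical pattern: entry points move from Sema into SemaOpenMP, the component reaches back to its parent through a stored SemaRef (plus getLangOpts()/getASTContext() accessors), and outside callers go through Sema::OpenMP(). A minimal sketch of that shape, using simplified stand-in types; the real classes are wired together differently in detail and expose far more state.

#include <cstdio>

// Simplified stand-ins; the real classes live in clang/Sema.
struct LangOptions { bool OpenMP = true; };

class Sema;

class SemaOpenMP {
public:
  explicit SemaOpenMP(Sema &S) : SemaRef(S) {}
  // Was Sema::isInOpenMPTargetExecutionDirective() before the refactor.
  bool isInOpenMPTargetExecutionDirective() const;

private:
  const LangOptions &getLangOpts() const; // forwards to SemaRef.LangOpts
  Sema &SemaRef;
};

class Sema {
public:
  Sema() : OpenMPPtr(*this) {}
  SemaOpenMP &OpenMP() { return OpenMPPtr; }
  LangOptions LangOpts;

private:
  SemaOpenMP OpenMPPtr;
};

const LangOptions &SemaOpenMP::getLangOpts() const { return SemaRef.LangOpts; }

bool SemaOpenMP::isInOpenMPTargetExecutionDirective() const {
  // The moved body now reads options via getLangOpts() instead of touching
  // Sema::LangOpts directly; the real logic consults the directive stack.
  return getLangOpts().OpenMP;
}

int main() {
  Sema S;
  // Callers outside the component, like isOpenMPDeviceDelayedContext above,
  // now spell S.OpenMP().isInOpenMPTargetExecutionDirective().
  std::printf("%d\n", S.OpenMP().isInOpenMPTargetExecutionDirective() ? 1 : 0);
  return 0;
}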
@@ -3578,8 +3591,8 @@ OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc, } if (!DSAStack->hasDuplicateRequiresClause(ClauseList)) - return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc, - ClauseList); + return OMPRequiresDecl::Create( + getASTContext(), SemaRef.getCurLexicalContext(), Loc, ClauseList); return nullptr; } @@ -3697,7 +3710,7 @@ class DSAAttrChecker final : public StmtVisitor { llvm::SmallVector ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete]; llvm::SmallVector ImplicitMapModifier[DefaultmapKindNum]; - Sema::VarsWithInheritedDSAType VarsWithInheritedDSA; + SemaOpenMP::VarsWithInheritedDSAType VarsWithInheritedDSA; llvm::SmallDenseSet ImplicitDeclarations; void VisitSubCaptures(OMPExecutableDirective *S) { @@ -4163,7 +4176,7 @@ class DSAAttrChecker final : public StmtVisitor { getImplicitMapModifier(OpenMPDefaultmapClauseKind Kind) const { return ImplicitMapModifier[Kind]; } - const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const { + const SemaOpenMP::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const { return VarsWithInheritedDSA; } @@ -4195,7 +4208,9 @@ static void handleDeclareVariantConstructTrait(DSAStackTy *Stack, Stack->handleConstructTrait(Traits, ScopeEntry); } -void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { +void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, + Scope *CurScope) { + ASTContext &Context = getASTContext(); switch (DKind) { case OMPD_parallel: case OMPD_parallel_for: @@ -4210,13 +4225,13 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst(); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty).withConst().withRestrict(); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); break; } case OMPD_target_teams: @@ -4234,7 +4249,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4244,31 +4259,33 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".task_t.", Context.VoidPtrTy.withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params, /*OpenMPCaptureLevel=*/0); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params, + /*OpenMPCaptureLevel=*/0); // Mark this captured region as inlined, because we don't use outlined // function directly. 
- getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); - SmallVector ParamsTarget; + SmallVector ParamsTarget; if (getLangOpts().OpenMPIsTargetDevice) ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy)); ParamsTarget.push_back( std::make_pair(StringRef(), QualType())); // __context with shared vars; // Start a captured region for 'target' with no implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTarget, - /*OpenMPCaptureLevel=*/1); - Sema::CapturedParamNameType ParamsTeamsOrParallel[] = { + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTarget, + /*OpenMPCaptureLevel=*/1); + SemaOpenMP::CapturedParamNameType ParamsTeamsOrParallel[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; // Start a captured region for 'teams' or 'parallel'. Both regions have // the same implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTeamsOrParallel, /*OpenMPCaptureLevel=*/2); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTeamsOrParallel, + /*OpenMPCaptureLevel=*/2); break; } case OMPD_target: @@ -4281,7 +4298,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4291,21 +4308,22 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".task_t.", Context.VoidPtrTy.withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params, /*OpenMPCaptureLevel=*/0); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params, + /*OpenMPCaptureLevel=*/0); // Mark this captured region as inlined, because we don't use outlined // function directly. 
- getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); - SmallVector ParamsTarget; + SmallVector ParamsTarget; if (getLangOpts().OpenMPIsTargetDevice) ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy)); ParamsTarget.push_back( std::make_pair(StringRef(), QualType())); // __context with shared vars; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTarget, - /*OpenMPCaptureLevel=*/1); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTarget, + /*OpenMPCaptureLevel=*/1); break; } case OMPD_atomic: @@ -4331,11 +4349,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { case OMPD_scope: case OMPD_target_data: case OMPD_dispatch: { - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); break; } case OMPD_task: { @@ -4347,7 +4365,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4357,11 +4375,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".task_t.", Context.VoidPtrTy.withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); // Mark this captured region as inlined, because we don't use outlined // function directly. - getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); break; @@ -4388,7 +4406,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4403,11 +4421,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".reductions.", VoidPtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); // Mark this captured region as inlined, because we don't use outlined // function directly. 
- getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); break; @@ -4428,19 +4446,20 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict(); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty).withConst().withRestrict(); - Sema::CapturedParamNameType ParamsParallel[] = { + SemaOpenMP::CapturedParamNameType ParamsParallel[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; // Start a captured region for 'parallel'. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsParallel, /*OpenMPCaptureLevel=*/0); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsParallel, + /*OpenMPCaptureLevel=*/0); QualType Args[] = {VoidPtrTy}; FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4455,11 +4474,12 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".reductions.", VoidPtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params, /*OpenMPCaptureLevel=*/1); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params, + /*OpenMPCaptureLevel=*/1); // Mark this captured region as inlined, because we don't use outlined // function directly. - getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); break; @@ -4469,17 +4489,19 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst(); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty).withConst().withRestrict(); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(".previous.lb.", Context.getSizeType().withConst()), std::make_pair(".previous.ub.", Context.getSizeType().withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); break; } + // For 'target teams loop', collect all captured regions so codegen can + // later decide the best IR to emit given the associated loop-nest. 
case OMPD_target_teams_loop: case OMPD_target_teams_distribute_parallel_for: case OMPD_target_teams_distribute_parallel_for_simd: { @@ -4492,7 +4514,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4502,32 +4524,35 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".task_t.", Context.VoidPtrTy.withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params, /*OpenMPCaptureLevel=*/0); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params, + /*OpenMPCaptureLevel=*/0); // Mark this captured region as inlined, because we don't use outlined // function directly. - getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); - SmallVector<Sema::CapturedParamNameType> ParamsTarget; + SmallVector<SemaOpenMP::CapturedParamNameType> ParamsTarget; if (getLangOpts().OpenMPIsTargetDevice) ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy)); ParamsTarget.push_back( std::make_pair(StringRef(), QualType())); // __context with shared vars; // Start a captured region for 'target' with no implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTarget, /*OpenMPCaptureLevel=*/1); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTarget, + /*OpenMPCaptureLevel=*/1); - Sema::CapturedParamNameType ParamsTeams[] = { + SemaOpenMP::CapturedParamNameType ParamsTeams[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; // Start a captured region for 'target' with no implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTeams, /*OpenMPCaptureLevel=*/2); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTeams, + /*OpenMPCaptureLevel=*/2); - Sema::CapturedParamNameType ParamsParallel[] = { + SemaOpenMP::CapturedParamNameType ParamsParallel[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(".previous.lb.", Context.getSizeType().withConst()), @@ -4536,8 +4561,9 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { }; // Start a captured region for 'teams' or 'parallel'. Both regions have // the same implicit parameters.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsParallel, /*OpenMPCaptureLevel=*/3); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsParallel, + /*OpenMPCaptureLevel=*/3); break; } @@ -4548,16 +4574,17 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty).withConst().withRestrict(); - Sema::CapturedParamNameType ParamsTeams[] = { + SemaOpenMP::CapturedParamNameType ParamsTeams[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; // Start a captured region for 'target' with no implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsTeams, /*OpenMPCaptureLevel=*/0); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsTeams, + /*OpenMPCaptureLevel=*/0); - Sema::CapturedParamNameType ParamsParallel[] = { + SemaOpenMP::CapturedParamNameType ParamsParallel[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(".previous.lb.", Context.getSizeType().withConst()), @@ -4566,8 +4593,9 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { }; // Start a captured region for 'teams' or 'parallel'. Both regions have // the same implicit parameters. - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - ParamsParallel, /*OpenMPCaptureLevel=*/1); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, ParamsParallel, + /*OpenMPCaptureLevel=*/1); break; } case OMPD_target_update: @@ -4581,7 +4609,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); - Sema::CapturedParamNameType Params[] = { + SemaOpenMP::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32PtrTy), std::make_pair(".privates.", VoidPtrTy), @@ -4591,11 +4619,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { std::make_pair(".task_t.", Context.VoidPtrTy.withConst()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; - ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, - Params); + SemaRef.ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, + CR_OpenMP, Params); // Mark this captured region as inlined, because we don't use outlined // function directly. 
- getCurCapturedRegion()->TheCapturedDecl->addAttr( + SemaRef.getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, {}, AlwaysInlineAttr::Keyword_forceinline)); break; @@ -4626,15 +4654,15 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { default: llvm_unreachable("Unknown OpenMP directive"); } - DSAStack->setContext(CurContext); + DSAStack->setContext(SemaRef.CurContext); handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true); } -int Sema::getNumberOfConstructScopes(unsigned Level) const { +int SemaOpenMP::getNumberOfConstructScopes(unsigned Level) const { return getOpenMPCaptureLevels(DSAStack->getDirective(Level)); } -int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) { +int SemaOpenMP::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) { SmallVector CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, DKind); return CaptureRegions.size(); @@ -4674,7 +4702,7 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id, static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr, bool WithInit) { OMPCapturedExprDecl *CD; - if (VarDecl *VD = S.isOpenMPCapturedDecl(D)) + if (VarDecl *VD = S.OpenMP().isOpenMPCapturedDecl(D)) CD = cast(VD); else CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit, @@ -4726,7 +4754,7 @@ class CaptureRegionUnwinderRAII { : S(S), ErrorFound(ErrorFound), DKind(DKind) {} ~CaptureRegionUnwinderRAII() { if (ErrorFound) { - int ThisCaptureLevel = S.getOpenMPCaptureLevels(DKind); + int ThisCaptureLevel = S.OpenMP().getOpenMPCaptureLevels(DKind); while (--ThisCaptureLevel >= 0) S.ActOnCapturedRegionError(); } @@ -4734,10 +4762,10 @@ class CaptureRegionUnwinderRAII { }; } // namespace -void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) { +void SemaOpenMP::tryCaptureOpenMPLambdas(ValueDecl *V) { // Capture variables captured by reference in lambdas for target-based // directives. 
- if (!CurContext->isDependentContext() && + if (!SemaRef.CurContext->isDependentContext() && (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) || isOpenMPTargetDataManagementDirective( DSAStack->getCurrentDirective()))) { @@ -4757,14 +4785,14 @@ void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) { if (LC.getCaptureKind() == LCK_ByRef) { VarDecl *VD = cast(LC.getCapturedVar()); DeclContext *VDC = VD->getDeclContext(); - if (!VDC->Encloses(CurContext)) + if (!VDC->Encloses(SemaRef.CurContext)) continue; - MarkVariableReferenced(LC.getLocation(), VD); + SemaRef.MarkVariableReferenced(LC.getLocation(), VD); } else if (LC.getCaptureKind() == LCK_This) { - QualType ThisTy = getCurrentThisType(); - if (!ThisTy.isNull() && - Context.typesAreCompatible(ThisTy, ThisCapture->getType())) - CheckCXXThisCapture(LC.getLocation()); + QualType ThisTy = SemaRef.getCurrentThisType(); + if (!ThisTy.isNull() && getASTContext().typesAreCompatible( + ThisTy, ThisCapture->getType())) + SemaRef.CheckCXXThisCapture(LC.getLocation()); } } } @@ -4804,8 +4832,8 @@ static bool checkOrderedOrderSpecified(Sema &S, return false; } -StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, - ArrayRef Clauses) { +StmtResult SemaOpenMP::ActOnOpenMPRegionEnd(StmtResult S, + ArrayRef Clauses) { handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(), /* ScopeEntry */ false); if (DSAStack->getCurrentDirective() == OMPD_atomic || @@ -4817,7 +4845,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, bool ErrorFound = false; CaptureRegionUnwinderRAII CaptureRegionUnwinder( - *this, ErrorFound, DSAStack->getCurrentDirective()); + SemaRef, ErrorFound, DSAStack->getCurrentDirective()); if (!S.isUsable()) { ErrorFound = true; return StmtError(); @@ -4831,7 +4859,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, SmallVector PICs; // This is required for proper codegen. for (OMPClause *Clause : Clauses) { - if (!LangOpts.OpenMPSimd && + if (!getLangOpts().OpenMPSimd && (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) || DSAStack->getCurrentDirective() == OMPD_target) && Clause->getClauseKind() == OMPC_in_reduction) { @@ -4840,7 +4868,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, auto *IRC = cast(Clause); for (Expr *E : IRC->taskgroup_descriptors()) if (E) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } if (isOpenMPPrivate(Clause->getClauseKind()) || Clause->getClauseKind() == OMPC_copyprivate || @@ -4851,7 +4879,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, // Mark all variables in private list clauses as used in inner region. for (Stmt *VarRef : Clause->children()) { if (auto *E = cast_or_null(VarRef)) { - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } } DSAStack->setForceVarCapturing(/*V=*/false); @@ -4865,7 +4893,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, PICs.push_back(C); if (auto *C = OMPClauseWithPostUpdate::get(Clause)) { if (Expr *E = C->getPostUpdateExpr()) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } } if (Clause->getClauseKind() == OMPC_schedule) @@ -4877,7 +4905,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, } // Capture allocator expressions if used. 
for (Expr *E : DSAStack->getInnerAllocators()) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); // OpenMP, 2.7.1 Loop Construct, Restrictions // The nonmonotonic modifier cannot be specified if an ordered clause is // specified. @@ -4899,7 +4927,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Restrictions. // If an order(concurrent) clause is present, an ordered clause may not appear // on the same directive. - if (checkOrderedOrderSpecified(*this, Clauses)) + if (checkOrderedOrderSpecified(SemaRef, Clauses)) ErrorFound = true; if (!LCs.empty() && OC && OC->getNumForLoops()) { for (const OMPLinearClause *C : LCs) { @@ -4936,7 +4964,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, CaptureRegion == OMPD_unknown) { if (auto *DS = cast_or_null(C->getPreInitStmt())) { for (Decl *D : DS->decls()) - MarkVariableReferenced(D->getLocation(), cast(D)); + SemaRef.MarkVariableReferenced(D->getLocation(), + cast(D)); } } } @@ -4950,7 +4979,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, ++I) { OMPUsesAllocatorsClause::Data D = UAC->getAllocatorData(I); if (Expr *E = D.AllocatorTraits) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } continue; } @@ -4965,17 +4994,17 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, continue; for (Expr *E : RC->copy_array_temps()) if (E) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } if (auto *AC = dyn_cast(C)) { for (Expr *E : AC->varlists()) - MarkDeclarationsReferencedInExpr(E); + SemaRef.MarkDeclarationsReferencedInExpr(E); } } } if (++CompletedRegions == CaptureRegions.size()) DSAStack->setBodyComplete(); - SR = ActOnCapturedRegionEnd(SR.get()); + SR = SemaRef.ActOnCapturedRegionEnd(SR.get()); } return SR; } @@ -5782,9 +5811,9 @@ static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy, // the OpenMPIRBuilder to know additional C/C++ semantics, such as how to // invoke a copy constructor. 
QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy); - Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy}, - {"Logical", LogicalTy}, - {StringRef(), QualType()}}; + SemaOpenMP::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy}, + {"Logical", LogicalTy}, + {StringRef(), QualType()}}; Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params); // Capture the initial iterator which represents the LoopVar value at the @@ -5835,7 +5864,7 @@ static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy, AssertSuccess(Actions.ActOnCapturedRegionEnd(Body))); } -StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) { +StmtResult SemaOpenMP::ActOnOpenMPCanonicalLoop(Stmt *AStmt) { ASTContext &Ctx = getASTContext(); // Extract the common elements of ForStmt and CXXForRangeStmt: @@ -5946,8 +5975,8 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) { if (IncBin->getOpcode() == BO_AddAssign) { Step = IncBin->getRHS(); } else if (IncBin->getOpcode() == BO_SubAssign) { - Step = - AssertSuccess(BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS())); + Step = AssertSuccess( + SemaRef.BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS())); } else llvm_unreachable("unhandled binary increment operator"); } else if (auto *CondCXXOp = dyn_cast(Inc)) { @@ -5965,7 +5994,7 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) { break; case OO_MinusEqual: Step = AssertSuccess( - BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1))); + SemaRef.BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1))); break; default: llvm_unreachable("unhandled overloaded increment operator"); @@ -5974,16 +6003,17 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) { llvm_unreachable("unknown increment expression"); CapturedStmt *DistanceFunc = - buildDistanceFunc(*this, LogicalTy, CondRel, LHS, RHS, Step); + buildDistanceFunc(SemaRef, LogicalTy, CondRel, LHS, RHS, Step); CapturedStmt *LoopVarFunc = buildLoopVarFunc( - *this, LVTy, LogicalTy, CounterRef, Step, isa(AStmt)); - DeclRefExpr *LVRef = BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue, - {}, nullptr, nullptr, {}, nullptr); + SemaRef, LVTy, LogicalTy, CounterRef, Step, isa(AStmt)); + DeclRefExpr *LVRef = + SemaRef.BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue, {}, + nullptr, nullptr, {}, nullptr); return OMPCanonicalLoop::create(getASTContext(), AStmt, DistanceFunc, LoopVarFunc, LVRef); } -StmtResult Sema::ActOnOpenMPLoopnest(Stmt *AStmt) { +StmtResult SemaOpenMP::ActOnOpenMPLoopnest(Stmt *AStmt) { // Handle a literal loop. 
if (isa<ForStmt>(AStmt) || isa<CXXForRangeStmt>(AStmt)) return ActOnOpenMPCanonicalLoop(AStmt); @@ -6128,7 +6158,7 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack, continue; CXXScopeSpec MapperIdScopeSpec; DeclarationNameInfo MapperId; - if (OMPClause *NewClause = S.ActOnOpenMPMapClause( + if (OMPClause *NewClause = S.OpenMP().ActOnOpenMPMapClause( nullptr, C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(), MapperIdScopeSpec, MapperId, C->getMapType(), /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(), @@ -6137,14 +6167,85 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack, } } -bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind, - ArrayRef<OMPClause *> Clauses, - OpenMPBindClauseKind &BindKind, - OpenMPDirectiveKind &Kind, - OpenMPDirectiveKind &PrevMappedDirective, - SourceLocation StartLoc, SourceLocation EndLoc, - const DeclarationNameInfo &DirName, - OpenMPDirectiveKind CancelRegion) { +namespace { +/// A 'teams loop' with a nested 'loop bind(parallel)' or generic function +/// call in the associated loop-nest cannot be a 'parallel for'. +class TeamsLoopChecker final : public ConstStmtVisitor<TeamsLoopChecker> { + Sema &SemaRef; + +public: + bool teamsLoopCanBeParallelFor() const { return TeamsLoopCanBeParallelFor; } + + // Is there a nested OpenMP loop bind(parallel) + void VisitOMPExecutableDirective(const OMPExecutableDirective *D) { + if (D->getDirectiveKind() == llvm::omp::Directive::OMPD_loop) { + if (const auto *C = D->getSingleClause<OMPBindClause>()) + if (C->getBindKind() == OMPC_BIND_parallel) { + TeamsLoopCanBeParallelFor = false; + // No need to continue visiting any more + return; + } + } + for (const Stmt *Child : D->children()) + if (Child) + Visit(Child); + } + + void VisitCallExpr(const CallExpr *C) { + // Function calls inhibit parallel loop translation of 'target teams loop' + // unless the assume-no-nested-parallelism flag has been specified. + // OpenMP API runtime library calls do not inhibit parallel loop + // translation, regardless of the assume-no-nested-parallelism.
+ if (C) { + bool IsOpenMPAPI = false; + auto *FD = dyn_cast_or_null<FunctionDecl>(C->getCalleeDecl()); + if (FD) { + std::string Name = FD->getNameInfo().getAsString(); + IsOpenMPAPI = Name.find("omp_") == 0; + } + TeamsLoopCanBeParallelFor = + IsOpenMPAPI || SemaRef.getLangOpts().OpenMPNoNestedParallelism; + if (!TeamsLoopCanBeParallelFor) + return; + } + for (const Stmt *Child : C->children()) + if (Child) + Visit(Child); + } + + void VisitCapturedStmt(const CapturedStmt *S) { + if (!S) + return; + Visit(S->getCapturedDecl()->getBody()); + } + + void VisitStmt(const Stmt *S) { + if (!S) + return; + for (const Stmt *Child : S->children()) + if (Child) + Visit(Child); + } + explicit TeamsLoopChecker(Sema &SemaRef) + : SemaRef(SemaRef), TeamsLoopCanBeParallelFor(true) {} + +private: + bool TeamsLoopCanBeParallelFor; +}; +} // namespace + +static bool teamsLoopCanBeParallelFor(Stmt *AStmt, Sema &SemaRef) { + TeamsLoopChecker Checker(SemaRef); + Checker.Visit(AStmt); + return Checker.teamsLoopCanBeParallelFor(); +} + +bool SemaOpenMP::mapLoopConstruct( + llvm::SmallVector<OMPClause *> &ClausesWithoutBind, + ArrayRef<OMPClause *> Clauses, OpenMPBindClauseKind &BindKind, + OpenMPDirectiveKind &Kind, OpenMPDirectiveKind &PrevMappedDirective, + SourceLocation StartLoc, SourceLocation EndLoc, + const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion) { bool UseClausesWithoutBind = false; @@ -6226,7 +6327,7 @@ bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind, return UseClausesWithoutBind; } -StmtResult Sema::ActOnOpenMPExecutableDirective( +StmtResult SemaOpenMP::ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, @@ -6251,8 +6352,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( } // First check CancelRegion which is then used in checkNestingOfRegions. - if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) || - checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion, + if (checkCancelRegion(SemaRef, Kind, CancelRegion, StartLoc) || + checkNestingOfRegions(SemaRef, DSAStack, DK, DirName, CancelRegion, BindKind, StartLoc)) { return StmtError(); } @@ -6271,13 +6372,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( } else { ClausesWithImplicit.append(Clauses.begin(), Clauses.end()); } - if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic && - Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master && - Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) { + if (AStmt && !SemaRef.CurContext->isDependentContext() && + Kind != OMPD_atomic && Kind != OMPD_critical && Kind != OMPD_section && + Kind != OMPD_master && Kind != OMPD_masked && + !isOpenMPLoopTransformationDirective(Kind)) { assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); // Check default data sharing attributes for referenced variables. - DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt)); + DSAAttrChecker DSAChecker(DSAStack, SemaRef, cast<CapturedStmt>(AStmt)); int ThisCaptureLevel = getOpenMPCaptureLevels(Kind); Stmt *S = AStmt; while (--ThisCaptureLevel >= 0) @@ -6417,8 +6519,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( } // Build expressions for implicit maps of data members with 'default' // mappers.
- if (LangOpts.OpenMP >= 50) - processImplicitMapsWithDefaultMappers(*this, DSAStack, + if (getLangOpts().OpenMP >= 50) + processImplicitMapsWithDefaultMappers(SemaRef, DSAStack, ClausesWithImplicit); } @@ -6432,7 +6534,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( case OMPD_simd: Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_tile: @@ -6450,7 +6552,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( case OMPD_for_simd: Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_sections: @@ -6488,7 +6590,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPParallelForSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_scope: @@ -6625,7 +6727,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPTaskLoopSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_taskloop); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_master_taskloop: @@ -6642,13 +6744,13 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPMasterTaskLoopSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_taskloop); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_masked_taskloop_simd: Res = ActOnOpenMPMaskedTaskLoopSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 51) { + if (getLangOpts().OpenMP >= 51) { AllowedNameModifiers.push_back(OMPD_taskloop); AllowedNameModifiers.push_back(OMPD_simd); } @@ -6662,7 +6764,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( case OMPD_parallel_masked_taskloop: Res = ActOnOpenMPParallelMaskedTaskLoopDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 51) { + if (getLangOpts().OpenMP >= 51) { AllowedNameModifiers.push_back(OMPD_taskloop); AllowedNameModifiers.push_back(OMPD_parallel); } @@ -6672,13 +6774,13 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_taskloop); AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_parallel_masked_taskloop_simd: Res = ActOnOpenMPParallelMaskedTaskLoopSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 51) { + if (getLangOpts().OpenMP >= 51) { AllowedNameModifiers.push_back(OMPD_taskloop); AllowedNameModifiers.push_back(OMPD_parallel); AllowedNameModifiers.push_back(OMPD_simd); @@ -6702,13 +6804,13 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPDistributeParallelForSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); 
AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_distribute_simd: Res = ActOnOpenMPDistributeSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_target_parallel_for_simd: @@ -6716,14 +6818,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_target); AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_target_simd: Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_target); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_teams_distribute: @@ -6733,14 +6835,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( case OMPD_teams_distribute_simd: Res = ActOnOpenMPTeamsDistributeSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_teams_distribute_parallel_for_simd: Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_teams_distribute_parallel_for: @@ -6769,14 +6871,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_target); AllowedNameModifiers.push_back(OMPD_parallel); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_target_teams_distribute_simd: Res = ActOnOpenMPTargetTeamsDistributeSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); AllowedNameModifiers.push_back(OMPD_target); - if (LangOpts.OpenMP >= 50) + if (getLangOpts().OpenMP >= 50) AllowedNameModifiers.push_back(OMPD_simd); break; case OMPD_interop: @@ -6833,7 +6935,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( if (DSAStack->getDefaultDSA() == DSA_none || DSAStack->getDefaultDSA() == DSA_private || DSAStack->getDefaultDSA() == DSA_firstprivate) { - DSAAttrChecker DSAChecker(DSAStack, *this, nullptr); + DSAAttrChecker DSAChecker(DSAStack, SemaRef, nullptr); for (OMPClause *C : Clauses) { switch (C->getClauseKind()) { case OMPC_num_threads: @@ -6970,13 +7072,13 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( } if (!AllowedNameModifiers.empty()) - ErrorFound = checkIfClauses(*this, Kind, Clauses, AllowedNameModifiers) || + ErrorFound = checkIfClauses(SemaRef, Kind, Clauses, AllowedNameModifiers) || ErrorFound; if (ErrorFound) return StmtError(); - if (!CurContext->isDependentContext() && + if (!SemaRef.CurContext->isDependentContext() && isOpenMPTargetExecutionDirective(Kind) && !(DSAStack->hasRequiresDeclWithClause() || DSAStack->hasRequiresDeclWithClause() || @@ -6989,7 +7091,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective( return Res; } -Sema::DeclGroupPtrTy 
Sema::ActOnOpenMPDeclareSimdDirective( +SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef Uniforms, ArrayRef Aligneds, ArrayRef Alignments, ArrayRef Linears, @@ -7224,13 +7326,15 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective( NewStep = PerformOpenMPImplicitIntegerConversion(Step->getExprLoc(), Step) .get(); if (NewStep) - NewStep = - VerifyIntegerConstantExpression(NewStep, /*FIXME*/ AllowFold).get(); + NewStep = SemaRef + .VerifyIntegerConstantExpression( + NewStep, /*FIXME*/ Sema::AllowFold) + .get(); } NewSteps.push_back(NewStep); } auto *NewAttr = OMPDeclareSimdDeclAttr::CreateImplicit( - Context, BS, SL.get(), const_cast(Uniforms.data()), + getASTContext(), BS, SL.get(), const_cast(Uniforms.data()), Uniforms.size(), const_cast(Aligneds.data()), Aligneds.size(), const_cast(NewAligns.data()), NewAligns.size(), const_cast(Linears.data()), Linears.size(), @@ -7263,7 +7367,7 @@ static void setPrototype(Sema &S, FunctionDecl *FD, FunctionDecl *FDWithProto, FD->setParams(Params); } -void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) { +void SemaOpenMP::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) { if (D->isInvalidDecl()) return; FunctionDecl *FD = nullptr; @@ -7276,7 +7380,7 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) { // If we are instantiating templates we do *not* apply scoped assumptions but // only global ones. We apply scoped assumption to the template definition // though. - if (!inTemplateInstantiation()) { + if (!SemaRef.inTemplateInstantiation()) { for (OMPAssumeAttr *AA : OMPAssumeScoped) FD->addAttr(AA); } @@ -7284,10 +7388,10 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) { FD->addAttr(AA); } -Sema::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI) +SemaOpenMP::OMPDeclareVariantScope::OMPDeclareVariantScope(OMPTraitInfo &TI) : TI(&TI), NameSuffix(TI.getMangledName()) {} -void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( +void SemaOpenMP::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SmallVectorImpl &Bases) { if (!D.getIdentifier()) @@ -7302,12 +7406,12 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( llvm::omp::TraitProperty::implementation_extension_allow_templates)) return; - IdentifierInfo *BaseII = D.getIdentifier(); - LookupResult Lookup(*this, DeclarationName(BaseII), D.getIdentifierLoc(), - LookupOrdinaryName); - LookupParsedName(Lookup, S, &D.getCXXScopeSpec()); + const IdentifierInfo *BaseII = D.getIdentifier(); + LookupResult Lookup(SemaRef, DeclarationName(BaseII), D.getIdentifierLoc(), + Sema::LookupOrdinaryName); + SemaRef.LookupParsedName(Lookup, S, &D.getCXXScopeSpec()); - TypeSourceInfo *TInfo = GetTypeForDeclarator(D); + TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D); QualType FType = TInfo->getType(); bool IsConstexpr = @@ -7336,7 +7440,7 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( QualType UDeclTy = UDecl->getType(); if (!UDeclTy->isDependentType()) { - QualType NewType = Context.mergeFunctionTypes( + QualType NewType = getASTContext().mergeFunctionTypes( FType, UDeclTy, /* OfBlockPointer */ false, /* Unqualified */ false, /* AllowCXX */ true); if (NewType.isNull()) @@ -7352,7 +7456,7 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( // If no base 
was found we create a declaration that we use as base. if (Bases.empty() && UseImplicitBase) { D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration); - Decl *BaseD = HandleDeclarator(S, D, TemplateParamLists); + Decl *BaseD = SemaRef.HandleDeclarator(S, D, TemplateParamLists); BaseD->setImplicit(true); if (auto *BaseTemplD = dyn_cast(BaseD)) Bases.push_back(BaseTemplD->getTemplatedDecl()); @@ -7364,18 +7468,18 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( MangledName += D.getIdentifier()->getName(); MangledName += getOpenMPVariantManglingSeparatorStr(); MangledName += DVScope.NameSuffix; - IdentifierInfo &VariantII = Context.Idents.get(MangledName); + IdentifierInfo &VariantII = getASTContext().Idents.get(MangledName); VariantII.setMangledOpenMPVariantName(true); D.SetIdentifier(&VariantII, D.getBeginLoc()); } -void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( +void SemaOpenMP::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl &Bases) { // Do not mark function as is used to prevent its emission if this is the // only place where it is used. EnterExpressionEvaluationContext Unevaluated( - *this, Sema::ExpressionEvaluationContext::Unevaluated); + SemaRef, Sema::ExpressionEvaluationContext::Unevaluated); FunctionDecl *FD = nullptr; if (auto *UTemplDecl = dyn_cast(D)) @@ -7383,14 +7487,14 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( else FD = cast(D); auto *VariantFuncRef = DeclRefExpr::Create( - Context, NestedNameSpecifierLoc(), SourceLocation(), FD, + getASTContext(), NestedNameSpecifierLoc(), SourceLocation(), FD, /* RefersToEnclosingVariableOrCapture */ false, /* NameLoc */ FD->getLocation(), FD->getType(), ExprValueKind::VK_PRValue); OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back(); auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit( - Context, VariantFuncRef, DVScope.TI, + getASTContext(), VariantFuncRef, DVScope.TI, /*NothingArgs=*/nullptr, /*NothingArgsSize=*/0, /*NeedDevicePtrArgs=*/nullptr, /*NeedDevicePtrArgsSize=*/0, /*AppendArgs=*/nullptr, /*AppendArgsSize=*/0); @@ -7398,10 +7502,11 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( BaseFD->addAttr(OMPDeclareVariantA); } -ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, - SourceLocation LParenLoc, - MultiExprArg ArgExprs, - SourceLocation RParenLoc, Expr *ExecConfig) { +ExprResult SemaOpenMP::ActOnOpenMPCall(ExprResult Call, Scope *Scope, + SourceLocation LParenLoc, + MultiExprArg ArgExprs, + SourceLocation RParenLoc, + Expr *ExecConfig) { // The common case is a regular call we do not want to specialize at all. Try // to make that case fast by bailing early. 
CallExpr *CE = dyn_cast(Call.get()); @@ -7412,7 +7517,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, if (!CalleeFnDecl) return Call; - if (LangOpts.OpenMP >= 51 && CalleeFnDecl->getIdentifier() && + if (getLangOpts().OpenMP >= 51 && CalleeFnDecl->getIdentifier() && CalleeFnDecl->getName().starts_with_insensitive("omp_")) { // checking for any calls inside an Order region if (Scope && Scope->isOpenMPOrderClauseScope()) @@ -7431,7 +7536,8 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, << ISATrait; }; TargetOMPContext OMPCtx(Context, std::move(DiagUnknownTrait), - getCurFunctionDecl(), DSAStack->getConstructTraits()); + SemaRef.getCurFunctionDecl(), + DSAStack->getConstructTraits()); QualType CalleeFnType = CalleeFnDecl->getType(); @@ -7476,7 +7582,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, // different type than the base function. This is intended and OK but if // we cannot create a call the difference is not in the "implementation // defined range" we allow. - Sema::TentativeAnalysisScope Trap(*this); + Sema::TentativeAnalysisScope Trap(SemaRef); if (auto *SpecializedMethod = dyn_cast(BestDecl)) { auto *MemberCall = dyn_cast(CE); @@ -7485,12 +7591,12 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, /* IsArrow */ false, SpecializedMethod, Context.BoundMemberTy, MemberCall->getValueKind(), MemberCall->getObjectKind()); } - NewCall = BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, RParenLoc, - ExecConfig); + NewCall = SemaRef.BuildCallExpr(Scope, BestExpr, LParenLoc, ArgExprs, + RParenLoc, ExecConfig); if (NewCall.isUsable()) { if (CallExpr *NCE = dyn_cast(NewCall.get())) { FunctionDecl *NewCalleeFnDecl = NCE->getDirectCallee(); - QualType NewType = Context.mergeFunctionTypes( + QualType NewType = getASTContext().mergeFunctionTypes( CalleeFnType, NewCalleeFnDecl->getType(), /* OfBlockPointer */ false, /* Unqualified */ false, /* AllowCXX */ true); @@ -7508,14 +7614,16 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope, if (!NewCall.isUsable()) return Call; - return PseudoObjectExpr::Create(Context, CE, {NewCall.get()}, 0); + return PseudoObjectExpr::Create(getASTContext(), CE, {NewCall.get()}, 0); } std::optional> -Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, - Expr *VariantRef, OMPTraitInfo &TI, - unsigned NumAppendArgs, - SourceRange SR) { +SemaOpenMP::checkOpenMPDeclareVariantFunction(SemaOpenMP::DeclGroupPtrTy DG, + Expr *VariantRef, + OMPTraitInfo &TI, + unsigned NumAppendArgs, + SourceRange SR) { + ASTContext &Context = getASTContext(); if (!DG || DG.get().isNull()) return std::nullopt; @@ -7558,7 +7666,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, // Check if the function was emitted already. const FunctionDecl *Definition; if (!FD->isThisDeclarationADefinition() && FD->isDefined(Definition) && - (LangOpts.EmitAllDecls || Context.DeclMustBeEmitted(Definition))) + (getLangOpts().EmitAllDecls || Context.DeclMustBeEmitted(Definition))) Diag(SR.getBegin(), diag::warn_omp_declare_variant_after_emitted) << FD->getLocation(); @@ -7581,7 +7689,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, // Deal with non-constant score and user condition expressions. 
auto HandleNonConstantScoresAndConditions = [this](Expr *&E, bool IsScore) -> bool { - if (!E || E->isIntegerConstantExpr(Context)) + if (!E || E->isIntegerConstantExpr(getASTContext())) return false; if (IsScore) { @@ -7613,9 +7721,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, // Adjust the function type to account for an extra omp_interop_t for each // specified in the append_args clause. const TypeDecl *TD = nullptr; - LookupResult Result(*this, &Context.Idents.get("omp_interop_t"), + LookupResult Result(SemaRef, &Context.Idents.get("omp_interop_t"), SR.getBegin(), Sema::LookupOrdinaryName); - if (LookupName(Result, getCurScope())) { + if (SemaRef.LookupName(Result, SemaRef.getCurScope())) { NamedDecl *ND = Result.getFoundDecl(); TD = dyn_cast_or_null(ND); } @@ -7638,7 +7746,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, // Convert VariantRef expression to the type of the original function to // resolve possible conflicts. ExprResult VariantRefCast = VariantRef; - if (LangOpts.CPlusPlus) { + if (getLangOpts().CPlusPlus) { QualType FnPtrType; auto *Method = dyn_cast(FD); if (Method && !Method->isStatic()) { @@ -7649,9 +7757,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, { // Build adrr_of unary op to correctly handle type checks for member // functions. - Sema::TentativeAnalysisScope Trap(*this); - ER = CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf, - VariantRef); + Sema::TentativeAnalysisScope Trap(SemaRef); + ER = SemaRef.CreateBuiltinUnaryOp(VariantRef->getBeginLoc(), UO_AddrOf, + VariantRef); } if (!ER.isUsable()) { Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected) @@ -7664,9 +7772,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, } QualType VarianPtrType = Context.getPointerType(VariantRef->getType()); if (VarianPtrType.getUnqualifiedType() != FnPtrType.getUnqualifiedType()) { - ImplicitConversionSequence ICS = TryImplicitConversion( + ImplicitConversionSequence ICS = SemaRef.TryImplicitConversion( VariantRef, FnPtrType.getUnqualifiedType(), - /*SuppressUserConversions=*/false, AllowedExplicit::None, + /*SuppressUserConversions=*/false, Sema::AllowedExplicit::None, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false); @@ -7678,8 +7786,8 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, << (NumAppendArgs ? 1 : 0) << VariantRef->getSourceRange(); return std::nullopt; } - VariantRefCast = PerformImplicitConversion( - VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting); + VariantRefCast = SemaRef.PerformImplicitConversion( + VariantRef, FnPtrType.getUnqualifiedType(), Sema::AA_Converting); if (!VariantRefCast.isUsable()) return std::nullopt; } @@ -7692,7 +7800,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, } } - ExprResult ER = CheckPlaceholderExpr(VariantRefCast.get()); + ExprResult ER = SemaRef.CheckPlaceholderExpr(VariantRefCast.get()); if (!ER.isUsable() || !ER.get()->IgnoreParenImpCasts()->getType()->isFunctionType()) { Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected) @@ -7722,7 +7830,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, } // Check if function types are compatible in C. 
- if (!LangOpts.CPlusPlus) { + if (!getLangOpts().CPlusPlus) { QualType NewType = Context.mergeFunctionTypes(AdjustedFnType, NewFD->getType()); if (NewType.isNull()) { @@ -7734,9 +7842,9 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, } if (NewType->isFunctionProtoType()) { if (FD->getType()->isFunctionNoProtoType()) - setPrototype(*this, FD, NewFD, NewType); + setPrototype(SemaRef, FD, NewFD, NewType); else if (NewFD->getType()->isFunctionNoProtoType()) - setPrototype(*this, NewFD, FD, NewType); + setPrototype(SemaRef, NewFD, FD, NewType); } } @@ -7799,15 +7907,15 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, } // Check general compatibility. - if (areMultiversionVariantFunctionsCompatible( + if (SemaRef.areMultiversionVariantFunctionsCompatible( FD, NewFD, PartialDiagnostic::NullDiagnostic(), PartialDiagnosticAt(SourceLocation(), PartialDiagnostic::NullDiagnostic()), PartialDiagnosticAt( VariantRef->getExprLoc(), - PDiag(diag::err_omp_declare_variant_doesnt_support)), + SemaRef.PDiag(diag::err_omp_declare_variant_doesnt_support)), PartialDiagnosticAt(VariantRef->getExprLoc(), - PDiag(diag::err_omp_declare_variant_diff) + SemaRef.PDiag(diag::err_omp_declare_variant_diff) << FD->getLocation()), /*TemplatesSupported=*/true, /*ConstexprSupported=*/false, /*CLinkageMayDiffer=*/true)) @@ -7815,7 +7923,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG, return std::make_pair(FD, cast(DRE)); } -void Sema::ActOnOpenMPDeclareVariantDirective( +void SemaOpenMP::ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef AdjustArgsNothing, ArrayRef AdjustArgsNeedDevicePtr, @@ -7833,7 +7941,7 @@ void Sema::ActOnOpenMPDeclareVariantDirective( if (!AllAdjustArgs.empty() || !AppendArgs.empty()) { VariantMatchInfo VMI; - TI.getAsVariantMatchInfo(Context, VMI); + TI.getAsVariantMatchInfo(getASTContext(), VMI); if (!llvm::is_contained( VMI.ConstructTraits, llvm::omp::TraitProperty::construct_dispatch_dispatch)) { @@ -7876,18 +7984,18 @@ void Sema::ActOnOpenMPDeclareVariantDirective( } auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit( - Context, VariantRef, &TI, const_cast(AdjustArgsNothing.data()), - AdjustArgsNothing.size(), + getASTContext(), VariantRef, &TI, + const_cast(AdjustArgsNothing.data()), AdjustArgsNothing.size(), const_cast(AdjustArgsNeedDevicePtr.data()), AdjustArgsNeedDevicePtr.size(), const_cast(AppendArgs.data()), AppendArgs.size(), SR); FD->addAttr(NewAttr); } -StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPParallelDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -7899,11 +8007,11 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. 
CS->getCapturedDecl()->setNothrow(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - DSAStack->getTaskgroupReductionRef(), - DSAStack->isCancelRegion()); + return OMPParallelDirective::Create( + getASTContext(), StartLoc, EndLoc, Clauses, AStmt, + DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } namespace { @@ -8153,7 +8261,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) { if (!NewStep->isValueDependent()) { // Check that the step is integer expression. SourceLocation StepLoc = NewStep->getBeginLoc(); - ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion( + ExprResult Val = SemaRef.OpenMP().PerformOpenMPImplicitIntegerConversion( StepLoc, getExprAsWritten(NewStep)); if (Val.isInvalid()) return true; @@ -9175,7 +9283,7 @@ DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar( DSAStackTy &DSA) const { auto *VD = dyn_cast(LCDecl); if (!VD) { - VD = SemaRef.isOpenMPCapturedDecl(LCDecl); + VD = SemaRef.OpenMP().isOpenMPCapturedDecl(LCDecl); DeclRefExpr *Ref = buildDeclRefExpr( SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc); const DSAStackTy::DSAVarData Data = @@ -9248,14 +9356,15 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData( } } // namespace -void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { +void SemaOpenMP::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, + Stmt *Init) { assert(getLangOpts().OpenMP && "OpenMP is not active."); assert(Init && "Expected loop in canonical form."); unsigned AssociatedLoops = DSAStack->getAssociatedLoops(); if (AssociatedLoops > 0 && isOpenMPLoopDirective(DSAStack->getCurrentDirective())) { DSAStack->loopStart(); - OpenMPIterationSpaceChecker ISC(*this, /*SupportsNonRectangular=*/true, + OpenMPIterationSpaceChecker ISC(SemaRef, /*SupportsNonRectangular=*/true, *DSAStack, ForLoc); if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) { if (ValueDecl *D = ISC.getLoopDecl()) { @@ -9265,7 +9374,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { if (VarDecl *Private = isOpenMPCapturedDecl(D)) { VD = Private; } else { - PrivateRef = buildCapture(*this, D, ISC.getLoopDeclRefExpr(), + PrivateRef = buildCapture(SemaRef, D, ISC.getLoopDeclRefExpr(), /*WithInit=*/false); VD = cast(PrivateRef->getDecl()); } @@ -9275,10 +9384,10 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { if (LD != D->getCanonicalDecl()) { DSAStack->resetPossibleLoopCounter(); if (auto *Var = dyn_cast_or_null(LD)) - MarkDeclarationsReferencedInExpr( - buildDeclRefExpr(*this, const_cast(Var), - Var->getType().getNonLValueExprType(Context), - ForLoc, /*RefersToCapture=*/true)); + SemaRef.MarkDeclarationsReferencedInExpr(buildDeclRefExpr( + SemaRef, const_cast(Var), + Var->getType().getNonLValueExprType(getASTContext()), ForLoc, + /*RefersToCapture=*/true)); } OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables @@ -9299,8 +9408,8 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { : OMPC_private; if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && DVar.CKind != PredeterminedCKind && DVar.RefExpr && - (LangOpts.OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate && - DVar.CKind != OMPC_private))) || + (getLangOpts().OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate && + DVar.CKind != 
OMPC_private))) || ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop || DKind == OMPD_master_taskloop || DKind == OMPD_masked_taskloop || DKind == OMPD_parallel_master_taskloop || @@ -9315,7 +9424,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { << getOpenMPClauseName(PredeterminedCKind); if (DVar.RefExpr == nullptr) DVar.CKind = PredeterminedCKind; - reportOriginalDsa(*this, DSAStack, D, DVar, + reportOriginalDsa(SemaRef, DSAStack, D, DVar, /*IsLoopIterVar=*/true); } else if (LoopDeclRefExpr) { // Make the loop iteration variable private (for worksharing @@ -9355,7 +9464,7 @@ static bool checkOpenMPIterationSpace( unsigned CurrentNestedLoopCount, unsigned NestedLoopCount, unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr, - Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA, + SemaOpenMP::VarsWithInheritedDSAType &VarsWithImplicitDSA, llvm::MutableArrayRef ResultIterSpaces, llvm::MapVector &Captures) { bool SupportsNonRectangular = !isOpenMPLoopTransformationDirective(DKind); @@ -9744,7 +9853,7 @@ static unsigned checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef, DSAStackTy &DSA, - Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA, + SemaOpenMP::VarsWithInheritedDSAType &VarsWithImplicitDSA, OMPLoopBasedDirective::HelperExprs &Built) { unsigned NestedLoopCount = 1; bool SupportsNonPerfectlyNested = (SemaRef.LangOpts.OpenMP >= 50) && @@ -10493,7 +10602,8 @@ static bool checkGenericLoopLastprivate(Sema &S, ArrayRef Clauses, OpenMPDirectiveKind K, DSAStackTy *Stack); -bool Sema::checkLastPrivateForMappedDirectives(ArrayRef Clauses) { +bool SemaOpenMP::checkLastPrivateForMappedDirectives( + ArrayRef Clauses) { // Check for syntax of lastprivate // Param of the lastprivate have different meanings in the mapped directives @@ -10501,16 +10611,15 @@ bool Sema::checkLastPrivateForMappedDirectives(ArrayRef Clauses) { // "omp for" lastprivate vars must be shared if (getLangOpts().OpenMP >= 50 && DSAStack->getMappedDirective() == OMPD_loop && - checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack)) { + checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_loop, DSAStack)) { return false; } return true; } -StmtResult -Sema::ActOnOpenMPSimdDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA) { +StmtResult SemaOpenMP::ActOnOpenMPSimdDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) return StmtError(); @@ -10523,38 +10632,37 @@ Sema::ActOnOpenMPSimdDirective(ArrayRef Clauses, Stmt *AStmt, // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses), - AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); + AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); auto *SimdDirective = OMPSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getMappedDirective()); return SimdDirective; } -StmtResult -Sema::ActOnOpenMPForDirective(ArrayRef Clauses, Stmt *AStmt, - SourceLocation StartLoc, SourceLocation EndLoc, - VarsWithInheritedDSAType &VarsWithImplicitDSA) { +StmtResult SemaOpenMP::ActOnOpenMPForDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) return StmtError(); @@ -10567,32 +10675,32 @@ Sema::ActOnOpenMPForDirective(ArrayRef Clauses, Stmt *AStmt, // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses), - AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); + AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } auto *ForDirective = OMPForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion(), DSAStack->getMappedDirective()); return ForDirective; } -StmtResult Sema::ActOnOpenMPForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10604,37 +10712,37 @@ StmtResult Sema::ActOnOpenMPForSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack, + getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, - Clauses, AStmt, B); + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPForSimdDirective::Create(getASTContext(), StartLoc, EndLoc, + NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPSectionsDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -10663,23 +10771,23 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef Clauses, return StmtError(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - DSAStack->getTaskgroupReductionRef(), - DSAStack->isCancelRegion()); + return OMPSectionsDirective::Create( + getASTContext(), StartLoc, EndLoc, Clauses, AStmt, + DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPSectionDirective(Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentCancelRegion(DSAStack->isCancelRegion()); - return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt, + return OMPSectionDirective::Create(getASTContext(), StartLoc, EndLoc, AStmt, DSAStack->isCancelRegion()); } @@ -10691,10 +10799,10 @@ static Expr *getDirectCallExpr(Expr *E) { return nullptr; } -StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPDispatchDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -10707,7 +10815,7 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef Clauses, SourceLocation TargetCallLoc; - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { Expr *TargetCall = nullptr; auto *E = dyn_cast(S); @@ -10735,10 +10843,10 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef Clauses, TargetCallLoc = TargetCall->getExprLoc(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPDispatchDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - TargetCallLoc); + return OMPDispatchDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt, TargetCallLoc); } static bool checkGenericLoopLastprivate(Sema &S, ArrayRef Clauses, @@ -10766,7 +10874,7 @@ static bool checkGenericLoopLastprivate(Sema &S, ArrayRef Clauses, return ErrorFound; } -StmtResult Sema::ActOnOpenMPGenericLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPGenericLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, 
VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10775,7 +10883,7 @@ StmtResult Sema::ActOnOpenMPGenericLoopDirective( // OpenMP 5.1 [2.11.7, loop construct, Restrictions] // A list item may not appear in a lastprivate clause unless it is the // loop iteration variable of a loop that is associated with the construct. - if (checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack)) + if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_loop, DSAStack)) return StmtError(); auto *CS = cast(AStmt); @@ -10790,19 +10898,19 @@ StmtResult Sema::ActOnOpenMPGenericLoopDirective( // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_loop, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses), - AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); + AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); - setFunctionHasBranchProtectedScope(); - return OMPGenericLoopDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPGenericLoopDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPTeamsGenericLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10811,7 +10919,7 @@ StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective( // OpenMP 5.1 [2.11.7, loop construct, Restrictions] // A list item may not appear in a lastprivate clause unless it is the // loop iteration variable of a loop that is associated with the construct. - if (checkGenericLoopLastprivate(*this, Clauses, OMPD_teams_loop, DSAStack)) + if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_teams_loop, DSAStack)) return StmtError(); auto *CS = cast(AStmt); @@ -10836,22 +10944,22 @@ StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective( // In presence of clause 'collapse', it will define the nested loops number. 
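// Illustrative sketch, not from this patch: the hunks above and below mechanically move
// Sema::ActOnOpenMP* entry points onto the SemaOpenMP component, so Context becomes
// getASTContext(), CurContext/CurScope are reached through SemaRef, and helpers take
// SemaRef instead of *this. The toy below (hypothetical names, not clang's real classes)
// shows the shape of that split: a language-specific helper with no inherited state that
// routes everything through a back-reference to the main analysis object.
#include <iostream>
#include <string>

struct ToyASTContext {
  std::string Triple = "toy-triple";
};

class ToySema; // stand-in for the main analysis object

class ToySemaOpenMP {
  ToySema &SemaRef; // back-reference, analogous to SemaOpenMP::SemaRef
public:
  explicit ToySemaOpenMP(ToySema &S) : SemaRef(S) {}
  void actOnDirective(const std::string &Name);
};

class ToySema {
  ToyASTContext Ctx;
  ToySemaOpenMP OpenMP{*this};
public:
  ToyASTContext &getASTContext() { return Ctx; }
  ToySemaOpenMP &openMP() { return OpenMP; }
  void setFunctionHasBranchProtectedScope() {
    std::cout << "marking branch-protected scope\n";
  }
};

void ToySemaOpenMP::actOnDirective(const std::string &Name) {
  // What used to be an inherited member access now goes through SemaRef.
  std::cout << "directive '" << Name << "' for "
            << SemaRef.getASTContext().Triple << "\n";
  SemaRef.setFunctionHasBranchProtectedScope();
}

int main() {
  ToySema S;
  S.openMP().actOnDirective("parallel for");
  return 0;
}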
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_teams_loop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); return OMPTeamsGenericLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsGenericLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10860,7 +10968,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective( // OpenMP 5.1 [2.11.7, loop construct, Restrictions] // A list item may not appear in a lastprivate clause unless it is the // loop iteration variable of a loop that is associated with the construct. - if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_teams_loop, + if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_target_teams_loop, DSAStack)) return StmtError(); @@ -10886,21 +10994,22 @@ StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective( // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_target_teams_loop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetTeamsGenericLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + teamsLoopCanBeParallelFor(AStmt, SemaRef)); } -StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelGenericLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10909,7 +11018,8 @@ StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective( // OpenMP 5.1 [2.11.7, loop construct, Restrictions] // A list item may not appear in a lastprivate clause unless it is the // loop iteration variable of a loop that is associated with the construct. - if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack)) + if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_parallel_loop, + DSAStack)) return StmtError(); auto *CS = cast(AStmt); @@ -10934,21 +11044,21 @@ StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective( // In presence of clause 'collapse', it will define the nested loops number. 
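// Illustrative sketch, not from this patch: checkGenericLoopLastprivate, invoked above for
// the loop, teams loop, target teams loop, and parallel loop variants, rejects lastprivate
// list items that are not the iteration variable of an associated loop (the OpenMP 5.1
// restriction quoted in the comments). In user code that restriction looks like this:
int toy_loop_lastprivate() {
  int i = 0, last = 0;

  // Accepted: the list item is the loop iteration variable.
#pragma omp loop lastprivate(i)
  for (i = 0; i < 100; ++i)
    last = i;

  // Rejected by the check above: 'last' is not the iteration variable of a
  // loop associated with the construct.
  // #pragma omp loop lastprivate(last)
  // for (i = 0; i < 100; ++i)
  //   last = i;

  return last;
}

int main() { return toy_loop_lastprivate() == 99 ? 0 : 1; }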
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelGenericLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetParallelGenericLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -10957,7 +11067,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective( // OpenMP 5.1 [2.11.7, loop construct, Restrictions] // A list item may not appear in a lastprivate clause unless it is the // loop iteration variable of a loop that is associated with the construct. - if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_parallel_loop, + if (checkGenericLoopLastprivate(SemaRef, Clauses, OMPD_target_parallel_loop, DSAStack)) return StmtError(); @@ -10983,30 +11093,30 @@ StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective( // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_target_parallel_loop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetParallelGenericLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPSingleDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); assert(isa(AStmt) && "Captured statement expected"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); // OpenMP [2.7.3, single Construct, Restrictions] // The copyprivate clause must not be used with the nowait clause. 
@@ -11025,33 +11135,35 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef Clauses, } } - return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPSingleDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } -StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPMasterDirective(Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt); + return OMPMasterDirective::Create(getASTContext(), StartLoc, EndLoc, AStmt); } -StmtResult Sema::ActOnOpenMPMaskedDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPMaskedDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPMaskedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPMaskedDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } -StmtResult Sema::ActOnOpenMPCriticalDirective( +StmtResult SemaOpenMP::ActOnOpenMPCriticalDirective( const DeclarationNameInfo &DirName, ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { if (!AStmt) @@ -11072,7 +11184,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective( E->isInstantiationDependent()) { DependentHint = true; } else { - Hint = E->EvaluateKnownConstInt(Context); + Hint = E->EvaluateKnownConstInt(getASTContext()); HintLoc = C->getBeginLoc(); } } @@ -11091,7 +11203,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective( if (const auto *C = Pair.first->getSingleClause()) { Diag(C->getBeginLoc(), diag::note_omp_critical_hint_here) << 1 - << toString(C->getHint()->EvaluateKnownConstInt(Context), + << toString(C->getHint()->EvaluateKnownConstInt(getASTContext()), /*Radix=*/10, /*Signed=*/false); } else { Diag(Pair.first->getBeginLoc(), diag::note_omp_critical_no_hint) << 1; @@ -11099,16 +11211,16 @@ StmtResult Sema::ActOnOpenMPCriticalDirective( } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - auto *Dir = OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc, - Clauses, AStmt); + auto *Dir = OMPCriticalDirective::Create(getASTContext(), DirName, StartLoc, + EndLoc, Clauses, AStmt); if (!Pair.first && DirName.getName() && !DependentHint) DSAStack->addCriticalWithHint(Dir, Hint); return Dir; } -StmtResult Sema::ActOnOpenMPParallelForDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -11127,32 +11239,32 @@ StmtResult Sema::ActOnOpenMPParallelForDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack, + getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp parallel for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPParallelForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -11171,34 +11283,33 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack, + getOrderedNumberExpr(Clauses), AStmt, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelForSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult -Sema::ActOnOpenMPParallelMasterDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPParallelMasterDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -11211,17 +11322,16 @@ Sema::ActOnOpenMPParallelMasterDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. 
CS->getCapturedDecl()->setNothrow(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMasterDirective::Create( - Context, StartLoc, EndLoc, Clauses, AStmt, + getASTContext(), StartLoc, EndLoc, Clauses, AStmt, DSAStack->getTaskgroupReductionRef()); } -StmtResult -Sema::ActOnOpenMPParallelMaskedDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -11234,17 +11344,16 @@ Sema::ActOnOpenMPParallelMaskedDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. CS->getCapturedDecl()->setNothrow(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMaskedDirective::Create( - Context, StartLoc, EndLoc, Clauses, AStmt, + getASTContext(), StartLoc, EndLoc, Clauses, AStmt, DSAStack->getTaskgroupReductionRef()); } -StmtResult -Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPParallelSectionsDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -11274,10 +11383,10 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef Clauses, return StmtError(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelSectionsDirective::Create( - Context, StartLoc, EndLoc, Clauses, AStmt, + getASTContext(), StartLoc, EndLoc, Clauses, AStmt, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } @@ -11304,16 +11413,17 @@ static bool checkMutuallyExclusiveClauses( return ErrorFound; } -StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTaskDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); // OpenMP 5.0, 2.10.1 task Construct // If a detach clause appears on the directive, then a mergeable clause cannot // appear on the same directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_detach, OMPC_mergeable})) return StmtError(); @@ -11325,26 +11435,26 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. 
CS->getCapturedDecl()->setNothrow(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - DSAStack->isCancelRegion()); + return OMPTaskDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, - SourceLocation EndLoc) { - return OMPTaskyieldDirective::Create(Context, StartLoc, EndLoc); +StmtResult SemaOpenMP::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, + SourceLocation EndLoc) { + return OMPTaskyieldDirective::Create(getASTContext(), StartLoc, EndLoc); } -StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc, - SourceLocation EndLoc) { - return OMPBarrierDirective::Create(Context, StartLoc, EndLoc); +StmtResult SemaOpenMP::ActOnOpenMPBarrierDirective(SourceLocation StartLoc, + SourceLocation EndLoc) { + return OMPBarrierDirective::Create(getASTContext(), StartLoc, EndLoc); } -StmtResult Sema::ActOnOpenMPErrorDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - bool InExContext) { +StmtResult SemaOpenMP::ActOnOpenMPErrorDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc, + bool InExContext) { const OMPAtClause *AtC = OMPExecutableDirective::getSingleClause(Clauses); @@ -11369,12 +11479,13 @@ StmtResult Sema::ActOnOpenMPErrorDirective(ArrayRef Clauses, if (!SeverityC || SeverityC->getSeverityKind() != OMPC_SEVERITY_warning) return StmtError(); } - return OMPErrorDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPErrorDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses); } -StmtResult Sema::ActOnOpenMPTaskwaitDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPTaskwaitDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc) { const OMPNowaitClause *NowaitC = OMPExecutableDirective::getSingleClause(Clauses); bool HasDependC = @@ -11385,28 +11496,29 @@ StmtResult Sema::ActOnOpenMPTaskwaitDirective(ArrayRef Clauses, return StmtError(); } - return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPTaskwaitDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses); } -StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPTaskgroupDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); assert(isa(AStmt) && "Captured statement expected"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPTaskgroupDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt, + return OMPTaskgroupDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt, DSAStack->getTaskgroupReductionRef()); } -StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPFlushDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc) { OMPFlushClause *FC = nullptr; OMPClause *OrderClause = nullptr; for (OMPClause *C : Clauses) { @@ -11440,12 +11552,12 @@ StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef Clauses, << getOpenMPClauseName(OrderClause->getClauseKind()); return StmtError(); } - return 
OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPFlushDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses); } -StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPDepobjDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (Clauses.empty()) { Diag(StartLoc, diag::err_omp_depobj_expected); return StmtError(); @@ -11462,12 +11574,12 @@ StmtResult Sema::ActOnOpenMPDepobjDirective(ArrayRef Clauses, Diag(Clauses[0]->getEndLoc(), diag::err_omp_depobj_single_clause_expected); return StmtError(); } - return OMPDepobjDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPDepobjDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses); } -StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPScanDirective(ArrayRef Clauses, + SourceLocation StartLoc, + SourceLocation EndLoc) { // Check that exactly one clause is specified. if (Clauses.size() != 1) { Diag(Clauses.empty() ? EndLoc : Clauses[1]->getBeginLoc(), @@ -11492,13 +11604,13 @@ StmtResult Sema::ActOnOpenMPScanDirective(ArrayRef Clauses, return StmtError(); } DSAStack->setParentHasScanDirective(StartLoc); - return OMPScanDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPScanDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses); } -StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPOrderedDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { const OMPClause *DependFound = nullptr; const OMPClause *DependSourceClause = nullptr; const OMPClause *DependSinkClause = nullptr; @@ -11557,7 +11669,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef Clauses, // An ordered construct with the simd clause is the only OpenMP construct // that can appear in the simd region. Diag(StartLoc, diag::err_omp_prohibited_region_simd) - << (LangOpts.OpenMP >= 50 ? 1 : 0); + << (getLangOpts().OpenMP >= 50 ? 1 : 0); ErrorFound = true; } else if ((DependFound || DoacrossFound) && (TC || SC)) { SourceLocation Loc = @@ -11604,10 +11716,11 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef Clauses, if (AStmt) { assert(isa(AStmt) && "Captured statement expected"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); } - return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPOrderedDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } namespace { @@ -12665,10 +12778,11 @@ bool OpenMPAtomicCompareCaptureChecker::checkStmt(Stmt *S, } } // namespace -StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPAtomicDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { + ASTContext &Context = getASTContext(); // Register location of the first atomic directive. 
DSAStack->addAtomicDirectiveLoc(StartLoc); if (!AStmt) @@ -12871,7 +12985,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, << ErrorFound << NoteRange; return StmtError(); } - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) V = X = nullptr; } else if (AtomicKind == OMPC_write) { enum { @@ -12933,7 +13047,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, << ErrorFound << NoteRange; return StmtError(); } - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) E = X = nullptr; } else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) { // If clause is update: @@ -12944,7 +13058,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, // x binop= expr; // x = x binop expr; // x = expr binop x; - OpenMPAtomicUpdateChecker Checker(*this); + OpenMPAtomicUpdateChecker Checker(SemaRef); if (Checker.checkStatement( Body, (AtomicKind == OMPC_update) @@ -12952,7 +13066,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, : diag::err_omp_atomic_not_expression_statement, diag::note_omp_atomic_update)) return StmtError(); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { E = Checker.getExpr(); X = Checker.getX(); UE = Checker.getUpdateExpr(); @@ -12982,7 +13096,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) { V = AtomicBinOp->getLHS(); Body = AtomicBinOp->getRHS()->IgnoreParenImpCasts(); - OpenMPAtomicUpdateChecker Checker(*this); + OpenMPAtomicUpdateChecker Checker(SemaRef); if (Checker.checkStatement( Body, diag::err_omp_atomic_capture_not_expression_statement, diag::note_omp_atomic_update)) @@ -13007,7 +13121,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange; return StmtError(); } - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) UE = V = E = X = nullptr; } else { // If clause is a capture: @@ -13036,14 +13150,14 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, if (auto *EWC = dyn_cast(Second)) Second = EWC->getSubExpr()->IgnoreParenImpCasts(); // Need to find what subexpression is 'v' and what is 'x'. 
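// Illustrative sketch, not from this patch: the atomic checkers above (now constructed
// with SemaRef) classify the associated statement by clause kind. For reference, these
// are the canonical source forms being matched, per the OpenMP spec; x, v, and expr are
// placeholders:
#include <cstdio>

int main() {
  int x = 0, v = 0;
  int expr = 5;

#pragma omp atomic read
  v = x;                 // read:    v = x;

#pragma omp atomic write
  x = expr;              // write:   x = expr;

#pragma omp atomic update
  x += expr;             // update:  x binop= expr;  (or x = x binop expr;)

#pragma omp atomic capture
  { v = x; x += expr; }  // capture: read of x followed by an update of x

  std::printf("x=%d v=%d\n", x, v);
  return 0;
}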
- OpenMPAtomicUpdateChecker Checker(*this); + OpenMPAtomicUpdateChecker Checker(SemaRef); bool IsUpdateExprFound = !Checker.checkStatement(Second); BinaryOperator *BinOp = nullptr; if (IsUpdateExprFound) { BinOp = dyn_cast(First); IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign; } - if (IsUpdateExprFound && !CurContext->isDependentContext()) { + if (IsUpdateExprFound && !SemaRef.CurContext->isDependentContext()) { // { v = x; x++; } // { v = x; x--; } // { v = x; ++x; } @@ -13073,7 +13187,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, BinOp = dyn_cast(Second); IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign; } - if (IsUpdateExprFound && !CurContext->isDependentContext()) { + if (IsUpdateExprFound && + !SemaRef.CurContext->isDependentContext()) { // { x++; v = x; } // { x--; v = x; } // { ++x; v = x; } @@ -13170,12 +13285,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange; return StmtError(); } - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) UE = V = E = X = nullptr; } else if (AtomicKind == OMPC_compare) { if (IsCompareCapture) { OpenMPAtomicCompareCaptureChecker::ErrorInfoTy ErrorInfo; - OpenMPAtomicCompareCaptureChecker Checker(*this); + OpenMPAtomicCompareCaptureChecker Checker(SemaRef); if (!Checker.checkStmt(Body, ErrorInfo)) { Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare_capture) << ErrorInfo.ErrorRange; @@ -13195,7 +13310,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, IsPostfixUpdate = Checker.isPostfixUpdate(); } else { OpenMPAtomicCompareChecker::ErrorInfoTy ErrorInfo; - OpenMPAtomicCompareChecker Checker(*this); + OpenMPAtomicCompareChecker Checker(SemaRef); if (!Checker.checkStmt(Body, ErrorInfo)) { Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare) << ErrorInfo.ErrorRange; @@ -13233,17 +13348,17 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef Clauses, } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPAtomicDirective::Create( Context, StartLoc, EndLoc, Clauses, AStmt, {X, V, R, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly}); } -StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTargetDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -13300,15 +13415,15 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef Clauses, } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPTargetDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } -StmtResult -Sema::ActOnOpenMPTargetParallelDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTargetParallelDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -13330,14 +13445,14 @@ Sema::ActOnOpenMPTargetParallelDirective(ArrayRef Clauses, CS->getCapturedDecl()->setNothrow(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetParallelDirective::Create( - Context, StartLoc, EndLoc, Clauses, AStmt, + getASTContext(), StartLoc, 
EndLoc, Clauses, AStmt, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPTargetParallelForDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13366,28 +13481,28 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_target_parallel_for, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, + getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target parallel for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetParallelForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } @@ -13424,10 +13539,10 @@ static bool isClauseMappable(ArrayRef Clauses) { return true; } -StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult +SemaOpenMP::ActOnOpenMPTargetDataDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -13437,9 +13552,10 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef Clauses, // At least one map, use_device_addr or use_device_ptr clause must appear on // the directive. 
if (!hasClauses(Clauses, OMPC_map, OMPC_use_device_ptr) && - (LangOpts.OpenMP < 50 || !hasClauses(Clauses, OMPC_use_device_addr))) { + (getLangOpts().OpenMP < 50 || + !hasClauses(Clauses, OMPC_use_device_addr))) { StringRef Expected; - if (LangOpts.OpenMP < 50) + if (getLangOpts().OpenMP < 50) Expected = "'map' or 'use_device_ptr'"; else Expected = "'map', 'use_device_ptr', or 'use_device_addr'"; @@ -13448,16 +13564,15 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef Clauses, return StmtError(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPTargetDataDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt); + return OMPTargetDataDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt); } -StmtResult -Sema::ActOnOpenMPTargetEnterDataDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, Stmt *AStmt) { +StmtResult SemaOpenMP::ActOnOpenMPTargetEnterDataDirective( + ArrayRef Clauses, SourceLocation StartLoc, + SourceLocation EndLoc, Stmt *AStmt) { if (!AStmt) return StmtError(); @@ -13487,14 +13602,13 @@ Sema::ActOnOpenMPTargetEnterDataDirective(ArrayRef Clauses, return StmtError(); } - return OMPTargetEnterDataDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt); + return OMPTargetEnterDataDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt); } -StmtResult -Sema::ActOnOpenMPTargetExitDataDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, Stmt *AStmt) { +StmtResult SemaOpenMP::ActOnOpenMPTargetExitDataDirective( + ArrayRef Clauses, SourceLocation StartLoc, + SourceLocation EndLoc, Stmt *AStmt) { if (!AStmt) return StmtError(); @@ -13524,14 +13638,13 @@ Sema::ActOnOpenMPTargetExitDataDirective(ArrayRef Clauses, return StmtError(); } - return OMPTargetExitDataDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt); + return OMPTargetExitDataDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt); } -StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - Stmt *AStmt) { +StmtResult SemaOpenMP::ActOnOpenMPTargetUpdateDirective( + ArrayRef Clauses, SourceLocation StartLoc, + SourceLocation EndLoc, Stmt *AStmt) { if (!AStmt) return StmtError(); @@ -13563,13 +13676,14 @@ StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef Clauses, return StmtError(); } - return OMPTargetUpdateDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt); + return OMPTargetUpdateDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt); } -StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTeamsDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -13585,17 +13699,17 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. 
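// Illustrative sketch, not from this patch: the target data hunk above now reads the
// language options through getLangOpts() and picks its "expected clause" wording based
// on the OpenMP version, since use_device_addr only became an accepted alternative in
// OpenMP 5.0. A minimal standalone version gate with a hypothetical helper name:
#include <iostream>
#include <string>

std::string expectedTargetDataClauses(unsigned OpenMPVersion) {
  if (OpenMPVersion < 50)
    return "'map' or 'use_device_ptr'";
  return "'map', 'use_device_ptr', or 'use_device_addr'";
}

int main() {
  std::cout << expectedTargetDataClauses(45) << "\n";
  std::cout << expectedTargetDataClauses(51) << "\n";
  return 0;
}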
CS->getCapturedDecl()->setNothrow(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); - return OMPTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPTeamsDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } -StmtResult -Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, - SourceLocation EndLoc, - OpenMPDirectiveKind CancelRegion) { +StmtResult SemaOpenMP::ActOnOpenMPCancellationPointDirective( + SourceLocation StartLoc, SourceLocation EndLoc, + OpenMPDirectiveKind CancelRegion) { if (DSAStack->isParentNowaitRegion()) { Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 0; return StmtError(); @@ -13604,14 +13718,13 @@ Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 0; return StmtError(); } - return OMPCancellationPointDirective::Create(Context, StartLoc, EndLoc, - CancelRegion); + return OMPCancellationPointDirective::Create(getASTContext(), StartLoc, + EndLoc, CancelRegion); } -StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef Clauses, - SourceLocation StartLoc, - SourceLocation EndLoc, - OpenMPDirectiveKind CancelRegion) { +StmtResult SemaOpenMP::ActOnOpenMPCancelDirective( + ArrayRef Clauses, SourceLocation StartLoc, + SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion) { if (DSAStack->isParentNowaitRegion()) { Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 1; return StmtError(); @@ -13621,7 +13734,7 @@ StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef Clauses, return StmtError(); } DSAStack->setParentCancelRegion(/*Cancel=*/true); - return OMPCancelDirective::Create(Context, StartLoc, EndLoc, Clauses, + return OMPCancelDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, CancelRegion); } @@ -13652,7 +13765,7 @@ static bool checkReductionClauseWithNogroup(Sema &S, return false; } -StmtResult Sema::ActOnOpenMPTaskLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13664,33 +13777,33 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. 
- if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13702,21 +13815,21 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } @@ -13724,23 +13837,23 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective( // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPTaskLoopSimdDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPTaskLoopSimdDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPMasterTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13752,33 +13865,33 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_master_taskloop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPMasterTaskLoopDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPMasterTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPMaskedTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13790,33 +13903,33 @@ StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_masked_taskloop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. 
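// Illustrative sketch, not from this patch: every taskloop variant above repeats the same
// restriction checks through SemaRef, including the rule that grainsize and num_tasks are
// mutually exclusive on one directive. A generic standalone check over a clause list, with
// made-up clause kinds:
#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <vector>

using ClauseKind = std::string;

// Returns the offending clause if two different kinds from Exclusive both appear.
std::optional<ClauseKind>
findMutuallyExclusive(const std::vector<ClauseKind> &Clauses,
                      const std::set<ClauseKind> &Exclusive) {
  std::optional<ClauseKind> Seen;
  for (const ClauseKind &C : Clauses) {
    if (!Exclusive.count(C))
      continue;
    if (Seen && *Seen != C)
      return C; // second, conflicting clause
    Seen = C;
  }
  return std::nullopt;
}

int main() {
  std::vector<ClauseKind> Clauses = {"collapse", "grainsize", "num_tasks"};
  if (auto Bad = findMutuallyExclusive(Clauses, {"grainsize", "num_tasks"}))
    std::cout << "error: '" << *Bad << "' conflicts with a previous clause\n";
  return 0;
}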
- if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPMaskedTaskLoopDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPMaskedTaskLoopDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13828,21 +13941,21 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_master_taskloop_simd, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } @@ -13850,23 +13963,23 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective( // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPMasterTaskLoopSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPMaskedTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13878,21 +13991,21 @@ StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_masked_taskloop_simd, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack, - VarsWithImplicitDSA, B); + /*OrderedLoopCountExpr=*/nullptr, AStmt, SemaRef, + *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } @@ -13900,23 +14013,23 @@ StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective( // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPMaskedTaskLoopSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -13947,33 +14060,33 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_parallel_master_taskloop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. 
- if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMasterTaskLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedTaskLoopDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14004,33 +14117,33 @@ StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_parallel_masked_taskloop, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMaskedTaskLoopDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14061,21 +14174,21 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_parallel_master_taskloop_simd, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } @@ -14083,23 +14196,23 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective( // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. - if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMasterTaskLoopSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPParallelMaskedTaskLoopSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14130,21 +14243,21 @@ StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_parallel_masked_taskloop_simd, getCollapseNumberExpr(Clauses), - /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack, + /*OrderedLoopCountExpr=*/nullptr, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } @@ -14152,23 +14265,23 @@ StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective( // OpenMP, [2.9.2 taskloop Construct, Restrictions] // The grainsize clause and num_tasks clause are mutually exclusive and may // not appear on the same taskloop directive. - if (checkMutuallyExclusiveClauses(*this, Clauses, + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, {OMPC_grainsize, OMPC_num_tasks})) return StmtError(); // OpenMP, [2.9.2 taskloop Construct, Restrictions] // If a reduction clause is present on the taskloop directive, the nogroup // clause must not be specified. 
- if (checkReductionClauseWithNogroup(*this, Clauses)) + if (checkReductionClauseWithNogroup(SemaRef, Clauses)) return StmtError(); - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPParallelMaskedTaskLoopSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPDistributeDirective( +StmtResult SemaOpenMP::ActOnOpenMPDistributeDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14184,21 +14297,21 @@ StmtResult Sema::ActOnOpenMPDistributeDirective( unsigned NestedLoopCount = checkOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses), nullptr /*ordered not a clause on distribute*/, AStmt, - *this, *DSAStack, VarsWithImplicitDSA, B); + SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); auto *DistributeDirective = OMPDistributeDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getMappedDirective()); return DistributeDirective; } -StmtResult Sema::ActOnOpenMPDistributeParallelForDirective( +StmtResult SemaOpenMP::ActOnOpenMPDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14228,21 +14341,21 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_distribute_parallel_for, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPDistributeParallelForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14272,34 +14385,34 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop( OMPD_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPDistributeParallelForSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPDistributeSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14328,34 +14441,34 @@ StmtResult Sema::ActOnOpenMPDistributeSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_distribute_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, - *DSAStack, VarsWithImplicitDSA, B); + nullptr /*ordered not a clause on distribute*/, CS, + SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPDistributeSimdDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPDistributeSimdDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14385,33 +14498,33 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop( OMPD_target_parallel_for_simd, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, VarsWithImplicitDSA, - B); + getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack, + VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target parallel for simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetParallelForSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14440,34 +14553,34 @@ StmtResult Sema::ActOnOpenMPTargetSimdDirective( // nested loops number. unsigned NestedLoopCount = checkOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses), - getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, + getOrderedNumberExpr(Clauses), CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); - return OMPTargetSimdDirective::Create(Context, StartLoc, EndLoc, + SemaRef.setFunctionHasBranchProtectedScope(); + return OMPTargetSimdDirective::Create(getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTeamsDistributeDirective( +StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14496,23 +14609,23 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, - *DSAStack, VarsWithImplicitDSA, B); + nullptr /*ordered not a clause on distribute*/, CS, + SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp teams distribute loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); return OMPTeamsDistributeDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14542,38 +14655,38 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_teams_distribute_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp teams distribute simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); return OMPTeamsDistributeSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14604,38 +14717,38 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective( // define the nested loops number. 
unsigned NestedLoopCount = checkOpenMPLoop( OMPD_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); return OMPTeamsDistributeParallelForSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective( +StmtResult SemaOpenMP::ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14666,28 +14779,27 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); DSAStack->setParentTeamsRegionLoc(StartLoc); return OMPTeamsDistributeParallelForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDirective( + ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); @@ -14709,7 +14821,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses, // longjmp() and throw() must not violate the entry/exit criteria. 
CS->getCapturedDecl()->setNothrow(); } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); const OMPClause *BareClause = nullptr; bool HasThreadLimitAndNumTeamsClause = hasClauses(Clauses, OMPC_num_teams) && @@ -14724,11 +14836,11 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef Clauses, return StmtError(); } - return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, - AStmt); + return OMPTargetTeamsDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses, AStmt); } -StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14758,20 +14870,20 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_target_teams_distribute, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target teams distribute loop exprs were not built"); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetTeamsDistributeDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14801,32 +14913,32 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_target_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target teams distribute parallel for loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetTeamsDistributeParallelForDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B, DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion()); } -StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14857,35 +14969,35 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( unsigned NestedLoopCount = checkOpenMPLoop(OMPD_target_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, - *DSAStack, VarsWithImplicitDSA, B); + nullptr /*ordered not a clause on distribute*/, CS, + SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target teams distribute parallel for simd loop exprs were not " "built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetTeamsDistributeParallelForSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective( +StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) { if (!AStmt) @@ -14915,34 +15027,34 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective( // define the nested loops number. unsigned NestedLoopCount = checkOpenMPLoop( OMPD_target_teams_distribute_simd, getCollapseNumberExpr(Clauses), - nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack, + nullptr /*ordered not a clause on distribute*/, CS, SemaRef, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); - assert((CurContext->isDependentContext() || B.builtAll()) && + assert((SemaRef.CurContext->isDependentContext() || B.builtAll()) && "omp target teams distribute simd loop exprs were not built"); - if (!CurContext->isDependentContext()) { + if (!SemaRef.CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
for (OMPClause *C : Clauses) { if (auto *LC = dyn_cast(C)) if (FinishOpenMPLinearClause(*LC, cast(B.IterationVarRef), - B.NumIterations, *this, CurScope, - DSAStack)) + B.NumIterations, SemaRef, + SemaRef.getCurScope(), DSAStack)) return StmtError(); } } - if (checkSimdlenSafelenSpecified(*this, Clauses)) + if (checkSimdlenSafelenSpecified(SemaRef, Clauses)) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); return OMPTargetTeamsDistributeSimdDirective::Create( - Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); + getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } -bool Sema::checkTransformableLoopNest( +bool SemaOpenMP::checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl &LoopHelpers, Stmt *&Body, @@ -14955,7 +15067,7 @@ bool Sema::checkTransformableLoopNest( Stmt *CurStmt) { VarsWithInheritedDSAType TmpDSA; unsigned SingleNumLoops = - checkOpenMPLoop(Kind, nullptr, nullptr, CurStmt, *this, *DSAStack, + checkOpenMPLoop(Kind, nullptr, nullptr, CurStmt, SemaRef, *DSAStack, TmpDSA, LoopHelpers[Cnt]); if (SingleNumLoops == 0) return true; @@ -14991,9 +15103,11 @@ bool Sema::checkTransformableLoopNest( return Result; } -StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { + ASTContext &Context = getASTContext(); auto SizesClauses = OMPExecutableDirective::getClausesOfKind(Clauses); if (SizesClauses.empty()) { @@ -15017,7 +15131,7 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, return StmtError(); // Delay tiling to when template is completely instantiated. - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) return OMPTileDirective::Create(Context, StartLoc, EndLoc, Clauses, NumLoops, AStmt, nullptr, nullptr); @@ -15043,7 +15157,7 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, std::string FloorCntName = (Twine(".floor_") + llvm::utostr(I) + ".iv." + OrigVarName).str(); VarDecl *FloorCntDecl = - buildVarDecl(*this, {}, CntTy, FloorCntName, nullptr, OrigCntVar); + buildVarDecl(SemaRef, {}, CntTy, FloorCntName, nullptr, OrigCntVar); FloorIndVars[I] = FloorCntDecl; } @@ -15056,7 +15170,8 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, // used by the expressions to derive the original iteration variable's // value from the logical iteration number. auto *TileCntDecl = cast(IterVarRef->getDecl()); - TileCntDecl->setDeclName(&PP.getIdentifierTable().get(TileCntName)); + TileCntDecl->setDeclName( + &SemaRef.PP.getIdentifierTable().get(TileCntName)); TileIndVars[I] = TileCntDecl; } for (auto &P : OriginalInits[I]) { @@ -15085,17 +15200,18 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, auto *OrigCntVar = cast(LoopHelper.Counters[0]); QualType CntTy = OrigCntVar->getType(); Expr *DimTileSize = SizesClause->getSizesRefs()[I]; - Scope *CurScope = getCurScope(); + Scope *CurScope = SemaRef.getCurScope(); // Commonly used variables. 
- DeclRefExpr *TileIV = buildDeclRefExpr(*this, TileIndVars[I], CntTy, + DeclRefExpr *TileIV = buildDeclRefExpr(SemaRef, TileIndVars[I], CntTy, OrigCntVar->getExprLoc()); - DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy, + DeclRefExpr *FloorIV = buildDeclRefExpr(SemaRef, FloorIndVars[I], CntTy, OrigCntVar->getExprLoc()); // For init-statement: auto .tile.iv = .floor.iv - AddInitializerToDecl(TileIndVars[I], DefaultLvalueConversion(FloorIV).get(), - /*DirectInit=*/false); + SemaRef.AddInitializerToDecl(TileIndVars[I], + SemaRef.DefaultLvalueConversion(FloorIV).get(), + /*DirectInit=*/false); Decl *CounterDecl = TileIndVars[I]; StmtResult InitStmt = new (Context) DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1), @@ -15105,28 +15221,29 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, // For cond-expression: .tile.iv < min(.floor.iv + DimTileSize, // NumIterations) - ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), - BO_Add, FloorIV, DimTileSize); + ExprResult EndOfTile = SemaRef.BuildBinOp( + CurScope, LoopHelper.Cond->getExprLoc(), BO_Add, FloorIV, DimTileSize); if (!EndOfTile.isUsable()) return StmtError(); ExprResult IsPartialTile = - BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, - NumIterations, EndOfTile.get()); + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, + NumIterations, EndOfTile.get()); if (!IsPartialTile.isUsable()) return StmtError(); - ExprResult MinTileAndIterSpace = ActOnConditionalOp( + ExprResult MinTileAndIterSpace = SemaRef.ActOnConditionalOp( LoopHelper.Cond->getBeginLoc(), LoopHelper.Cond->getEndLoc(), IsPartialTile.get(), NumIterations, EndOfTile.get()); if (!MinTileAndIterSpace.isUsable()) return StmtError(); - ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), - BO_LT, TileIV, MinTileAndIterSpace.get()); + ExprResult CondExpr = + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, + TileIV, MinTileAndIterSpace.get()); if (!CondExpr.isUsable()) return StmtError(); // For incr-statement: ++.tile.iv - ExprResult IncrStmt = - BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, TileIV); + ExprResult IncrStmt = SemaRef.BuildUnaryOp( + CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, TileIV); if (!IncrStmt.isUsable()) return StmtError(); @@ -15161,16 +15278,16 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, DeclRefExpr *OrigCntVar = cast(LoopHelper.Counters[0]); QualType CntTy = OrigCntVar->getType(); Expr *DimTileSize = SizesClause->getSizesRefs()[I]; - Scope *CurScope = getCurScope(); + Scope *CurScope = SemaRef.getCurScope(); // Commonly used variables. 
- DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy, + DeclRefExpr *FloorIV = buildDeclRefExpr(SemaRef, FloorIndVars[I], CntTy, OrigCntVar->getExprLoc()); // For init-statement: auto .floor.iv = 0 - AddInitializerToDecl( + SemaRef.AddInitializerToDecl( FloorIndVars[I], - ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(), + SemaRef.ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(), /*DirectInit=*/false); Decl *CounterDecl = FloorIndVars[I]; StmtResult InitStmt = new (Context) @@ -15180,14 +15297,15 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, return StmtError(); // For cond-expression: .floor.iv < NumIterations - ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), - BO_LT, FloorIV, NumIterations); + ExprResult CondExpr = SemaRef.BuildBinOp( + CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, FloorIV, NumIterations); if (!CondExpr.isUsable()) return StmtError(); // For incr-statement: .floor.iv += DimTileSize - ExprResult IncrStmt = BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), - BO_AddAssign, FloorIV, DimTileSize); + ExprResult IncrStmt = + SemaRef.BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign, + FloorIV, DimTileSize); if (!IncrStmt.isUsable()) return StmtError(); @@ -15202,15 +15320,18 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef Clauses, buildPreInits(Context, PreInits)); } -StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, - Stmt *AStmt, - SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { + ASTContext &Context = getASTContext(); + Scope *CurScope = SemaRef.getCurScope(); // Empty statement should only be possible if there already was an error. if (!AStmt) return StmtError(); - if (checkMutuallyExclusiveClauses(*this, Clauses, {OMPC_partial, OMPC_full})) + if (checkMutuallyExclusiveClauses(SemaRef, Clauses, + {OMPC_partial, OMPC_full})) return StmtError(); const OMPFullClause *FullClause = @@ -15233,7 +15354,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, unsigned NumGeneratedLoops = PartialClause ? 1 : 0; // Delay unrolling to when template is completely instantiated. - if (CurContext->isDependentContext()) + if (SemaRef.CurContext->isDependentContext()) return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, NumGeneratedLoops, nullptr, nullptr); @@ -15338,8 +15459,8 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, assert(Factor > 0 && "Expected positive unroll factor"); auto MakeFactorExpr = [this, Factor, IVTy, FactorLoc]() { return IntegerLiteral::Create( - Context, llvm::APInt(Context.getIntWidth(IVTy), Factor), IVTy, - FactorLoc); + getASTContext(), llvm::APInt(getASTContext().getIntWidth(IVTy), Factor), + IVTy, FactorLoc); }; // Iteration variable SourceLocations. @@ -15356,30 +15477,31 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, // Create the iteration variable for the unrolled loop. VarDecl *OuterIVDecl = - buildVarDecl(*this, {}, IVTy, OuterIVName, nullptr, OrigVar); + buildVarDecl(SemaRef, {}, IVTy, OuterIVName, nullptr, OrigVar); auto MakeOuterRef = [this, OuterIVDecl, IVTy, OrigVarLoc]() { - return buildDeclRefExpr(*this, OuterIVDecl, IVTy, OrigVarLoc); + return buildDeclRefExpr(SemaRef, OuterIVDecl, IVTy, OrigVarLoc); }; // Iteration variable for the inner loop: Reuse the iteration variable created // by checkOpenMPLoop. 
auto *InnerIVDecl = cast(IterationVarRef->getDecl()); - InnerIVDecl->setDeclName(&PP.getIdentifierTable().get(InnerIVName)); + InnerIVDecl->setDeclName(&SemaRef.PP.getIdentifierTable().get(InnerIVName)); auto MakeInnerRef = [this, InnerIVDecl, IVTy, OrigVarLoc]() { - return buildDeclRefExpr(*this, InnerIVDecl, IVTy, OrigVarLoc); + return buildDeclRefExpr(SemaRef, InnerIVDecl, IVTy, OrigVarLoc); }; // Make a copy of the NumIterations expression for each use: By the AST // constraints, every expression object in a DeclContext must be unique. - CaptureVars CopyTransformer(*this); + CaptureVars CopyTransformer(SemaRef); auto MakeNumIterations = [&CopyTransformer, &LoopHelper]() -> Expr * { return AssertSuccess( CopyTransformer.TransformExpr(LoopHelper.NumIterations)); }; // Inner For init-statement: auto .unroll_inner.iv = .unrolled.iv - ExprResult LValueConv = DefaultLvalueConversion(MakeOuterRef()); - AddInitializerToDecl(InnerIVDecl, LValueConv.get(), /*DirectInit=*/false); + ExprResult LValueConv = SemaRef.DefaultLvalueConversion(MakeOuterRef()); + SemaRef.AddInitializerToDecl(InnerIVDecl, LValueConv.get(), + /*DirectInit=*/false); StmtResult InnerInit = new (Context) DeclStmt(DeclGroupRef(InnerIVDecl), OrigVarLocBegin, OrigVarLocEnd); if (!InnerInit.isUsable()) @@ -15392,28 +15514,30 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, // \endcode // This conjunction of two conditions allows ScalarEvolution to derive the // maximum trip count of the inner loop. - ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), - BO_Add, MakeOuterRef(), MakeFactorExpr()); + ExprResult EndOfTile = + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_Add, + MakeOuterRef(), MakeFactorExpr()); if (!EndOfTile.isUsable()) return StmtError(); - ExprResult InnerCond1 = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), - BO_LT, MakeInnerRef(), EndOfTile.get()); + ExprResult InnerCond1 = + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, + MakeInnerRef(), EndOfTile.get()); if (!InnerCond1.isUsable()) return StmtError(); ExprResult InnerCond2 = - BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeInnerRef(), - MakeNumIterations()); + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, + MakeInnerRef(), MakeNumIterations()); if (!InnerCond2.isUsable()) return StmtError(); ExprResult InnerCond = - BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LAnd, - InnerCond1.get(), InnerCond2.get()); + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LAnd, + InnerCond1.get(), InnerCond2.get()); if (!InnerCond.isUsable()) return StmtError(); // Inner For incr-statement: ++.unroll_inner.iv - ExprResult InnerIncr = BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(), - UO_PreInc, MakeInnerRef()); + ExprResult InnerIncr = SemaRef.BuildUnaryOp( + CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, MakeInnerRef()); if (!InnerIncr.isUsable()) return StmtError(); @@ -15422,7 +15546,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end()); InnerBodyStmts.push_back(Body); CompoundStmt *InnerBody = - CompoundStmt::Create(Context, InnerBodyStmts, FPOptionsOverride(), + CompoundStmt::Create(getASTContext(), InnerBodyStmts, FPOptionsOverride(), Body->getBeginLoc(), Body->getEndLoc()); ForStmt *InnerFor = new (Context) ForStmt(Context, InnerInit.get(), InnerCond.get(), nullptr, @@ -15444,12 +15568,13 @@ StmtResult 
Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, LoopHintAttr *UnrollHintAttr = LoopHintAttr::CreateImplicit(Context, LoopHintAttr::UnrollCount, LoopHintAttr::Numeric, MakeFactorExpr()); - AttributedStmt *InnerUnrolled = - AttributedStmt::Create(Context, StartLoc, {UnrollHintAttr}, InnerFor); + AttributedStmt *InnerUnrolled = AttributedStmt::Create( + getASTContext(), StartLoc, {UnrollHintAttr}, InnerFor); // Outer For init-statement: auto .unrolled.iv = 0 - AddInitializerToDecl( - OuterIVDecl, ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(), + SemaRef.AddInitializerToDecl( + OuterIVDecl, + SemaRef.ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(), /*DirectInit=*/false); StmtResult OuterInit = new (Context) DeclStmt(DeclGroupRef(OuterIVDecl), OrigVarLocBegin, OrigVarLocEnd); @@ -15458,15 +15583,15 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, // Outer For cond-expression: .unrolled.iv < NumIterations ExprResult OuterConde = - BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeOuterRef(), - MakeNumIterations()); + SemaRef.BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, + MakeOuterRef(), MakeNumIterations()); if (!OuterConde.isUsable()) return StmtError(); // Outer For incr-statement: .unrolled.iv += Factor ExprResult OuterIncr = - BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign, - MakeOuterRef(), MakeFactorExpr()); + SemaRef.BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign, + MakeOuterRef(), MakeFactorExpr()); if (!OuterIncr.isUsable()) return StmtError(); @@ -15481,10 +15606,11 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef Clauses, buildPreInits(Context, PreInits)); } -OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, + Expr *Expr, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { OMPClause *Res = nullptr; switch (Kind) { case OMPC_final: @@ -15647,6 +15773,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel) CaptureRegion = OMPD_target; break; + case OMPD_teams_loop: + case OMPD_target_teams_loop: + // For [target] teams loop, assume capture region is 'teams' so it's + // available for codegen later to use if/when necessary. + CaptureRegion = OMPD_teams; + break; case OMPD_target_teams_distribute_parallel_for_simd: if (OpenMPVersion >= 50 && (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)) { @@ -15654,7 +15786,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( break; } [[fallthrough]]; - case OMPD_target_teams_loop: case OMPD_target_teams_distribute_parallel_for: // If this clause applies to the nested 'parallel' region, capture within // the 'teams' region, otherwise do not capture. 
@@ -15777,7 +15908,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( case OMPD_declare_target: case OMPD_end_declare_target: case OMPD_loop: - case OMPD_teams_loop: case OMPD_teams: case OMPD_tile: case OMPD_unroll: @@ -16568,19 +16698,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause( return CaptureRegion; } -OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, - Expr *Condition, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation NameModifierLoc, - SourceLocation ColonLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPIfClause( + OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation NameModifierLoc, + SourceLocation ColonLoc, SourceLocation EndLoc) { Expr *ValExpr = Condition; Stmt *HelperValStmt = nullptr; OpenMPDirectiveKind CaptureRegion = OMPD_unknown; if (!Condition->isValueDependent() && !Condition->isTypeDependent() && !Condition->isInstantiationDependent() && !Condition->containsUnexpandedParameterPack()) { - ExprResult Val = CheckBooleanCondition(StartLoc, Condition); + ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition); if (Val.isInvalid()) return nullptr; @@ -16588,57 +16716,60 @@ OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); CaptureRegion = getOpenMPCaptureRegionForClause( - DKind, OMPC_if, LangOpts.OpenMP, NameModifier); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + DKind, OMPC_if, getLangOpts().OpenMP, NameModifier); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } - return new (Context) + return new (getASTContext()) OMPIfClause(NameModifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, NameModifierLoc, ColonLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPFinalClause(Expr *Condition, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = Condition; Stmt *HelperValStmt = nullptr; OpenMPDirectiveKind CaptureRegion = OMPD_unknown; if (!Condition->isValueDependent() && !Condition->isTypeDependent() && !Condition->isInstantiationDependent() && !Condition->containsUnexpandedParameterPack()) { - ExprResult Val = CheckBooleanCondition(StartLoc, Condition); + ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition); if (Val.isInvalid()) return nullptr; - ValExpr = MakeFullExpr(Val.get()).get(); + ValExpr = SemaRef.MakeFullExpr(Val.get()).get(); OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); - CaptureRegion = - getOpenMPCaptureRegionForClause(DKind, OMPC_final, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_final, + getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = 
SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } - return new (Context) OMPFinalClause(ValExpr, HelperValStmt, CaptureRegion, - StartLoc, LParenLoc, EndLoc); + return new (getASTContext()) OMPFinalClause( + ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc, - Expr *Op) { +ExprResult +SemaOpenMP::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc, + Expr *Op) { if (!Op) return ExprError(); - class IntConvertDiagnoser : public ICEConvertDiagnoser { + class IntConvertDiagnoser : public Sema::ICEConvertDiagnoser { public: IntConvertDiagnoser() : ICEConvertDiagnoser(/*AllowScopedEnumerations*/ false, false, true) {} @@ -16674,7 +16805,7 @@ ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc, llvm_unreachable("conversion functions are permitted"); } } ConvertDiagnoser; - return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser); + return SemaRef.PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser); } static bool @@ -16687,7 +16818,7 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind, !ValExpr->isInstantiationDependent()) { SourceLocation Loc = ValExpr->getExprLoc(); ExprResult Value = - SemaRef.PerformOpenMPImplicitIntegerConversion(Loc, ValExpr); + SemaRef.OpenMP().PerformOpenMPImplicitIntegerConversion(Loc, ValExpr); if (Value.isInvalid()) return false; @@ -16719,37 +16850,37 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind, return true; } -OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNumThreadsClause(Expr *NumThreads, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = NumThreads; Stmt *HelperValStmt = nullptr; // OpenMP [2.5, Restrictions] // The num_threads expression must evaluate to a positive integer value. 
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads, + if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_threads, /*StrictlyPositive=*/true)) return nullptr; OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); - OpenMPDirectiveKind CaptureRegion = - getOpenMPCaptureRegionForClause(DKind, OMPC_num_threads, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause( + DKind, OMPC_num_threads, getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } - return new (Context) OMPNumThreadsClause( + return new (getASTContext()) OMPNumThreadsClause( ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E, - OpenMPClauseKind CKind, - bool StrictlyPositive, - bool SuppressExprDiags) { +ExprResult SemaOpenMP::VerifyPositiveIntegerConstantInClause( + Expr *E, OpenMPClauseKind CKind, bool StrictlyPositive, + bool SuppressExprDiags) { if (!E) return ExprError(); if (E->isValueDependent() || E->isTypeDependent() || @@ -16763,14 +16894,16 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E, // expression. struct SuppressedDiagnoser : public Sema::VerifyICEDiagnoser { SuppressedDiagnoser() : VerifyICEDiagnoser(/*Suppress=*/true) {} - Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S, - SourceLocation Loc) override { + SemaBase::SemaDiagnosticBuilder + diagnoseNotICE(Sema &S, SourceLocation Loc) override { llvm_unreachable("Diagnostic suppressed"); } } Diagnoser; - ICE = VerifyIntegerConstantExpression(E, &Result, Diagnoser, AllowFold); + ICE = SemaRef.VerifyIntegerConstantExpression(E, &Result, Diagnoser, + Sema::AllowFold); } else { - ICE = VerifyIntegerConstantExpression(E, &Result, /*FIXME*/ AllowFold); + ICE = SemaRef.VerifyIntegerConstantExpression(E, &Result, + /*FIXME*/ Sema::AllowFold); } if (ICE.isInvalid()) return ExprError(); @@ -16794,29 +16927,31 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E, return ICE; } -OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSafelenClause(Expr *Len, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { // OpenMP [2.8.1, simd construct, Description] // The parameter of the safelen clause must be a constant // positive integer expression. ExprResult Safelen = VerifyPositiveIntegerConstantInClause(Len, OMPC_safelen); if (Safelen.isInvalid()) return nullptr; - return new (Context) + return new (getASTContext()) OMPSafelenClause(Safelen.get(), StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSimdlenClause(Expr *Len, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { // OpenMP [2.8.1, simd construct, Description] // The parameter of the simdlen clause must be a constant // positive integer expression. 
ExprResult Simdlen = VerifyPositiveIntegerConstantInClause(Len, OMPC_simdlen); if (Simdlen.isInvalid()) return nullptr; - return new (Context) + return new (getASTContext()) OMPSimdlenClause(Simdlen.get(), StartLoc, LParenLoc, EndLoc); } @@ -16876,31 +17011,32 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc, return true; } -OMPClause *Sema::ActOnOpenMPAllocatorClause(Expr *A, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPAllocatorClause(Expr *A, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { // OpenMP [2.11.3, allocate Directive, Description] // allocator is an expression of omp_allocator_handle_t type. - if (!findOMPAllocatorHandleT(*this, A->getExprLoc(), DSAStack)) + if (!findOMPAllocatorHandleT(SemaRef, A->getExprLoc(), DSAStack)) return nullptr; - ExprResult Allocator = DefaultLvalueConversion(A); + ExprResult Allocator = SemaRef.DefaultLvalueConversion(A); if (Allocator.isInvalid()) return nullptr; - Allocator = PerformImplicitConversion(Allocator.get(), - DSAStack->getOMPAllocatorHandleT(), - Sema::AA_Initializing, - /*AllowExplicit=*/true); + Allocator = SemaRef.PerformImplicitConversion( + Allocator.get(), DSAStack->getOMPAllocatorHandleT(), + Sema::AA_Initializing, + /*AllowExplicit=*/true); if (Allocator.isInvalid()) return nullptr; - return new (Context) + return new (getASTContext()) OMPAllocatorClause(Allocator.get(), StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPCollapseClause(Expr *NumForLoops, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { // OpenMP [2.7.1, loop construct, Description] // OpenMP [2.8.1, simd construct, Description] // OpenMP [2.9.6, distribute construct, Description] @@ -16910,14 +17046,14 @@ OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops, VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_collapse); if (NumForLoopsResult.isInvalid()) return nullptr; - return new (Context) + return new (getASTContext()) OMPCollapseClause(NumForLoopsResult.get(), StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc, - SourceLocation EndLoc, - SourceLocation LParenLoc, - Expr *NumForLoops) { +OMPClause *SemaOpenMP::ActOnOpenMPOrderedClause(SourceLocation StartLoc, + SourceLocation EndLoc, + SourceLocation LParenLoc, + Expr *NumForLoops) { // OpenMP [2.7.1, loop construct, Description] // OpenMP [2.8.1, simd construct, Description] // OpenMP [2.9.6, distribute construct, Description] @@ -16932,14 +17068,15 @@ OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc, } else { NumForLoops = nullptr; } - auto *Clause = OMPOrderedClause::Create( - Context, NumForLoops, NumForLoops ? DSAStack->getAssociatedLoops() : 0, - StartLoc, LParenLoc, EndLoc); + auto *Clause = + OMPOrderedClause::Create(getASTContext(), NumForLoops, + NumForLoops ? 
DSAStack->getAssociatedLoops() : 0, + StartLoc, LParenLoc, EndLoc); DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops, Clause); return Clause; } -OMPClause *Sema::ActOnOpenMPSimpleClause( +OMPClause *SemaOpenMP::ActOnOpenMPSimpleClause( OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { OMPClause *Res = nullptr; @@ -17081,11 +17218,11 @@ getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last, return std::string(Out.str()); } -OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind, - SourceLocation KindKwLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPDefaultClause(DefaultKind Kind, + SourceLocation KindKwLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMP_DEFAULT_unknown) { Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_default, /*First=*/0, @@ -17111,39 +17248,39 @@ OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind, llvm_unreachable("DSA unexpected in OpenMP default clause"); } - return new (Context) + return new (getASTContext()) OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPProcBindClause(ProcBindKind Kind, - SourceLocation KindKwLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPProcBindClause(ProcBindKind Kind, + SourceLocation KindKwLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMP_PROC_BIND_unknown) { Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_proc_bind, /*First=*/unsigned(OMP_PROC_BIND_master), /*Last=*/ - unsigned(LangOpts.OpenMP > 50 + unsigned(getLangOpts().OpenMP > 50 ? 
OMP_PROC_BIND_primary : OMP_PROC_BIND_spread) + 1) << getOpenMPClauseName(OMPC_proc_bind); return nullptr; } - if (Kind == OMP_PROC_BIND_primary && LangOpts.OpenMP < 51) + if (Kind == OMP_PROC_BIND_primary && getLangOpts().OpenMP < 51) Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_proc_bind, /*First=*/unsigned(OMP_PROC_BIND_master), /*Last=*/ unsigned(OMP_PROC_BIND_spread) + 1) << getOpenMPClauseName(OMPC_proc_bind); - return new (Context) + return new (getASTContext()) OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause( +OMPClause *SemaOpenMP::ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindKwLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { if (Kind == OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown) { @@ -17154,15 +17291,15 @@ OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause( << getOpenMPClauseName(OMPC_atomic_default_mem_order); return nullptr; } - return new (Context) OMPAtomicDefaultMemOrderClause(Kind, KindKwLoc, StartLoc, - LParenLoc, EndLoc); + return new (getASTContext()) OMPAtomicDefaultMemOrderClause( + Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind, - SourceLocation KindKwLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind, + SourceLocation KindKwLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMPC_AT_unknown) { Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_at, /*First=*/0, @@ -17170,15 +17307,15 @@ OMPClause *Sema::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind, << getOpenMPClauseName(OMPC_at); return nullptr; } - return new (Context) + return new (getASTContext()) OMPAtClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind, - SourceLocation KindKwLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind, + SourceLocation KindKwLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMPC_SEVERITY_unknown) { Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_severity, /*First=*/0, @@ -17186,28 +17323,30 @@ OMPClause *Sema::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind, << getOpenMPClauseName(OMPC_severity); return nullptr; } - return new (Context) + return new (getASTContext()) OMPSeverityClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPMessageClause(Expr *ME, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { - assert(ME && "NULL expr in Message clause"); - if (!isa(ME)) { +OMPClause *SemaOpenMP::ActOnOpenMPMessageClause(Expr *ME, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { + assert(ME && "NULL expr in Message clause"); + if (!isa(ME)) { Diag(ME->getBeginLoc(), diag::warn_clause_expected_string) << getOpenMPClauseName(OMPC_message); return nullptr; } - return new (Context) OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc); + return new (getASTContext()) + OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPOrderClause( +OMPClause 
*SemaOpenMP::ActOnOpenMPOrderClause( OpenMPOrderClauseModifier Modifier, OpenMPOrderClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc) { if (Kind != OMPC_ORDER_concurrent || - (LangOpts.OpenMP < 51 && MLoc.isValid())) { + (getLangOpts().OpenMP < 51 && MLoc.isValid())) { // Kind should be concurrent, // Modifiers introduced in OpenMP 5.1 static_assert(OMPC_ORDER_unknown > 0, @@ -17220,7 +17359,7 @@ OMPClause *Sema::ActOnOpenMPOrderClause( << getOpenMPClauseName(OMPC_order); return nullptr; } - if (LangOpts.OpenMP >= 51) { + if (getLangOpts().OpenMP >= 51) { if (Modifier == OMPC_ORDER_MODIFIER_unknown && MLoc.isValid()) { Diag(MLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_order, @@ -17237,21 +17376,21 @@ OMPClause *Sema::ActOnOpenMPOrderClause( } } } - return new (Context) OMPOrderClause(Kind, KindLoc, StartLoc, LParenLoc, - EndLoc, Modifier, MLoc); + return new (getASTContext()) OMPOrderClause( + Kind, KindLoc, StartLoc, LParenLoc, EndLoc, Modifier, MLoc); } -OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, - SourceLocation KindKwLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, + SourceLocation KindKwLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source || Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) { SmallVector Except = { OMPC_DEPEND_source, OMPC_DEPEND_sink, OMPC_DEPEND_depobj, OMPC_DEPEND_outallmemory, OMPC_DEPEND_inoutallmemory}; - if (LangOpts.OpenMP < 51) + if (getLangOpts().OpenMP < 51) Except.push_back(OMPC_DEPEND_inoutset); Diag(KindKwLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_depend, /*First=*/0, @@ -17259,14 +17398,14 @@ OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, << getOpenMPClauseName(OMPC_update); return nullptr; } - return OMPUpdateClause::Create(Context, StartLoc, LParenLoc, KindKwLoc, Kind, - EndLoc); + return OMPUpdateClause::Create(getASTContext(), StartLoc, LParenLoc, + KindKwLoc, Kind, EndLoc); } -OMPClause *Sema::ActOnOpenMPSizesClause(ArrayRef SizeExprs, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSizesClause(ArrayRef SizeExprs, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { for (Expr *SizeExpr : SizeExprs) { ExprResult NumForLoopsResult = VerifyPositiveIntegerConstantInClause( SizeExpr, OMPC_sizes, /*StrictlyPositive=*/true); @@ -17275,19 +17414,19 @@ OMPClause *Sema::ActOnOpenMPSizesClause(ArrayRef SizeExprs, } DSAStack->setAssociatedLoops(SizeExprs.size()); - return OMPSizesClause::Create(Context, StartLoc, LParenLoc, EndLoc, + return OMPSizesClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, SizeExprs); } -OMPClause *Sema::ActOnOpenMPFullClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return OMPFullClause::Create(Context, StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPFullClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return OMPFullClause::Create(getASTContext(), StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPPartialClause(Expr *FactorExpr, + 
SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (FactorExpr) { // If an argument is specified, it must be a constant (or an unevaluated // template expression). @@ -17298,22 +17437,22 @@ OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr, FactorExpr = FactorResult.get(); } - return OMPPartialClause::Create(Context, StartLoc, LParenLoc, EndLoc, + return OMPPartialClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, FactorExpr); } -OMPClause *Sema::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { ExprResult AlignVal; AlignVal = VerifyPositiveIntegerConstantInClause(A, OMPC_align); if (AlignVal.isInvalid()) return nullptr; - return OMPAlignClause::Create(Context, AlignVal.get(), StartLoc, LParenLoc, - EndLoc); + return OMPAlignClause::Create(getASTContext(), AlignVal.get(), StartLoc, + LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause( +OMPClause *SemaOpenMP::ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef ArgumentLoc, SourceLocation DelimLoc, @@ -17481,13 +17620,13 @@ static bool checkScheduleModifiers(Sema &S, OpenMPScheduleClauseModifier M1, return false; } -OMPClause *Sema::ActOnOpenMPScheduleClause( +OMPClause *SemaOpenMP::ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) { - if (checkScheduleModifiers(*this, M1, M2, M1Loc, M2Loc) || - checkScheduleModifiers(*this, M2, M1, M2Loc, M1Loc)) + if (checkScheduleModifiers(SemaRef, M1, M2, M1Loc, M2Loc) || + checkScheduleModifiers(SemaRef, M2, M1, M2Loc, M1Loc)) return nullptr; // OpenMP, 2.7.1, Loop Construct, Restrictions // Either the monotonic modifier or the nonmonotonic modifier can be specified @@ -17521,7 +17660,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause( // The nonmonotonic modifier can only be specified with schedule(dynamic) or // schedule(guided). // OpenMP 5.0 does not have this restriction. - if (LangOpts.OpenMP < 50 && + if (getLangOpts().OpenMP < 50 && (M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic) && Kind != OMPC_SCHEDULE_dynamic && Kind != OMPC_SCHEDULE_guided) { @@ -17547,7 +17686,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause( // chunk_size must be a loop invariant integer expression with a positive // value. 
if (std::optional Result = - ValExpr->getIntegerConstantExpr(Context)) { + ValExpr->getIntegerConstantExpr(getASTContext())) { if (Result->isSigned() && !Result->isStrictlyPositive()) { Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause) << "schedule" << 1 << ChunkSize->getSourceRange(); @@ -17555,24 +17694,24 @@ OMPClause *Sema::ActOnOpenMPScheduleClause( } } else if (getOpenMPCaptureRegionForClause( DSAStack->getCurrentDirective(), OMPC_schedule, - LangOpts.OpenMP) != OMPD_unknown && - !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + getLangOpts().OpenMP) != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } } - return new (Context) + return new (getASTContext()) OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc, Kind, ValExpr, HelperValStmt, M1, M1Loc, M2, M2Loc); } -OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind, - SourceLocation StartLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPClause(OpenMPClauseKind Kind, + SourceLocation StartLoc, + SourceLocation EndLoc) { OMPClause *Res = nullptr; switch (Kind) { case OMPC_ordered: @@ -17726,134 +17865,138 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind, return Res; } -OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNowaitClause(SourceLocation StartLoc, + SourceLocation EndLoc) { DSAStack->setNowaitRegion(); - return new (Context) OMPNowaitClause(StartLoc, EndLoc); + return new (getASTContext()) OMPNowaitClause(StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPUntiedClause(SourceLocation StartLoc, + SourceLocation EndLoc) { DSAStack->setUntiedRegion(); - return new (Context) OMPUntiedClause(StartLoc, EndLoc); + return new (getASTContext()) OMPUntiedClause(StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return new (Context) OMPMergeableClause(StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPMergeableClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return new (getASTContext()) OMPMergeableClause(StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return new (Context) OMPReadClause(StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPReadClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return new (getASTContext()) OMPReadClause(StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return new (Context) OMPWriteClause(StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPWriteClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return new (getASTContext()) OMPWriteClause(StartLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return OMPUpdateClause::Create(Context, StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPUpdateClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return OMPUpdateClause::Create(getASTContext(), StartLoc, EndLoc); } -OMPClause 
*Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPCaptureClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPCaptureClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPCompareClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPCompareClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPCompareClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPCompareClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPFailClause(SourceLocation StartLoc,
-                                       SourceLocation EndLoc) {
-  return new (Context) OMPFailClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPFailClause(SourceLocation StartLoc,
+                                             SourceLocation EndLoc) {
+  return new (getASTContext()) OMPFailClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPFailClause(
-    OpenMPClauseKind Parameter, SourceLocation KindLoc,
-    SourceLocation StartLoc, SourceLocation LParenLoc,
-    SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPFailClause(OpenMPClauseKind Parameter,
+                                             SourceLocation KindLoc,
+                                             SourceLocation StartLoc,
+                                             SourceLocation LParenLoc,
+                                             SourceLocation EndLoc) {
   if (!checkFailClauseParameter(Parameter)) {
     Diag(KindLoc, diag::err_omp_atomic_fail_wrong_or_no_clauses);
     return nullptr;
   }
-  return new (Context)
+  return new (getASTContext())
       OMPFailClause(Parameter, KindLoc, StartLoc, LParenLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
-                                         SourceLocation EndLoc) {
-  return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
+                                               SourceLocation EndLoc) {
+  return new (getASTContext()) OMPSeqCstClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
-                                         SourceLocation EndLoc) {
-  return new (Context) OMPAcqRelClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
+                                               SourceLocation EndLoc) {
+  return new (getASTContext()) OMPAcqRelClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPAcquireClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPAcquireClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPAcquireClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPReleaseClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPReleaseClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPReleaseClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPRelaxedClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPRelaxedClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPWeakClause(SourceLocation StartLoc,
-                                       SourceLocation EndLoc) {
-  return new (Context) OMPWeakClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPWeakClause(SourceLocation StartLoc,
+                                             SourceLocation EndLoc) {
+  return new (getASTContext()) OMPWeakClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPThreadsClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPThreadsClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPThreadsClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
-                                       SourceLocation EndLoc) {
-  return new (Context) OMPSIMDClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPSIMDClause(SourceLocation StartLoc,
+                                             SourceLocation EndLoc) {
+  return new (getASTContext()) OMPSIMDClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
-                                          SourceLocation EndLoc) {
-  return new (Context) OMPNogroupClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
+                                                SourceLocation EndLoc) {
+  return new (getASTContext()) OMPNogroupClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
-                                                 SourceLocation EndLoc) {
-  return new (Context) OMPUnifiedAddressClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
+                                                       SourceLocation EndLoc) {
+  return new (getASTContext()) OMPUnifiedAddressClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
-                                                      SourceLocation EndLoc) {
-  return new (Context) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+OMPClause *
+SemaOpenMP::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
+                                                 SourceLocation EndLoc) {
+  return new (getASTContext()) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
-                                                 SourceLocation EndLoc) {
-  return new (Context) OMPReverseOffloadClause(StartLoc, EndLoc);
+OMPClause *SemaOpenMP::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+                                                       SourceLocation EndLoc) {
+  return new (getASTContext()) OMPReverseOffloadClause(StartLoc, EndLoc);
 }
 
-OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
-                                                    SourceLocation EndLoc) {
-  return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
+OMPClause *
+SemaOpenMP::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
+                                               SourceLocation EndLoc) {
+  return new (getASTContext()) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
 }
 
-StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
-                                             SourceLocation StartLoc,
-                                             SourceLocation EndLoc) {
+StmtResult
+SemaOpenMP::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
+                                        SourceLocation StartLoc,
+                                        SourceLocation EndLoc) {
   // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
   // At least one action-clause must appear on a directive.
@@ -17903,13 +18046,13 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef Clauses, if (ClauseKind == OMPC_init) { auto *E = cast(C)->getInteropVar(); - DeclResult = getPrivateItem(*this, E, ELoc, ERange); + DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange); } else if (ClauseKind == OMPC_use) { auto *E = cast(C)->getInteropVar(); - DeclResult = getPrivateItem(*this, E, ELoc, ERange); + DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange); } else if (ClauseKind == OMPC_destroy) { auto *E = cast(C)->getInteropVar(); - DeclResult = getPrivateItem(*this, E, ELoc, ERange); + DeclResult = getPrivateItem(SemaRef, E, ELoc, ERange); } if (DeclResult.first) { @@ -17921,7 +18064,8 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef Clauses, } } - return OMPInteropDirective::Create(Context, StartLoc, EndLoc, Clauses); + return OMPInteropDirective::Create(getASTContext(), StartLoc, EndLoc, + Clauses); } static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr, @@ -17981,12 +18125,11 @@ static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr, return true; } -OMPClause * -Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo, - SourceLocation StartLoc, SourceLocation LParenLoc, - SourceLocation VarLoc, SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPInitClause( + Expr *InteropVar, OMPInteropInfo &InteropInfo, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) { - if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_init)) + if (!isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_init)) return nullptr; // Check prefer_type values. These foreign-runtime-id values are either @@ -17995,7 +18138,7 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo, if (E->isValueDependent() || E->isTypeDependent() || E->isInstantiationDependent() || E->containsUnexpandedParameterPack()) continue; - if (E->isIntegerConstantExpr(Context)) + if (E->isIntegerConstantExpr(getASTContext())) continue; if (isa(E)) continue; @@ -18003,28 +18146,29 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo, return nullptr; } - return OMPInitClause::Create(Context, InteropVar, InteropInfo, StartLoc, - LParenLoc, VarLoc, EndLoc); + return OMPInitClause::Create(getASTContext(), InteropVar, InteropInfo, + StartLoc, LParenLoc, VarLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation VarLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPUseClause(Expr *InteropVar, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation VarLoc, + SourceLocation EndLoc) { - if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_use)) + if (!isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_use)) return nullptr; - return new (Context) + return new (getASTContext()) OMPUseClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation VarLoc, - SourceLocation EndLoc) { - if (!InteropVar && LangOpts.OpenMP >= 52 && +OMPClause *SemaOpenMP::ActOnOpenMPDestroyClause(Expr *InteropVar, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation VarLoc, + SourceLocation EndLoc) { + if (!InteropVar && getLangOpts().OpenMP >= 52 && DSAStack->getCurrentDirective() == OMPD_depobj) { Diag(StartLoc, 
diag::err_omp_expected_clause_argument) << getOpenMPClauseName(OMPC_destroy) @@ -18032,100 +18176,103 @@ OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar, return nullptr; } if (InteropVar && - !isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_destroy)) + !isValidInteropVariable(SemaRef, InteropVar, VarLoc, OMPC_destroy)) return nullptr; - return new (Context) + return new (getASTContext()) OMPDestroyClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPNovariantsClause(Expr *Condition, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNovariantsClause(Expr *Condition, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = Condition; Stmt *HelperValStmt = nullptr; OpenMPDirectiveKind CaptureRegion = OMPD_unknown; if (!Condition->isValueDependent() && !Condition->isTypeDependent() && !Condition->isInstantiationDependent() && !Condition->containsUnexpandedParameterPack()) { - ExprResult Val = CheckBooleanCondition(StartLoc, Condition); + ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition); if (Val.isInvalid()) return nullptr; - ValExpr = MakeFullExpr(Val.get()).get(); + ValExpr = SemaRef.MakeFullExpr(Val.get()).get(); OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_novariants, - LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } - return new (Context) OMPNovariantsClause( + return new (getASTContext()) OMPNovariantsClause( ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPNocontextClause(Expr *Condition, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNocontextClause(Expr *Condition, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = Condition; Stmt *HelperValStmt = nullptr; OpenMPDirectiveKind CaptureRegion = OMPD_unknown; if (!Condition->isValueDependent() && !Condition->isTypeDependent() && !Condition->isInstantiationDependent() && !Condition->containsUnexpandedParameterPack()) { - ExprResult Val = CheckBooleanCondition(StartLoc, Condition); + ExprResult Val = SemaRef.CheckBooleanCondition(StartLoc, Condition); if (Val.isInvalid()) return nullptr; - ValExpr = MakeFullExpr(Val.get()).get(); + ValExpr = SemaRef.MakeFullExpr(Val.get()).get(); OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); - CaptureRegion = - getOpenMPCaptureRegionForClause(DKind, OMPC_nocontext, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_nocontext, + getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = 
tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } - return new (Context) OMPNocontextClause(ValExpr, HelperValStmt, CaptureRegion, - StartLoc, LParenLoc, EndLoc); + return new (getASTContext()) OMPNocontextClause( + ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPFilterClause(Expr *ThreadID, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPFilterClause(Expr *ThreadID, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = ThreadID; Stmt *HelperValStmt = nullptr; OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); OpenMPDirectiveKind CaptureRegion = - getOpenMPCaptureRegionForClause(DKind, OMPC_filter, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + getOpenMPCaptureRegionForClause(DKind, OMPC_filter, getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } - return new (Context) OMPFilterClause(ValExpr, HelperValStmt, CaptureRegion, - StartLoc, LParenLoc, EndLoc); + return new (getASTContext()) OMPFilterClause( + ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind, - ArrayRef VarList, - const OMPVarListLocTy &Locs, - OpenMPVarListDataTy &Data) { +OMPClause *SemaOpenMP::ActOnOpenMPVarListClause(OpenMPClauseKind Kind, + ArrayRef VarList, + const OMPVarListLocTy &Locs, + OpenMPVarListDataTy &Data) { SourceLocation StartLoc = Locs.StartLoc; SourceLocation LParenLoc = Locs.LParenLoc; SourceLocation EndLoc = Locs.EndLoc; @@ -18317,29 +18464,30 @@ OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind, return Res; } -ExprResult Sema::getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, - ExprObjectKind OK, SourceLocation Loc) { - ExprResult Res = BuildDeclRefExpr( +ExprResult SemaOpenMP::getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, + ExprObjectKind OK, + SourceLocation Loc) { + ExprResult Res = SemaRef.BuildDeclRefExpr( Capture, Capture->getType().getNonReferenceType(), VK_LValue, Loc); if (!Res.isUsable()) return ExprError(); if (OK == OK_Ordinary && !getLangOpts().CPlusPlus) { - Res = CreateBuiltinUnaryOp(Loc, UO_Deref, Res.get()); + Res = SemaRef.CreateBuiltinUnaryOp(Loc, UO_Deref, Res.get()); if (!Res.isUsable()) return ExprError(); } if (VK != VK_LValue && Res.get()->isGLValue()) { - Res = DefaultLvalueConversion(Res.get()); + Res = SemaRef.DefaultLvalueConversion(Res.get()); if (!Res.isUsable()) return ExprError(); } return Res; } -OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPPrivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; SmallVector PrivateCopies; bool IsImplicitClause = @@ 
-18349,7 +18497,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -18365,7 +18513,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, // OpenMP [2.9.3.3, Restrictions, C/C++, p.3] // A variable that appears in a private clause must not have an incomplete // type or a reference type. - if (RequireCompleteType(ELoc, Type, diag::err_omp_private_incomplete_type)) + if (SemaRef.RequireCompleteType(ELoc, Type, + diag::err_omp_private_incomplete_type)) continue; Type = Type.getNonReferenceType(); @@ -18377,7 +18526,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, // OpenMP 3.1 [2.9.3.3, private clause, Restrictions] // A variable that appears in a private clause must not have a // const-qualified type unless it is of class type with a mutable member. - if (rejectConstNotMutableType(*this, D, Type, OMPC_private, ELoc)) + if (rejectConstNotMutableType(SemaRef, D, Type, OMPC_private, ELoc)) continue; // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced @@ -18391,7 +18540,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_private); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -18402,7 +18551,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) << getOpenMPClauseName(OMPC_private) << Type << getOpenMPDirectiveName(CurrDir); - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) @@ -18418,7 +18567,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, // A list item cannot appear in both a map clause and a data-sharing // attribute clause on the same construct unless the construct is a // combined construct. - if ((LangOpts.OpenMP <= 45 && isOpenMPTargetExecutionDirective(CurrDir)) || + if ((getLangOpts().OpenMP <= 45 && + isOpenMPTargetExecutionDirective(CurrDir)) || CurrDir == OMPD_target) { OpenMPClauseKind ConflictKind; if (DSAStack->checkMappableExprComponentListsForDecl( @@ -18432,7 +18582,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, << getOpenMPClauseName(OMPC_private) << getOpenMPClauseName(ConflictKind) << getOpenMPDirectiveName(CurrDir); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -18448,28 +18598,28 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, // proper diagnostics. Type = Type.getUnqualifiedType(); VarDecl *VDPrivate = - buildVarDecl(*this, ELoc, Type, D->getName(), + buildVarDecl(SemaRef, ELoc, Type, D->getName(), D->hasAttrs() ? &D->getAttrs() : nullptr, VD ? 
cast(SimpleRefExpr) : nullptr); - ActOnUninitializedDecl(VDPrivate); + SemaRef.ActOnUninitializedDecl(VDPrivate); if (VDPrivate->isInvalidDecl()) continue; DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr( - *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc); + SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc); DeclRefExpr *Ref = nullptr; - if (!VD && !CurContext->isDependentContext()) { + if (!VD && !SemaRef.CurContext->isDependentContext()) { auto *FD = dyn_cast(D); VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr; if (VD) - Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(), + Ref = buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(), RefExpr->getExprLoc()); else - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false); + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false); } if (!IsImplicitClause) DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref); - Vars.push_back((VD || CurContext->isDependentContext()) + Vars.push_back((VD || SemaRef.CurContext->isDependentContext()) ? RefExpr->IgnoreParens() : Ref); PrivateCopies.push_back(VDPrivateRefExpr); @@ -18478,14 +18628,14 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, - PrivateCopies); + return OMPPrivateClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, + Vars, PrivateCopies); } -OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPFirstprivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; SmallVector PrivateCopies; SmallVector Inits; @@ -18499,7 +18649,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -18517,8 +18667,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, // OpenMP [2.9.3.3, Restrictions, C/C++, p.3] // A variable that appears in a private clause must not have an incomplete // type or a reference type. - if (RequireCompleteType(ELoc, Type, - diag::err_omp_firstprivate_incomplete_type)) + if (SemaRef.RequireCompleteType(ELoc, Type, + diag::err_omp_firstprivate_incomplete_type)) continue; Type = Type.getNonReferenceType(); @@ -18526,7 +18676,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, // A variable of class type (or array thereof) that appears in a private // clause requires an accessible, unambiguous copy constructor for the // class type. - QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType(); + QualType ElemType = + getASTContext().getBaseElementType(Type).getNonReferenceType(); // If an implicit firstprivate variable found it was checked already. 
DSAStackTy::DSAVarData TopDVar; @@ -18535,7 +18686,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, DSAStack->getTopDSA(D, /*FromParent=*/false); TopDVar = DVar; OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective(); - bool IsConstant = ElemType.isConstant(Context); + bool IsConstant = ElemType.isConstant(getASTContext()); // OpenMP [2.4.13, Data-sharing Attribute Clauses] // A list item that specifies a given variable may not appear in more // than one clause on the same directive, except that a variable may be @@ -18550,7 +18701,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_firstprivate); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -18570,7 +18721,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_firstprivate); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -18601,7 +18752,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_firstprivate) << getOpenMPClauseName(OMPC_shared); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -18634,7 +18785,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, isOpenMPTeamsDirective(DVar.DKind))) { Diag(ELoc, diag::err_omp_parallel_reduction_in_task_firstprivate) << getOpenMPDirectiveName(DVar.DKind); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -18647,7 +18798,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, // A list item cannot appear in both a map clause and a data-sharing // attribute clause on the same construct unless the construct is a // combined construct. - if ((LangOpts.OpenMP <= 45 && + if ((getLangOpts().OpenMP <= 45 && isOpenMPTargetExecutionDirective(CurrDir)) || CurrDir == OMPD_target) { OpenMPClauseKind ConflictKind; @@ -18663,7 +18814,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, << getOpenMPClauseName(OMPC_firstprivate) << getOpenMPClauseName(ConflictKind) << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -18675,7 +18826,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) << getOpenMPClauseName(OMPC_firstprivate) << Type << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) @@ -18685,7 +18836,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, Type = Type.getUnqualifiedType(); VarDecl *VDPrivate = - buildVarDecl(*this, ELoc, Type, D->getName(), + buildVarDecl(SemaRef, ELoc, Type, D->getName(), D->hasAttrs() ? &D->getAttrs() : nullptr, VD ? 
cast(SimpleRefExpr) : nullptr); // Generate helper private variable and initialize it with the value of the @@ -18698,32 +18849,32 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, // original array element in CodeGen. if (Type->isArrayType()) { VarDecl *VDInit = - buildVarDecl(*this, RefExpr->getExprLoc(), ElemType, D->getName()); - VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc); - Expr *Init = DefaultLvalueConversion(VDInitRefExpr).get(); + buildVarDecl(SemaRef, RefExpr->getExprLoc(), ElemType, D->getName()); + VDInitRefExpr = buildDeclRefExpr(SemaRef, VDInit, ElemType, ELoc); + Expr *Init = SemaRef.DefaultLvalueConversion(VDInitRefExpr).get(); ElemType = ElemType.getUnqualifiedType(); - VarDecl *VDInitTemp = buildVarDecl(*this, RefExpr->getExprLoc(), ElemType, - ".firstprivate.temp"); + VarDecl *VDInitTemp = buildVarDecl(SemaRef, RefExpr->getExprLoc(), + ElemType, ".firstprivate.temp"); InitializedEntity Entity = InitializedEntity::InitializeVariable(VDInitTemp); InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc); - InitializationSequence InitSeq(*this, Entity, Kind, Init); - ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Init); + InitializationSequence InitSeq(SemaRef, Entity, Kind, Init); + ExprResult Result = InitSeq.Perform(SemaRef, Entity, Kind, Init); if (Result.isInvalid()) VDPrivate->setInvalidDecl(); else VDPrivate->setInit(Result.getAs()); // Remove temp variable declaration. - Context.Deallocate(VDInitTemp); + getASTContext().Deallocate(VDInitTemp); } else { - VarDecl *VDInit = buildVarDecl(*this, RefExpr->getExprLoc(), Type, + VarDecl *VDInit = buildVarDecl(SemaRef, RefExpr->getExprLoc(), Type, ".firstprivate.temp"); - VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(), + VDInitRefExpr = buildDeclRefExpr(SemaRef, VDInit, RefExpr->getType(), RefExpr->getExprLoc()); - AddInitializerToDecl(VDPrivate, - DefaultLvalueConversion(VDInitRefExpr).get(), - /*DirectInit=*/false); + SemaRef.AddInitializerToDecl( + VDPrivate, SemaRef.DefaultLvalueConversion(VDInitRefExpr).get(), + /*DirectInit=*/false); } if (VDPrivate->isInvalidDecl()) { if (IsImplicitClause) { @@ -18732,29 +18883,30 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, } continue; } - CurContext->addDecl(VDPrivate); + SemaRef.CurContext->addDecl(VDPrivate); DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr( - *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), + SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(), RefExpr->getExprLoc()); DeclRefExpr *Ref = nullptr; - if (!VD && !CurContext->isDependentContext()) { + if (!VD && !SemaRef.CurContext->isDependentContext()) { if (TopDVar.CKind == OMPC_lastprivate) { Ref = TopDVar.PrivateCopy; } else { auto *FD = dyn_cast(D); VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr; if (VD) - Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(), - RefExpr->getExprLoc()); + Ref = + buildDeclRefExpr(SemaRef, VD, VD->getType().getNonReferenceType(), + RefExpr->getExprLoc()); else - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); if (VD || !isOpenMPCapturedDecl(D)) ExprCaptures.push_back(Ref->getDecl()); } } if (!IsImplicitClause) DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref); - Vars.push_back((VD || CurContext->isDependentContext()) + Vars.push_back((VD || SemaRef.CurContext->isDependentContext()) ? 
RefExpr->IgnoreParens() : Ref); PrivateCopies.push_back(VDPrivateRefExpr); @@ -18764,12 +18916,12 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPFirstprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, - Vars, PrivateCopies, Inits, - buildPreInits(Context, ExprCaptures)); + return OMPFirstprivateClause::Create( + getASTContext(), StartLoc, LParenLoc, EndLoc, Vars, PrivateCopies, Inits, + buildPreInits(getASTContext(), ExprCaptures)); } -OMPClause *Sema::ActOnOpenMPLastprivateClause( +OMPClause *SemaOpenMP::ActOnOpenMPLastprivateClause( ArrayRef VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { @@ -18793,7 +18945,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -18811,8 +18963,8 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( // OpenMP [2.14.3.5, Restrictions, C/C++, p.2] // A variable that appears in a lastprivate clause must not have an // incomplete type or a reference type. - if (RequireCompleteType(ELoc, Type, - diag::err_omp_lastprivate_incomplete_type)) + if (SemaRef.RequireCompleteType(ELoc, Type, + diag::err_omp_lastprivate_incomplete_type)) continue; Type = Type.getNonReferenceType(); @@ -18824,7 +18976,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( // OpenMP 3.1 [2.9.3.5, lastprivate clause, Restrictions] // A variable that appears in a lastprivate clause must not have a // const-qualified type unless it is of class type with a mutable member. - if (rejectConstNotMutableType(*this, D, Type, OMPC_lastprivate, ELoc)) + if (rejectConstNotMutableType(SemaRef, D, Type, OMPC_lastprivate, ELoc)) continue; // OpenMP 5.0 [2.19.4.5 lastprivate Clause, Restrictions] @@ -18832,7 +18984,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( // modifier must be a scalar variable. if (LPKind == OMPC_LASTPRIVATE_conditional && !Type->isScalarType()) { Diag(ELoc, diag::err_omp_lastprivate_conditional_non_scalar); - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) @@ -18857,7 +19009,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_lastprivate); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -18876,7 +19028,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_lastprivate) << getOpenMPClauseName(OMPC_shared); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -18889,53 +19041,53 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( // A variable of class type (or array thereof) that appears in a // lastprivate clause requires an accessible, unambiguous copy assignment // operator for the class type. 
- Type = Context.getBaseElementType(Type).getNonReferenceType(); - VarDecl *SrcVD = buildVarDecl(*this, ERange.getBegin(), + Type = getASTContext().getBaseElementType(Type).getNonReferenceType(); + VarDecl *SrcVD = buildVarDecl(SemaRef, ERange.getBegin(), Type.getUnqualifiedType(), ".lastprivate.src", D->hasAttrs() ? &D->getAttrs() : nullptr); DeclRefExpr *PseudoSrcExpr = - buildDeclRefExpr(*this, SrcVD, Type.getUnqualifiedType(), ELoc); + buildDeclRefExpr(SemaRef, SrcVD, Type.getUnqualifiedType(), ELoc); VarDecl *DstVD = - buildVarDecl(*this, ERange.getBegin(), Type, ".lastprivate.dst", + buildVarDecl(SemaRef, ERange.getBegin(), Type, ".lastprivate.dst", D->hasAttrs() ? &D->getAttrs() : nullptr); - DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc); + DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(SemaRef, DstVD, Type, ELoc); // For arrays generate assignment operation for single element and replace // it by the original array element in CodeGen. - ExprResult AssignmentOp = BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign, - PseudoDstExpr, PseudoSrcExpr); + ExprResult AssignmentOp = SemaRef.BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign, + PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; - AssignmentOp = - ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false); + AssignmentOp = SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), ELoc, + /*DiscardedValue*/ false); if (AssignmentOp.isInvalid()) continue; DeclRefExpr *Ref = nullptr; - if (!VD && !CurContext->isDependentContext()) { + if (!VD && !SemaRef.CurContext->isDependentContext()) { if (TopDVar.CKind == OMPC_firstprivate) { Ref = TopDVar.PrivateCopy; } else { - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false); + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false); if (!isOpenMPCapturedDecl(D)) ExprCaptures.push_back(Ref->getDecl()); } if ((TopDVar.CKind == OMPC_firstprivate && !TopDVar.PrivateCopy) || (!isOpenMPCapturedDecl(D) && Ref->getDecl()->hasAttr())) { - ExprResult RefRes = DefaultLvalueConversion(Ref); + ExprResult RefRes = SemaRef.DefaultLvalueConversion(Ref); if (!RefRes.isUsable()) continue; ExprResult PostUpdateRes = - BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign, SimpleRefExpr, - RefRes.get()); + SemaRef.BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign, + SimpleRefExpr, RefRes.get()); if (!PostUpdateRes.isUsable()) continue; ExprPostUpdates.push_back( - IgnoredValueConversions(PostUpdateRes.get()).get()); + SemaRef.IgnoredValueConversions(PostUpdateRes.get()).get()); } } DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_lastprivate, Ref); - Vars.push_back((VD || CurContext->isDependentContext()) + Vars.push_back((VD || SemaRef.CurContext->isDependentContext()) ? 
RefExpr->IgnoreParens() : Ref); SrcExprs.push_back(PseudoSrcExpr); @@ -18946,24 +19098,24 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause( if (Vars.empty()) return nullptr; - return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, - Vars, SrcExprs, DstExprs, AssignmentOps, - LPKind, LPKindLoc, ColonLoc, - buildPreInits(Context, ExprCaptures), - buildPostUpdate(*this, ExprPostUpdates)); + return OMPLastprivateClause::Create( + getASTContext(), StartLoc, LParenLoc, EndLoc, Vars, SrcExprs, DstExprs, + AssignmentOps, LPKind, LPKindLoc, ColonLoc, + buildPreInits(getASTContext(), ExprCaptures), + buildPostUpdate(SemaRef, ExprPostUpdates)); } -OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPSharedClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP lastprivate clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -18985,15 +19137,16 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef VarList, DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_shared); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } DeclRefExpr *Ref = nullptr; - if (!VD && isOpenMPCapturedDecl(D) && !CurContext->isDependentContext()) - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); + if (!VD && isOpenMPCapturedDecl(D) && + !SemaRef.CurContext->isDependentContext()) + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_shared, Ref); - Vars.push_back((VD || !Ref || CurContext->isDependentContext()) + Vars.push_back((VD || !Ref || SemaRef.CurContext->isDependentContext()) ? 
RefExpr->IgnoreParens() : Ref); } @@ -19001,7 +19154,8 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPSharedClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); + return OMPSharedClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, + Vars); } namespace { @@ -20122,7 +20276,7 @@ static bool actOnOMPReductionKindClause( } else { VarsExpr = Ref = buildCapture(S, D, SimpleRefExpr, /*WithInit=*/false); } - if (!S.isOpenMPCapturedDecl(D)) { + if (!S.OpenMP().isOpenMPCapturedDecl(D)) { RD.ExprCaptures.emplace_back(Ref->getDecl()); if (Ref->getDecl()->hasAttr()) { ExprResult RefRes = S.DefaultLvalueConversion(Ref); @@ -20172,7 +20326,7 @@ static bool actOnOMPReductionKindClause( return RD.Vars.empty(); } -OMPClause *Sema::ActOnOpenMPReductionClause( +OMPClause *SemaOpenMP::ActOnOpenMPReductionClause( ArrayRef VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, @@ -20201,77 +20355,80 @@ OMPClause *Sema::ActOnOpenMPReductionClause( } ReductionData RD(VarList.size(), Modifier); - if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList, + if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_reduction, VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec, ReductionId, UnresolvedReductions, RD)) return nullptr; return OMPReductionClause::Create( - Context, StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc, Modifier, - RD.Vars, ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, + getASTContext(), StartLoc, LParenLoc, ModifierLoc, ColonLoc, EndLoc, + Modifier, RD.Vars, + ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId, RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.InscanCopyOps, RD.InscanCopyArrayTemps, RD.InscanCopyArrayElems, - buildPreInits(Context, RD.ExprCaptures), - buildPostUpdate(*this, RD.ExprPostUpdates)); + buildPreInits(getASTContext(), RD.ExprCaptures), + buildPostUpdate(SemaRef, RD.ExprPostUpdates)); } -OMPClause *Sema::ActOnOpenMPTaskReductionClause( +OMPClause *SemaOpenMP::ActOnOpenMPTaskReductionClause( ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef UnresolvedReductions) { ReductionData RD(VarList.size()); - if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_task_reduction, VarList, - StartLoc, LParenLoc, ColonLoc, EndLoc, - ReductionIdScopeSpec, ReductionId, + if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_task_reduction, + VarList, StartLoc, LParenLoc, ColonLoc, + EndLoc, ReductionIdScopeSpec, ReductionId, UnresolvedReductions, RD)) return nullptr; return OMPTaskReductionClause::Create( - Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars, - ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, + getASTContext(), StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars, + ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId, RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, - buildPreInits(Context, RD.ExprCaptures), - buildPostUpdate(*this, RD.ExprPostUpdates)); + buildPreInits(getASTContext(), RD.ExprCaptures), + buildPostUpdate(SemaRef, RD.ExprPostUpdates)); } -OMPClause *Sema::ActOnOpenMPInReductionClause( +OMPClause *SemaOpenMP::ActOnOpenMPInReductionClause( ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, 
SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef UnresolvedReductions) { ReductionData RD(VarList.size()); - if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_in_reduction, VarList, + if (actOnOMPReductionKindClause(SemaRef, DSAStack, OMPC_in_reduction, VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec, ReductionId, UnresolvedReductions, RD)) return nullptr; return OMPInReductionClause::Create( - Context, StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars, - ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, + getASTContext(), StartLoc, LParenLoc, ColonLoc, EndLoc, RD.Vars, + ReductionIdScopeSpec.getWithLocInContext(getASTContext()), ReductionId, RD.Privates, RD.LHSs, RD.RHSs, RD.ReductionOps, RD.TaskgroupDescriptors, - buildPreInits(Context, RD.ExprCaptures), - buildPostUpdate(*this, RD.ExprPostUpdates)); + buildPreInits(getASTContext(), RD.ExprCaptures), + buildPostUpdate(SemaRef, RD.ExprPostUpdates)); } -bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, - SourceLocation LinLoc) { - if ((!LangOpts.CPlusPlus && LinKind != OMPC_LINEAR_val) || +bool SemaOpenMP::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, + SourceLocation LinLoc) { + if ((!getLangOpts().CPlusPlus && LinKind != OMPC_LINEAR_val) || LinKind == OMPC_LINEAR_unknown || LinKind == OMPC_LINEAR_step) { - Diag(LinLoc, diag::err_omp_wrong_linear_modifier) << LangOpts.CPlusPlus; + Diag(LinLoc, diag::err_omp_wrong_linear_modifier) + << getLangOpts().CPlusPlus; return true; } return false; } -bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, - OpenMPLinearClauseKind LinKind, QualType Type, - bool IsDeclareSimd) { +bool SemaOpenMP::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, + OpenMPLinearClauseKind LinKind, + QualType Type, bool IsDeclareSimd) { const auto *VD = dyn_cast_or_null(D); // A variable must not have an incomplete type or a reference type. - if (RequireCompleteType(ELoc, Type, diag::err_omp_linear_incomplete_type)) + if (SemaRef.RequireCompleteType(ELoc, Type, + diag::err_omp_linear_incomplete_type)) return true; if ((LinKind == OMPC_LINEAR_uval || LinKind == OMPC_LINEAR_ref) && !Type->isReferenceType()) { @@ -20287,17 +20444,17 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, // not apply to the firstprivate clause, nor to the linear clause on // declarative directives (like declare simd). if (!IsDeclareSimd && - rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc)) + rejectConstNotMutableType(SemaRef, D, Type, OMPC_linear, ELoc)) return true; // A list item must be of integral or pointer type. Type = Type.getUnqualifiedType().getCanonicalType(); const auto *Ty = Type.getTypePtrOrNull(); if (!Ty || (LinKind != OMPC_LINEAR_ref && !Ty->isDependentType() && - !Ty->isIntegralType(Context) && !Ty->isPointerType())) { + !Ty->isIntegralType(getASTContext()) && !Ty->isPointerType())) { Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << Type; if (D) { - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? 
diag::note_previous_decl : diag::note_defined_here) @@ -20308,7 +20465,7 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, return false; } -OMPClause *Sema::ActOnOpenMPLinearClause( +OMPClause *SemaOpenMP::ActOnOpenMPLinearClause( ArrayRef VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, @@ -20331,7 +20488,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause( SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -20353,7 +20510,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause( if (DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_linear); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -20363,29 +20520,29 @@ OMPClause *Sema::ActOnOpenMPLinearClause( // Build private copy of original var. VarDecl *Private = - buildVarDecl(*this, ELoc, Type, D->getName(), + buildVarDecl(SemaRef, ELoc, Type, D->getName(), D->hasAttrs() ? &D->getAttrs() : nullptr, VD ? cast(SimpleRefExpr) : nullptr); - DeclRefExpr *PrivateRef = buildDeclRefExpr(*this, Private, Type, ELoc); + DeclRefExpr *PrivateRef = buildDeclRefExpr(SemaRef, Private, Type, ELoc); // Build var to save initial value. - VarDecl *Init = buildVarDecl(*this, ELoc, Type, ".linear.start"); + VarDecl *Init = buildVarDecl(SemaRef, ELoc, Type, ".linear.start"); Expr *InitExpr; DeclRefExpr *Ref = nullptr; - if (!VD && !CurContext->isDependentContext()) { - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false); + if (!VD && !SemaRef.CurContext->isDependentContext()) { + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false); if (!isOpenMPCapturedDecl(D)) { ExprCaptures.push_back(Ref->getDecl()); if (Ref->getDecl()->hasAttr()) { - ExprResult RefRes = DefaultLvalueConversion(Ref); + ExprResult RefRes = SemaRef.DefaultLvalueConversion(Ref); if (!RefRes.isUsable()) continue; ExprResult PostUpdateRes = - BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign, - SimpleRefExpr, RefRes.get()); + SemaRef.BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign, + SimpleRefExpr, RefRes.get()); if (!PostUpdateRes.isUsable()) continue; ExprPostUpdates.push_back( - IgnoredValueConversions(PostUpdateRes.get()).get()); + SemaRef.IgnoredValueConversions(PostUpdateRes.get()).get()); } } } @@ -20393,12 +20550,13 @@ OMPClause *Sema::ActOnOpenMPLinearClause( InitExpr = VD ? VD->getInit() : SimpleRefExpr; else InitExpr = VD ? SimpleRefExpr : Ref; - AddInitializerToDecl(Init, DefaultLvalueConversion(InitExpr).get(), - /*DirectInit=*/false); - DeclRefExpr *InitRef = buildDeclRefExpr(*this, Init, Type, ELoc); + SemaRef.AddInitializerToDecl( + Init, SemaRef.DefaultLvalueConversion(InitExpr).get(), + /*DirectInit=*/false); + DeclRefExpr *InitRef = buildDeclRefExpr(SemaRef, Init, Type, ELoc); DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_linear, Ref); - Vars.push_back((VD || CurContext->isDependentContext()) + Vars.push_back((VD || SemaRef.CurContext->isDependentContext()) ? RefExpr->IgnoreParens() : Ref); Privates.push_back(PrivateRef); @@ -20421,17 +20579,18 @@ OMPClause *Sema::ActOnOpenMPLinearClause( // Build var to save the step value. 
VarDecl *SaveVar = - buildVarDecl(*this, StepLoc, StepExpr->getType(), ".linear.step"); + buildVarDecl(SemaRef, StepLoc, StepExpr->getType(), ".linear.step"); ExprResult SaveRef = - buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc); - ExprResult CalcStep = - BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr); - CalcStep = ActOnFinishFullExpr(CalcStep.get(), /*DiscardedValue*/ false); + buildDeclRefExpr(SemaRef, SaveVar, StepExpr->getType(), StepLoc); + ExprResult CalcStep = SemaRef.BuildBinOp( + SemaRef.getCurScope(), StepLoc, BO_Assign, SaveRef.get(), StepExpr); + CalcStep = + SemaRef.ActOnFinishFullExpr(CalcStep.get(), /*DiscardedValue*/ false); // Warn about zero linear step (it would be probably better specified as // making corresponding variables 'const'). if (std::optional Result = - StepExpr->getIntegerConstantExpr(Context)) { + StepExpr->getIntegerConstantExpr(getASTContext())) { if (!Result->isNegative() && !Result->isStrictlyPositive()) Diag(StepLoc, diag::warn_omp_linear_step_zero) << Vars[0] << (Vars.size() > 1); @@ -20442,11 +20601,11 @@ OMPClause *Sema::ActOnOpenMPLinearClause( } } - return OMPLinearClause::Create(Context, StartLoc, LParenLoc, LinKind, LinLoc, - ColonLoc, StepModifierLoc, EndLoc, Vars, - Privates, Inits, StepExpr, CalcStepExpr, - buildPreInits(Context, ExprCaptures), - buildPostUpdate(*this, ExprPostUpdates)); + return OMPLinearClause::Create(getASTContext(), StartLoc, LParenLoc, LinKind, + LinLoc, ColonLoc, StepModifierLoc, EndLoc, + Vars, Privates, Inits, StepExpr, CalcStepExpr, + buildPreInits(getASTContext(), ExprCaptures), + buildPostUpdate(SemaRef, ExprPostUpdates)); } static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, @@ -20552,7 +20711,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, return HasErrors; } -OMPClause *Sema::ActOnOpenMPAlignedClause( +OMPClause *SemaOpenMP::ActOnOpenMPAlignedClause( ArrayRef VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) { SmallVector Vars; @@ -20561,7 +20720,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause( SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -20581,7 +20740,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause( if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) { Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr) << QType << getLangOpts().CPlusPlus << ERange; - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) @@ -20601,9 +20760,10 @@ OMPClause *Sema::ActOnOpenMPAlignedClause( DeclRefExpr *Ref = nullptr; if (!VD && isOpenMPCapturedDecl(D)) - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); - Vars.push_back(DefaultFunctionArrayConversion( - (VD || !Ref) ? RefExpr->IgnoreParens() : Ref) + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); + Vars.push_back(SemaRef + .DefaultFunctionArrayConversion( + (VD || !Ref) ? 
RefExpr->IgnoreParens() : Ref) .get()); } @@ -20622,14 +20782,14 @@ OMPClause *Sema::ActOnOpenMPAlignedClause( if (Vars.empty()) return nullptr; - return OMPAlignedClause::Create(Context, StartLoc, LParenLoc, ColonLoc, - EndLoc, Vars, Alignment); + return OMPAlignedClause::Create(getASTContext(), StartLoc, LParenLoc, + ColonLoc, EndLoc, Vars, Alignment); } -OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPCopyinClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; SmallVector SrcExprs; SmallVector DstExprs; @@ -20683,26 +20843,28 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef VarList, // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. - QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType(); + QualType ElemType = + getASTContext().getBaseElementType(Type).getNonReferenceType(); VarDecl *SrcVD = - buildVarDecl(*this, DE->getBeginLoc(), ElemType.getUnqualifiedType(), + buildVarDecl(SemaRef, DE->getBeginLoc(), ElemType.getUnqualifiedType(), ".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr); DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr( - *this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc()); + SemaRef, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc()); VarDecl *DstVD = - buildVarDecl(*this, DE->getBeginLoc(), ElemType, ".copyin.dst", + buildVarDecl(SemaRef, DE->getBeginLoc(), ElemType, ".copyin.dst", VD->hasAttrs() ? &VD->getAttrs() : nullptr); DeclRefExpr *PseudoDstExpr = - buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc()); + buildDeclRefExpr(SemaRef, DstVD, ElemType, DE->getExprLoc()); // For arrays generate assignment operation for single element and replace // it by the original array element in CodeGen. 
ExprResult AssignmentOp = - BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr, - PseudoSrcExpr); + SemaRef.BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, + PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; - AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), - /*DiscardedValue*/ false); + AssignmentOp = + SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), + /*DiscardedValue*/ false); if (AssignmentOp.isInvalid()) continue; @@ -20716,14 +20878,14 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, - SrcExprs, DstExprs, AssignmentOps); + return OMPCopyinClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, + Vars, SrcExprs, DstExprs, AssignmentOps); } -OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPCopyprivateClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; SmallVector SrcExprs; SmallVector DstExprs; @@ -20733,7 +20895,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -20759,7 +20921,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_copyprivate); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -20772,7 +20934,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_copyprivate) << "threadprivate or private in the enclosing context"; - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } } @@ -20783,7 +20945,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) << getOpenMPClauseName(OMPC_copyprivate) << Type << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); - bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) == + bool IsDecl = !VD || VD->isThisDeclarationADefinition(getASTContext()) == VarDecl::DeclarationOnly; Diag(D->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) @@ -20795,22 +20957,23 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. - Type = Context.getBaseElementType(Type.getNonReferenceType()) + Type = getASTContext() + .getBaseElementType(Type.getNonReferenceType()) .getUnqualifiedType(); VarDecl *SrcVD = - buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.src", + buildVarDecl(SemaRef, RefExpr->getBeginLoc(), Type, ".copyprivate.src", D->hasAttrs() ? 
&D->getAttrs() : nullptr); - DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, ELoc); + DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(SemaRef, SrcVD, Type, ELoc); VarDecl *DstVD = - buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.dst", + buildVarDecl(SemaRef, RefExpr->getBeginLoc(), Type, ".copyprivate.dst", D->hasAttrs() ? &D->getAttrs() : nullptr); - DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc); - ExprResult AssignmentOp = BuildBinOp( + DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(SemaRef, DstVD, Type, ELoc); + ExprResult AssignmentOp = SemaRef.BuildBinOp( DSAStack->getCurScope(), ELoc, BO_Assign, PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; - AssignmentOp = - ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false); + AssignmentOp = SemaRef.ActOnFinishFullExpr(AssignmentOp.get(), ELoc, + /*DiscardedValue*/ false); if (AssignmentOp.isInvalid()) continue; @@ -20819,7 +20982,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, assert(VD || isOpenMPCapturedDecl(D)); Vars.push_back( VD ? RefExpr->IgnoreParens() - : buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false)); + : buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false)); SrcExprs.push_back(PseudoSrcExpr); DstExprs.push_back(PseudoDstExpr); AssignmentOps.push_back(AssignmentOp.get()); @@ -20828,18 +20991,20 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, - Vars, SrcExprs, DstExprs, AssignmentOps); + return OMPCopyprivateClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, Vars, SrcExprs, DstExprs, + AssignmentOps); } -OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPFlushClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (VarList.empty()) return nullptr; - return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList); + return OMPFlushClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, + VarList); } /// Tries to find omp_depend_t. type. @@ -20859,22 +21024,23 @@ static bool findOMPDependT(Sema &S, SourceLocation Loc, DSAStackTy *Stack, return true; } -OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPDepobjClause(Expr *Depobj, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (!Depobj) return nullptr; - bool OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack); + bool OMPDependTFound = findOMPDependT(SemaRef, StartLoc, DSAStack); // OpenMP 5.0, 2.17.10.1 depobj Construct // depobj is an lvalue expression of type omp_depend_t. 
if (!Depobj->isTypeDependent() && !Depobj->isValueDependent() && !Depobj->isInstantiationDependent() && !Depobj->containsUnexpandedParameterPack() && - (OMPDependTFound && - !Context.typesAreCompatible(DSAStack->getOMPDependT(), Depobj->getType(), - /*CompareUnqualified=*/true))) { + (OMPDependTFound && !getASTContext().typesAreCompatible( + DSAStack->getOMPDependT(), Depobj->getType(), + /*CompareUnqualified=*/true))) { Diag(Depobj->getExprLoc(), diag::err_omp_expected_omp_depend_t_lvalue) << 0 << Depobj->getType() << Depobj->getSourceRange(); } @@ -20884,7 +21050,8 @@ OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, << 1 << Depobj->getSourceRange(); } - return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj); + return OMPDepobjClause::Create(getASTContext(), StartLoc, LParenLoc, EndLoc, + Depobj); } namespace { @@ -20984,8 +21151,9 @@ ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource, continue; } if (RHS) { - ExprResult RHSRes = SemaRef.VerifyPositiveIntegerConstantInClause( - RHS, OMPC_depend, /*StrictlyPositive=*/false); + ExprResult RHSRes = + SemaRef.OpenMP().VerifyPositiveIntegerConstantInClause( + RHS, OMPC_depend, /*StrictlyPositive=*/false); if (RHSRes.isInvalid()) continue; } @@ -21016,11 +21184,10 @@ ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource, return {Vars, OpsOffs, TotalDepCount}; } -OMPClause * -Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, - Expr *DepModifier, ArrayRef VarList, - SourceLocation StartLoc, SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPDependClause( + const OMPDependClause::DependDataTy &Data, Expr *DepModifier, + ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation EndLoc) { OpenMPDependClauseKind DepKind = Data.DepKind; SourceLocation DepLoc = Data.DepLoc; if (DSAStack->getCurrentDirective() == OMPD_ordered && @@ -21038,17 +21205,18 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, DSAStack->getCurrentDirective() == OMPD_depobj) && (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink || - ((LangOpts.OpenMP < 50 || + ((getLangOpts().OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj) && DepKind == OMPC_DEPEND_depobj))) { SmallVector Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink, OMPC_DEPEND_outallmemory, OMPC_DEPEND_inoutallmemory}; - if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj) + if (getLangOpts().OpenMP < 50 || + DSAStack->getCurrentDirective() == OMPD_depobj) Except.push_back(OMPC_DEPEND_depobj); - if (LangOpts.OpenMP < 51) + if (getLangOpts().OpenMP < 51) Except.push_back(OMPC_DEPEND_inoutset); - std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier) + std::string Expected = (getLangOpts().OpenMP >= 50 && !DepModifier) ? 
"depend modifier(iterator) or " : ""; Diag(DepLoc, diag::err_omp_unexpected_clause_value) @@ -21074,7 +21242,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) { DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon( - *this, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc); + SemaRef, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc); Vars = VarOffset.Vars; OpsOffs = VarOffset.OpsOffs; TotalDepCount = VarOffset.TotalDepCount; @@ -21090,9 +21258,9 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, SourceLocation ELoc = RefExpr->getExprLoc(); Expr *SimpleExpr = RefExpr->IgnoreParenCasts(); if (DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) { - bool OMPDependTFound = LangOpts.OpenMP >= 50; + bool OMPDependTFound = getLangOpts().OpenMP >= 50; if (OMPDependTFound) - OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack, + OMPDependTFound = findOMPDependT(SemaRef, StartLoc, DSAStack, DepKind == OMPC_DEPEND_depobj); if (DepKind == OMPC_DEPEND_depobj) { // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++ @@ -21102,8 +21270,8 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, !RefExpr->isInstantiationDependent() && !RefExpr->containsUnexpandedParameterPack() && (OMPDependTFound && - !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(), - RefExpr->getType()))) { + !getASTContext().hasSameUnqualifiedType( + DSAStack->getOMPDependT(), RefExpr->getType()))) { Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue) << 0 << RefExpr->getType() << RefExpr->getSourceRange(); continue; @@ -21134,7 +21302,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, const Expr *Length = OASE->getLength(); Expr::EvalResult Result; if (Length && !Length->isValueDependent() && - Length->EvaluateAsInt(Result, Context) && + Length->EvaluateAsInt(Result, getASTContext()) && Result.Val.getInt().isZero()) { Diag(ELoc, diag::err_omp_depend_zero_length_array_section_not_allowed) @@ -21154,8 +21322,9 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, (OMPDependTFound && DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr()))) { Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item) - << (LangOpts.OpenMP >= 50 ? 1 : 0) - << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange(); + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << RefExpr->getSourceRange(); continue; } @@ -21167,22 +21336,24 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, ->isPointerType() && !ASE->getBase()->getType().getNonReferenceType()->isArrayType()) { Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item) - << (LangOpts.OpenMP >= 50 ? 1 : 0) - << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange(); + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << RefExpr->getSourceRange(); continue; } ExprResult Res; { - Sema::TentativeAnalysisScope Trap(*this); - Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf, - RefExpr->IgnoreParenImpCasts()); + Sema::TentativeAnalysisScope Trap(SemaRef); + Res = SemaRef.CreateBuiltinUnaryOp(ELoc, UO_AddrOf, + RefExpr->IgnoreParenImpCasts()); } if (!Res.isUsable() && !isa(SimpleExpr) && !isa(SimpleExpr)) { Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item) - << (LangOpts.OpenMP >= 50 ? 1 : 0) - << (LangOpts.OpenMP >= 50 ? 
1 : 0) << RefExpr->getSourceRange(); + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << (getLangOpts().OpenMP >= 50 ? 1 : 0) + << RefExpr->getSourceRange(); continue; } } @@ -21197,7 +21368,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, return nullptr; auto *C = OMPDependClause::Create( - Context, StartLoc, LParenLoc, EndLoc, + getASTContext(), StartLoc, LParenLoc, EndLoc, {DepKind, DepLoc, Data.ColonLoc, Data.OmpAllMemoryLoc}, DepModifier, Vars, TotalDepCount.getZExtValue()); if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) && @@ -21206,12 +21377,11 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data, return C; } -OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, - Expr *Device, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ModifierLoc, - SourceLocation EndLoc) { - assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 50) && +OMPClause *SemaOpenMP::ActOnOpenMPDeviceClause( + OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation ModifierLoc, + SourceLocation EndLoc) { + assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 50) && "Unexpected device modifier in OpenMP < 50."); bool ErrorFound = false; @@ -21228,7 +21398,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, // OpenMP [2.9.1, Restrictions] // The device expression must evaluate to a non-negative integer value. - ErrorFound = !isNonNegativeIntegerValue(ValExpr, *this, OMPC_device, + ErrorFound = !isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_device, /*StrictlyPositive=*/false) || ErrorFound; if (ErrorFound) @@ -21239,7 +21409,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, // the reverse_offload clause must be specified. 
if (Modifier == OMPC_DEVICE_ancestor) { if (!DSAStack->hasRequiresDeclWithClause()) { - targetDiag( + SemaRef.targetDiag( StartLoc, diag::err_omp_device_ancestor_without_requires_reverse_offload); ErrorFound = true; @@ -21248,15 +21418,16 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); OpenMPDirectiveKind CaptureRegion = - getOpenMPCaptureRegionForClause(DKind, OMPC_device, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + getOpenMPCaptureRegionForClause(DKind, OMPC_device, getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } - return new (Context) + return new (getASTContext()) OMPDeviceClause(Modifier, ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, ModifierLoc, EndLoc); } @@ -22449,7 +22620,7 @@ static void checkMappableExpressionList( } } -OMPClause *Sema::ActOnOpenMPMapClause( +OMPClause *SemaOpenMP::ActOnOpenMPMapClause( Expr *IteratorModifier, ArrayRef MapTypeModifiers, ArrayRef MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, @@ -22484,7 +22655,7 @@ OMPClause *Sema::ActOnOpenMPMapClause( } MappableVarListInfo MVLI(VarList); - checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, Locs.StartLoc, + checkMappableExpressionList(SemaRef, DSAStack, OMPC_map, MVLI, Locs.StartLoc, MapperIdScopeSpec, MapperId, UnresolvedMappers, MapType, Modifiers, IsMapTypeImplicit, NoDiagnose); @@ -22492,17 +22663,17 @@ OMPClause *Sema::ActOnOpenMPMapClause( // We need to produce a map clause even if we don't have variables so that // other diagnostics related with non-existing map clauses are accurate. 
return OMPMapClause::Create( - Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, MVLI.VarComponents, MVLI.UDMapperList, IteratorModifier, Modifiers, - ModifiersLoc, MapperIdScopeSpec.getWithLocInContext(Context), MapperId, - MapType, IsMapTypeImplicit, MapLoc); + ModifiersLoc, MapperIdScopeSpec.getWithLocInContext(getASTContext()), + MapperId, MapType, IsMapTypeImplicit, MapLoc); } -QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, - TypeResult ParsedType) { +QualType SemaOpenMP::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, + TypeResult ParsedType) { assert(ParsedType.isUsable()); - QualType ReductionType = GetTypeFromParser(ParsedType.get()); + QualType ReductionType = SemaRef.GetTypeFromParser(ParsedType.get()); if (ReductionType.isNull()) return QualType(); @@ -22530,15 +22701,17 @@ QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, return ReductionType; } -Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart( +SemaOpenMP::DeclGroupPtrTy +SemaOpenMP::ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope) { SmallVector Decls; Decls.reserve(ReductionTypes.size()); - LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPReductionName, - forRedeclarationInCurContext()); + LookupResult Lookup(SemaRef, Name, SourceLocation(), + Sema::LookupOMPReductionName, + SemaRef.forRedeclarationInCurContext()); // [OpenMP 4.0], 2.15 declare reduction Directive, Restrictions // A reduction-identifier may not be re-declared in the current scope for the // same type or for a type that is compatible according to the base language @@ -22549,12 +22722,12 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart( if (S != nullptr) { // Find previous declaration with the same name not referenced in other // declarations. - FunctionScopeInfo *ParentFn = getEnclosingFunction(); + FunctionScopeInfo *ParentFn = SemaRef.getEnclosingFunction(); InCompoundScope = (ParentFn != nullptr) && !ParentFn->CompoundScopes.empty(); - LookupName(Lookup, S); - FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false, - /*AllowInlineNamespace=*/false); + SemaRef.LookupName(Lookup, S); + SemaRef.FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false, + /*AllowInlineNamespace=*/false); llvm::DenseMap UsedAsPrevious; LookupResult::Filter Filter = Lookup.makeFilter(); while (Filter.hasNext()) { @@ -22597,8 +22770,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart( Invalid = true; } PreviousRedeclTypes[TyData.first.getCanonicalType()] = TyData.second; - auto *DRD = OMPDeclareReductionDecl::Create(Context, DC, TyData.second, - Name, TyData.first, PrevDRD); + auto *DRD = OMPDeclareReductionDecl::Create( + getASTContext(), DC, TyData.second, Name, TyData.first, PrevDRD); DC->addDecl(DRD); DRD->setAccess(AS); Decls.push_back(DRD); @@ -22609,24 +22782,24 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart( } return DeclGroupPtrTy::make( - DeclGroupRef::Create(Context, Decls.begin(), Decls.size())); + DeclGroupRef::Create(getASTContext(), Decls.begin(), Decls.size())); } -void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) { +void SemaOpenMP::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) { auto *DRD = cast(D); // Enter new function scope. 
- PushFunctionScope(); - setFunctionHasBranchProtectedScope(); - getCurFunction()->setHasOMPDeclareReductionCombiner(); + SemaRef.PushFunctionScope(); + SemaRef.setFunctionHasBranchProtectedScope(); + SemaRef.getCurFunction()->setHasOMPDeclareReductionCombiner(); if (S != nullptr) - PushDeclContext(S, DRD); + SemaRef.PushDeclContext(S, DRD); else - CurContext = DRD; + SemaRef.CurContext = DRD; - PushExpressionEvaluationContext( - ExpressionEvaluationContext::PotentiallyEvaluated); + SemaRef.PushExpressionEvaluationContext( + Sema::ExpressionEvaluationContext::PotentiallyEvaluated); QualType ReductionType = DRD->getType(); // Create 'T* omp_parm;T omp_in;'. All references to 'omp_in' will @@ -22636,7 +22809,7 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) { // pointers. // Create 'T omp_in;' variable. VarDecl *OmpInParm = - buildVarDecl(*this, D->getLocation(), ReductionType, "omp_in"); + buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_in"); // Create 'T* omp_parm;T omp_out;'. All references to 'omp_out' will // be replaced by '*omp_parm' during codegen. This required because 'omp_out' // uses semantics of argument handles by value, but it should be passed by @@ -22644,28 +22817,29 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) { // pointers. // Create 'T omp_out;' variable. VarDecl *OmpOutParm = - buildVarDecl(*this, D->getLocation(), ReductionType, "omp_out"); + buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_out"); if (S != nullptr) { - PushOnScopeChains(OmpInParm, S); - PushOnScopeChains(OmpOutParm, S); + SemaRef.PushOnScopeChains(OmpInParm, S); + SemaRef.PushOnScopeChains(OmpOutParm, S); } else { DRD->addDecl(OmpInParm); DRD->addDecl(OmpOutParm); } Expr *InE = - ::buildDeclRefExpr(*this, OmpInParm, ReductionType, D->getLocation()); + ::buildDeclRefExpr(SemaRef, OmpInParm, ReductionType, D->getLocation()); Expr *OutE = - ::buildDeclRefExpr(*this, OmpOutParm, ReductionType, D->getLocation()); + ::buildDeclRefExpr(SemaRef, OmpOutParm, ReductionType, D->getLocation()); DRD->setCombinerData(InE, OutE); } -void Sema::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner) { +void SemaOpenMP::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, + Expr *Combiner) { auto *DRD = cast(D); - DiscardCleanupsInEvaluationContext(); - PopExpressionEvaluationContext(); + SemaRef.DiscardCleanupsInEvaluationContext(); + SemaRef.PopExpressionEvaluationContext(); - PopDeclContext(); - PopFunctionScopeInfo(); + SemaRef.PopDeclContext(); + SemaRef.PopFunctionScopeInfo(); if (Combiner != nullptr) DRD->setCombiner(Combiner); @@ -22673,20 +22847,21 @@ void Sema::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner) { DRD->setInvalidDecl(); } -VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) { +VarDecl *SemaOpenMP::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, + Decl *D) { auto *DRD = cast(D); // Enter new function scope. - PushFunctionScope(); - setFunctionHasBranchProtectedScope(); + SemaRef.PushFunctionScope(); + SemaRef.setFunctionHasBranchProtectedScope(); if (S != nullptr) - PushDeclContext(S, DRD); + SemaRef.PushDeclContext(S, DRD); else - CurContext = DRD; + SemaRef.CurContext = DRD; - PushExpressionEvaluationContext( - ExpressionEvaluationContext::PotentiallyEvaluated); + SemaRef.PushExpressionEvaluationContext( + Sema::ExpressionEvaluationContext::PotentiallyEvaluated); QualType ReductionType = DRD->getType(); // Create 'T* omp_parm;T omp_priv;'. 
All references to 'omp_priv' will @@ -22696,7 +22871,7 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) { // pointers. // Create 'T omp_priv;' variable. VarDecl *OmpPrivParm = - buildVarDecl(*this, D->getLocation(), ReductionType, "omp_priv"); + buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_priv"); // Create 'T* omp_parm;T omp_orig;'. All references to 'omp_orig' will // be replaced by '*omp_parm' during codegen. This required because 'omp_orig' // uses semantics of argument handles by value, but it should be passed by @@ -22704,30 +22879,30 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) { // pointers. // Create 'T omp_orig;' variable. VarDecl *OmpOrigParm = - buildVarDecl(*this, D->getLocation(), ReductionType, "omp_orig"); + buildVarDecl(SemaRef, D->getLocation(), ReductionType, "omp_orig"); if (S != nullptr) { - PushOnScopeChains(OmpPrivParm, S); - PushOnScopeChains(OmpOrigParm, S); + SemaRef.PushOnScopeChains(OmpPrivParm, S); + SemaRef.PushOnScopeChains(OmpOrigParm, S); } else { DRD->addDecl(OmpPrivParm); DRD->addDecl(OmpOrigParm); } Expr *OrigE = - ::buildDeclRefExpr(*this, OmpOrigParm, ReductionType, D->getLocation()); + ::buildDeclRefExpr(SemaRef, OmpOrigParm, ReductionType, D->getLocation()); Expr *PrivE = - ::buildDeclRefExpr(*this, OmpPrivParm, ReductionType, D->getLocation()); + ::buildDeclRefExpr(SemaRef, OmpPrivParm, ReductionType, D->getLocation()); DRD->setInitializerData(OrigE, PrivE); return OmpPrivParm; } -void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, - VarDecl *OmpPrivParm) { +void SemaOpenMP::ActOnOpenMPDeclareReductionInitializerEnd( + Decl *D, Expr *Initializer, VarDecl *OmpPrivParm) { auto *DRD = cast(D); - DiscardCleanupsInEvaluationContext(); - PopExpressionEvaluationContext(); + SemaRef.DiscardCleanupsInEvaluationContext(); + SemaRef.PopExpressionEvaluationContext(); - PopDeclContext(); - PopFunctionScopeInfo(); + SemaRef.PopDeclContext(); + SemaRef.PopFunctionScopeInfo(); if (Initializer != nullptr) { DRD->setInitializer(Initializer, OMPDeclareReductionInitKind::Call); @@ -22741,13 +22916,13 @@ void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, } } -Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd( +SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid) { for (Decl *D : DeclReductions.get()) { if (IsValid) { if (S) - PushOnScopeChains(cast(D), S, - /*AddToContext=*/false); + SemaRef.PushOnScopeChains(cast(D), S, + /*AddToContext=*/false); } else { D->setInvalidDecl(); } @@ -22755,25 +22930,26 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd( return DeclReductions; } -TypeResult Sema::ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D) { - TypeSourceInfo *TInfo = GetTypeForDeclarator(D); +TypeResult SemaOpenMP::ActOnOpenMPDeclareMapperVarDecl(Scope *S, + Declarator &D) { + TypeSourceInfo *TInfo = SemaRef.GetTypeForDeclarator(D); QualType T = TInfo->getType(); if (D.isInvalidType()) return true; if (getLangOpts().CPlusPlus) { // Check that there are no default arguments (C++ only). 
- CheckExtraCXXDefaultArguments(D); + SemaRef.CheckExtraCXXDefaultArguments(D); } - return CreateParsedType(T, TInfo); + return SemaRef.CreateParsedType(T, TInfo); } -QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, - TypeResult ParsedType) { +QualType SemaOpenMP::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, + TypeResult ParsedType) { assert(ParsedType.isUsable() && "Expect usable parsed mapper type"); - QualType MapperType = GetTypeFromParser(ParsedType.get()); + QualType MapperType = SemaRef.GetTypeFromParser(ParsedType.get()); assert(!MapperType.isNull() && "Expect valid mapper type"); // [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions @@ -22785,12 +22961,13 @@ QualType Sema::ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, return MapperType; } -Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective( +SemaOpenMP::DeclGroupPtrTy SemaOpenMP::ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef Clauses, Decl *PrevDeclInScope) { - LookupResult Lookup(*this, Name, SourceLocation(), LookupOMPMapperName, - forRedeclarationInCurContext()); + LookupResult Lookup(SemaRef, Name, SourceLocation(), + Sema::LookupOMPMapperName, + SemaRef.forRedeclarationInCurContext()); // [OpenMP 5.0], 2.19.7.3 declare mapper Directive, Restrictions // A mapper-identifier may not be redeclared in the current scope for the // same type or for a type that is compatible according to the base language @@ -22801,12 +22978,12 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective( if (S != nullptr) { // Find previous declaration with the same name not referenced in other // declarations. - FunctionScopeInfo *ParentFn = getEnclosingFunction(); + FunctionScopeInfo *ParentFn = SemaRef.getEnclosingFunction(); InCompoundScope = (ParentFn != nullptr) && !ParentFn->CompoundScopes.empty(); - LookupName(Lookup, S); - FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false, - /*AllowInlineNamespace=*/false); + SemaRef.LookupName(Lookup, S); + SemaRef.FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false, + /*AllowInlineNamespace=*/false); llvm::DenseMap UsedAsPrevious; LookupResult::Filter Filter = Lookup.makeFilter(); while (Filter.hasNext()) { @@ -22851,13 +23028,14 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective( // mappers. 
SmallVector ClausesWithImplicit(Clauses.begin(), Clauses.end()); - if (LangOpts.OpenMP >= 50) - processImplicitMapsWithDefaultMappers(*this, DSAStack, ClausesWithImplicit); - auto *DMD = - OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name, MapperType, VN, - ClausesWithImplicit, PrevDMD); + if (getLangOpts().OpenMP >= 50) + processImplicitMapsWithDefaultMappers(SemaRef, DSAStack, + ClausesWithImplicit); + auto *DMD = OMPDeclareMapperDecl::Create(getASTContext(), DC, StartLoc, Name, + MapperType, VN, ClausesWithImplicit, + PrevDMD); if (S) - PushOnScopeChains(DMD, S); + SemaRef.PushOnScopeChains(DMD, S); else DC->addDecl(DMD); DMD->setAccess(AS); @@ -22873,105 +23051,106 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective( return DeclGroupPtrTy::make(DeclGroupRef(DMD)); } -ExprResult -Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, - SourceLocation StartLoc, - DeclarationName VN) { +ExprResult SemaOpenMP::ActOnOpenMPDeclareMapperDirectiveVarDecl( + Scope *S, QualType MapperType, SourceLocation StartLoc, + DeclarationName VN) { TypeSourceInfo *TInfo = - Context.getTrivialTypeSourceInfo(MapperType, StartLoc); - auto *VD = VarDecl::Create(Context, Context.getTranslationUnitDecl(), - StartLoc, StartLoc, VN.getAsIdentifierInfo(), - MapperType, TInfo, SC_None); + getASTContext().getTrivialTypeSourceInfo(MapperType, StartLoc); + auto *VD = VarDecl::Create( + getASTContext(), getASTContext().getTranslationUnitDecl(), StartLoc, + StartLoc, VN.getAsIdentifierInfo(), MapperType, TInfo, SC_None); if (S) - PushOnScopeChains(VD, S, /*AddToContext=*/false); - Expr *E = buildDeclRefExpr(*this, VD, MapperType, StartLoc); + SemaRef.PushOnScopeChains(VD, S, /*AddToContext=*/false); + Expr *E = buildDeclRefExpr(SemaRef, VD, MapperType, StartLoc); DSAStack->addDeclareMapperVarRef(E); return E; } -void Sema::ActOnOpenMPIteratorVarDecl(VarDecl *VD) { +void SemaOpenMP::ActOnOpenMPIteratorVarDecl(VarDecl *VD) { if (DSAStack->getDeclareMapperVarRef()) DSAStack->addIteratorVarDecl(VD); } -bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const { - assert(LangOpts.OpenMP && "Expected OpenMP mode."); +bool SemaOpenMP::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const { + assert(getLangOpts().OpenMP && "Expected OpenMP mode."); const Expr *Ref = DSAStack->getDeclareMapperVarRef(); if (const auto *DRE = cast_or_null(Ref)) { if (VD->getCanonicalDecl() == DRE->getDecl()->getCanonicalDecl()) return true; - if (VD->isUsableInConstantExpressions(Context)) + if (VD->isUsableInConstantExpressions(getASTContext())) return true; - if (LangOpts.OpenMP >= 52 && DSAStack->isIteratorVarDecl(VD)) + if (getLangOpts().OpenMP >= 52 && DSAStack->isIteratorVarDecl(VD)) return true; return false; } return true; } -const ValueDecl *Sema::getOpenMPDeclareMapperVarName() const { - assert(LangOpts.OpenMP && "Expected OpenMP mode."); +const ValueDecl *SemaOpenMP::getOpenMPDeclareMapperVarName() const { + assert(getLangOpts().OpenMP && "Expected OpenMP mode."); return cast(DSAStack->getDeclareMapperVarRef())->getDecl(); } -OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNumTeamsClause(Expr *NumTeams, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = NumTeams; Stmt *HelperValStmt = nullptr; // OpenMP [teams Constrcut, Restrictions] // The num_teams expression must evaluate to a positive integer value. 
-  if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
+  if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_num_teams,
                                  /*StrictlyPositive=*/true))
     return nullptr;

   OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
-  OpenMPDirectiveKind CaptureRegion =
-      getOpenMPCaptureRegionForClause(DKind, OMPC_num_teams, LangOpts.OpenMP);
-  if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
-    ValExpr = MakeFullExpr(ValExpr).get();
+  OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
+      DKind, OMPC_num_teams, getLangOpts().OpenMP);
+  if (CaptureRegion != OMPD_unknown &&
+      !SemaRef.CurContext->isDependentContext()) {
+    ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
     llvm::MapVector Captures;
-    ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
-    HelperValStmt = buildPreInits(Context, Captures);
+    ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+    HelperValStmt = buildPreInits(getASTContext(), Captures);
   }

-  return new (Context) OMPNumTeamsClause(ValExpr, HelperValStmt, CaptureRegion,
-                                         StartLoc, LParenLoc, EndLoc);
+  return new (getASTContext()) OMPNumTeamsClause(
+      ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
 }

-OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
-                                              SourceLocation StartLoc,
-                                              SourceLocation LParenLoc,
-                                              SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
+                                                    SourceLocation StartLoc,
+                                                    SourceLocation LParenLoc,
+                                                    SourceLocation EndLoc) {
   Expr *ValExpr = ThreadLimit;
   Stmt *HelperValStmt = nullptr;

   // OpenMP [teams Constrcut, Restrictions]
   // The thread_limit expression must evaluate to a positive integer value.
-  if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
+  if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_thread_limit,
                                  /*StrictlyPositive=*/true))
     return nullptr;

   OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
   OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
-      DKind, OMPC_thread_limit, LangOpts.OpenMP);
-  if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
-    ValExpr = MakeFullExpr(ValExpr).get();
+      DKind, OMPC_thread_limit, getLangOpts().OpenMP);
+  if (CaptureRegion != OMPD_unknown &&
+      !SemaRef.CurContext->isDependentContext()) {
+    ValExpr = SemaRef.MakeFullExpr(ValExpr).get();
     llvm::MapVector Captures;
-    ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
-    HelperValStmt = buildPreInits(Context, Captures);
+    ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get();
+    HelperValStmt = buildPreInits(getASTContext(), Captures);
   }

-  return new (Context) OMPThreadLimitClause(
+  return new (getASTContext()) OMPThreadLimitClause(
       ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
 }

-OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
-                                           SourceLocation StartLoc,
-                                           SourceLocation LParenLoc,
-                                           SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPPriorityClause(Expr *Priority,
+                                                 SourceLocation StartLoc,
+                                                 SourceLocation LParenLoc,
+                                                 SourceLocation EndLoc) {
   Expr *ValExpr = Priority;
   Stmt *HelperValStmt = nullptr;
   OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
@@ -22979,20 +23158,20 @@ OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
   // OpenMP [2.9.1, task Constrcut]
   // The priority-value is a non-negative numerical scalar expression.
   if (!isNonNegativeIntegerValue(
-          ValExpr, *this, OMPC_priority,
+          ValExpr, SemaRef, OMPC_priority,
           /*StrictlyPositive=*/false, /*BuildCapture=*/true,
           DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
     return nullptr;

-  return new (Context) OMPPriorityClause(ValExpr, HelperValStmt, CaptureRegion,
-                                         StartLoc, LParenLoc, EndLoc);
+  return new (getASTContext()) OMPPriorityClause(
+      ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
 }

-OMPClause *Sema::ActOnOpenMPGrainsizeClause(
+OMPClause *SemaOpenMP::ActOnOpenMPGrainsizeClause(
     OpenMPGrainsizeClauseModifier Modifier, Expr *Grainsize,
     SourceLocation StartLoc, SourceLocation LParenLoc,
     SourceLocation ModifierLoc, SourceLocation EndLoc) {
-  assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+  assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 51) &&
          "Unexpected grainsize modifier in OpenMP < 51.");

   if (ModifierLoc.isValid() && Modifier == OMPC_GRAINSIZE_unknown) {
@@ -23010,23 +23189,23 @@ OMPClause *Sema::ActOnOpenMPGrainsizeClause(
   // OpenMP [2.9.2, taskloop Constrcut]
   // The parameter of the grainsize clause must be a positive integer
   // expression.
-  if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+  if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_grainsize,
                                  /*StrictlyPositive=*/true,
                                  /*BuildCapture=*/true,
                                  DSAStack->getCurrentDirective(),
                                  &CaptureRegion, &HelperValStmt))
     return nullptr;

-  return new (Context)
+  return new (getASTContext())
       OMPGrainsizeClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
                          StartLoc, LParenLoc, ModifierLoc, EndLoc);
 }

-OMPClause *Sema::ActOnOpenMPNumTasksClause(
+OMPClause *SemaOpenMP::ActOnOpenMPNumTasksClause(
     OpenMPNumTasksClauseModifier Modifier, Expr *NumTasks,
     SourceLocation StartLoc, SourceLocation LParenLoc,
     SourceLocation ModifierLoc, SourceLocation EndLoc) {
-  assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+  assert((ModifierLoc.isInvalid() || getLangOpts().OpenMP >= 51) &&
          "Unexpected num_tasks modifier in OpenMP < 51.");

   if (ModifierLoc.isValid() && Modifier == OMPC_NUMTASKS_unknown) {
@@ -23045,19 +23224,20 @@ OMPClause *Sema::ActOnOpenMPNumTasksClause(
   // The parameter of the num_tasks clause must be a positive integer
   // expression.
   if (!isNonNegativeIntegerValue(
-          ValExpr, *this, OMPC_num_tasks,
+          ValExpr, SemaRef, OMPC_num_tasks,
           /*StrictlyPositive=*/true, /*BuildCapture=*/true,
           DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
     return nullptr;

-  return new (Context)
+  return new (getASTContext())
       OMPNumTasksClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
                         StartLoc, LParenLoc, ModifierLoc, EndLoc);
 }

-OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
-                                       SourceLocation LParenLoc,
-                                       SourceLocation EndLoc) {
+OMPClause *SemaOpenMP::ActOnOpenMPHintClause(Expr *Hint,
+                                             SourceLocation StartLoc,
+                                             SourceLocation LParenLoc,
+                                             SourceLocation EndLoc) {
   // OpenMP [2.13.2, critical construct, Description]
   // ... where hint-expression is an integer constant expression that evaluates
   // to a valid lock hint.
@@ -23065,7 +23245,7 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint, false); if (HintExpr.isInvalid()) return nullptr; - return new (Context) + return new (getASTContext()) OMPHintClause(HintExpr.get(), StartLoc, LParenLoc, EndLoc); } @@ -23085,13 +23265,14 @@ static bool findOMPEventHandleT(Sema &S, SourceLocation Loc, return true; } -OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPDetachClause(Expr *Evt, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (!Evt->isValueDependent() && !Evt->isTypeDependent() && !Evt->isInstantiationDependent() && !Evt->containsUnexpandedParameterPack()) { - if (!findOMPEventHandleT(*this, Evt->getExprLoc(), DSAStack)) + if (!findOMPEventHandleT(SemaRef, Evt->getExprLoc(), DSAStack)) return nullptr; // OpenMP 5.0, 2.10.1 task Construct. // event-handle is a variable of the omp_event_handle_t type. @@ -23107,9 +23288,9 @@ OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, << "omp_event_handle_t" << 0 << Evt->getSourceRange(); return nullptr; } - if (!Context.hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(), - VD->getType()) || - VD->getType().isConstant(Context)) { + if (!getASTContext().hasSameUnqualifiedType(DSAStack->getOMPEventHandleT(), + VD->getType()) || + VD->getType().isConstant(getASTContext())) { Diag(Evt->getExprLoc(), diag::err_omp_var_expected) << "omp_event_handle_t" << 1 << VD->getType() << Evt->getSourceRange(); @@ -23124,15 +23305,16 @@ OMPClause *Sema::ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, Diag(Evt->getExprLoc(), diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_firstprivate); - reportOriginalDsa(*this, DSAStack, VD, DVar); + reportOriginalDsa(SemaRef, DSAStack, VD, DVar); return nullptr; } } - return new (Context) OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc); + return new (getASTContext()) + OMPDetachClause(Evt, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPDistScheduleClause( +OMPClause *SemaOpenMP::ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) { @@ -23163,7 +23345,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause( // chunk_size must be a loop invariant integer expression with a positive // value. 
if (std::optional Result = - ValExpr->getIntegerConstantExpr(Context)) { + ValExpr->getIntegerConstantExpr(getASTContext())) { if (Result->isSigned() && !Result->isStrictlyPositive()) { Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause) << "dist_schedule" << ChunkSize->getSourceRange(); @@ -23171,22 +23353,22 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause( } } else if (getOpenMPCaptureRegionForClause( DSAStack->getCurrentDirective(), OMPC_dist_schedule, - LangOpts.OpenMP) != OMPD_unknown && - !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + getLangOpts().OpenMP) != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } } } - return new (Context) + return new (getASTContext()) OMPDistScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc, Kind, ValExpr, HelperValStmt); } -OMPClause *Sema::ActOnOpenMPDefaultmapClause( +OMPClause *SemaOpenMP::ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc) { @@ -23213,10 +23395,10 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause( } else { bool isDefaultmapModifier = (M != OMPC_DEFAULTMAP_MODIFIER_unknown); bool isDefaultmapKind = (Kind != OMPC_DEFAULTMAP_unknown) || - (LangOpts.OpenMP >= 50 && KindLoc.isInvalid()); + (getLangOpts().OpenMP >= 50 && KindLoc.isInvalid()); if (!isDefaultmapKind || !isDefaultmapModifier) { StringRef KindValue = "'scalar', 'aggregate', 'pointer'"; - if (LangOpts.OpenMP == 50) { + if (getLangOpts().OpenMP == 50) { StringRef ModifierValue = "'alloc', 'from', 'to', 'tofrom', " "'firstprivate', 'none', 'default'"; if (!isDefaultmapKind && isDefaultmapModifier) { @@ -23268,13 +23450,13 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause( DSAStack->setDefaultDMAAttr(M, Kind, StartLoc); } - return new (Context) + return new (getASTContext()) OMPDefaultmapClause(StartLoc, LParenLoc, MLoc, KindLoc, EndLoc, Kind, M); } -bool Sema::ActOnStartOpenMPDeclareTargetContext( +bool SemaOpenMP::ActOnStartOpenMPDeclareTargetContext( DeclareTargetContextInfo &DTCI) { - DeclContext *CurLexicalContext = getCurLexicalContext(); + DeclContext *CurLexicalContext = SemaRef.getCurLexicalContext(); if (!CurLexicalContext->isFileContext() && !CurLexicalContext->isExternCContext() && !CurLexicalContext->isExternCXXContext() && @@ -23294,20 +23476,20 @@ bool Sema::ActOnStartOpenMPDeclareTargetContext( return true; } -const Sema::DeclareTargetContextInfo -Sema::ActOnOpenMPEndDeclareTargetDirective() { +const SemaOpenMP::DeclareTargetContextInfo +SemaOpenMP::ActOnOpenMPEndDeclareTargetDirective() { assert(!DeclareTargetNesting.empty() && "check isInOpenMPDeclareTargetContext() first!"); return DeclareTargetNesting.pop_back_val(); } -void Sema::ActOnFinishedOpenMPDeclareTargetContext( +void SemaOpenMP::ActOnFinishedOpenMPDeclareTargetContext( DeclareTargetContextInfo &DTCI) { for (auto &It : DTCI.ExplicitlyMapped) ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT, DTCI); } -void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() { +void SemaOpenMP::DiagnoseUnterminatedOpenMPDeclareTarget() { if (DeclareTargetNesting.empty()) 
return; DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back(); @@ -23315,23 +23497,23 @@ void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() { << getOpenMPDirectiveName(DTCI.Kind); } -NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope, - CXXScopeSpec &ScopeSpec, - const DeclarationNameInfo &Id) { - LookupResult Lookup(*this, Id, LookupOrdinaryName); - LookupParsedName(Lookup, CurScope, &ScopeSpec, true); +NamedDecl *SemaOpenMP::lookupOpenMPDeclareTargetName( + Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id) { + LookupResult Lookup(SemaRef, Id, Sema::LookupOrdinaryName); + SemaRef.LookupParsedName(Lookup, CurScope, &ScopeSpec, true); if (Lookup.isAmbiguous()) return nullptr; Lookup.suppressDiagnostics(); if (!Lookup.isSingleResult()) { - VarOrFuncDeclFilterCCC CCC(*this); + VarOrFuncDeclFilterCCC CCC(SemaRef); if (TypoCorrection Corrected = - CorrectTypo(Id, LookupOrdinaryName, CurScope, nullptr, CCC, - CTK_ErrorRecovery)) { - diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest) - << Id.getName()); + SemaRef.CorrectTypo(Id, Sema::LookupOrdinaryName, CurScope, nullptr, + CCC, Sema::CTK_ErrorRecovery)) { + SemaRef.diagnoseTypo(Corrected, + SemaRef.PDiag(diag::err_undeclared_var_use_suggest) + << Id.getName()); checkDeclIsAllowedInOpenMPTarget(nullptr, Corrected.getCorrectionDecl()); return nullptr; } @@ -23349,9 +23531,9 @@ NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope, return ND; } -void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, - OMPDeclareTargetDeclAttr::MapTypeTy MT, - DeclareTargetContextInfo &DTCI) { +void SemaOpenMP::ActOnOpenMPDeclareTargetName( + NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, + DeclareTargetContextInfo &DTCI) { assert((isa(ND) || isa(ND) || isa(ND)) && "Expected variable, function or function template."); @@ -23367,7 +23549,7 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, } // Diagnose marking after use as it may lead to incorrect diagnosis and // codegen. - if (LangOpts.OpenMP >= 50 && + if (getLangOpts().OpenMP >= 50 && (ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced())) Diag(Loc, diag::warn_omp_declare_target_after_first_use); @@ -23406,14 +23588,14 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, IsIndirect = true; } auto *A = OMPDeclareTargetDeclAttr::CreateImplicit( - Context, MT, DTCI.DT, IndirectE, IsIndirect, Level, + getASTContext(), MT, DTCI.DT, IndirectE, IsIndirect, Level, SourceRange(Loc, Loc)); ND->addAttr(A); - if (ASTMutationListener *ML = Context.getASTMutationListener()) + if (ASTMutationListener *ML = getASTContext().getASTMutationListener()) ML->DeclarationMarkedOpenMPDeclareTarget(ND, A); checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Loc); if (auto *VD = dyn_cast(ND); - LangOpts.OpenMP && VD && VD->hasAttr() && + getLangOpts().OpenMP && VD && VD->hasAttr() && VD->hasGlobalStorage()) ActOnOpenMPDeclareTargetInitializer(ND); } @@ -23457,8 +23639,8 @@ static bool checkValueDeclInTarget(SourceLocation SL, SourceRange SR, /*FullCheck=*/false); } -void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, - SourceLocation IdLoc) { +void SemaOpenMP::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, + SourceLocation IdLoc) { if (!D || D->isInvalidDecl()) return; SourceRange SR = E ? E->getSourceRange() : D->getSourceRange(); @@ -23472,7 +23654,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, // directive. 
if (DSAStack->isThreadPrivate(VD)) { Diag(SL, diag::err_omp_threadprivate_in_target); - reportOriginalDsa(*this, DSAStack, VD, DSAStack->getTopDSA(VD, false)); + reportOriginalDsa(SemaRef, DSAStack, VD, DSAStack->getTopDSA(VD, false)); return; } } @@ -23491,7 +23673,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, // Problem if any with var declared with incomplete type will be reported // as normal, so no need to check it here. if ((E || !VD->getType()->isIncompleteType()) && - !checkValueDeclInTarget(SL, SR, *this, DSAStack, VD)) + !checkValueDeclInTarget(SL, SR, SemaRef, DSAStack, VD)) return; if (!E && isInOpenMPDeclareTargetContext()) { // Checking declaration inside declare target region. @@ -23511,13 +23693,13 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, IsIndirect = true; } auto *A = OMPDeclareTargetDeclAttr::CreateImplicit( - Context, + getASTContext(), getLangOpts().OpenMP >= 52 ? OMPDeclareTargetDeclAttr::MT_Enter : OMPDeclareTargetDeclAttr::MT_To, DTCI.DT, IndirectE, IsIndirect, Level, SourceRange(DTCI.Loc, DTCI.Loc)); D->addAttr(A); - if (ASTMutationListener *ML = Context.getASTMutationListener()) + if (ASTMutationListener *ML = getASTContext().getASTMutationListener()) ML->DeclarationMarkedOpenMPDeclareTarget(D, A); } return; @@ -23525,7 +23707,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, } if (!E) return; - checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D); + checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), SemaRef, D); } /// This class visits every VarDecl that the initializer references and adds @@ -23571,13 +23753,13 @@ class GlobalDeclRefChecker final /// Adding OMPDeclareTargetDeclAttr to variables with static storage /// duration that are referenced in the initializer expression list of /// variables with static storage duration in declare target directive. 
-void Sema::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) { +void SemaOpenMP::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) { GlobalDeclRefChecker Checker; if (isa(TargetDecl)) Checker.declareTargetInitializer(TargetDecl); } -OMPClause *Sema::ActOnOpenMPToClause( +OMPClause *SemaOpenMP::ActOnOpenMPToClause( ArrayRef MotionModifiers, ArrayRef MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, @@ -23603,18 +23785,18 @@ OMPClause *Sema::ActOnOpenMPToClause( } MappableVarListInfo MVLI(VarList); - checkMappableExpressionList(*this, DSAStack, OMPC_to, MVLI, Locs.StartLoc, + checkMappableExpressionList(SemaRef, DSAStack, OMPC_to, MVLI, Locs.StartLoc, MapperIdScopeSpec, MapperId, UnresolvedMappers); if (MVLI.ProcessedVarList.empty()) return nullptr; return OMPToClause::Create( - Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc, - MapperIdScopeSpec.getWithLocInContext(Context), MapperId); + MapperIdScopeSpec.getWithLocInContext(getASTContext()), MapperId); } -OMPClause *Sema::ActOnOpenMPFromClause( +OMPClause *SemaOpenMP::ActOnOpenMPFromClause( ArrayRef MotionModifiers, ArrayRef MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, @@ -23640,19 +23822,20 @@ OMPClause *Sema::ActOnOpenMPFromClause( } MappableVarListInfo MVLI(VarList); - checkMappableExpressionList(*this, DSAStack, OMPC_from, MVLI, Locs.StartLoc, + checkMappableExpressionList(SemaRef, DSAStack, OMPC_from, MVLI, Locs.StartLoc, MapperIdScopeSpec, MapperId, UnresolvedMappers); if (MVLI.ProcessedVarList.empty()) return nullptr; return OMPFromClause::Create( - Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, MVLI.VarComponents, MVLI.UDMapperList, Modifiers, ModifiersLoc, - MapperIdScopeSpec.getWithLocInContext(Context), MapperId); + MapperIdScopeSpec.getWithLocInContext(getASTContext()), MapperId); } -OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs) { +OMPClause * +SemaOpenMP::ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs) { MappableVarListInfo MVLI(VarList); SmallVector PrivateCopies; SmallVector Inits; @@ -23662,7 +23845,7 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. MVLI.ProcessedVarList.push_back(RefExpr); @@ -23687,30 +23870,30 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, // Build the private variable and the expression that refers to it. auto VDPrivate = - buildVarDecl(*this, ELoc, Type, D->getName(), + buildVarDecl(SemaRef, ELoc, Type, D->getName(), D->hasAttrs() ? &D->getAttrs() : nullptr, VD ? cast(SimpleRefExpr) : nullptr); if (VDPrivate->isInvalidDecl()) continue; - CurContext->addDecl(VDPrivate); + SemaRef.CurContext->addDecl(VDPrivate); DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr( - *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc); + SemaRef, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc); // Add temporary variable to initialize the private copy of the pointer. 
VarDecl *VDInit = - buildVarDecl(*this, RefExpr->getExprLoc(), Type, ".devptr.temp"); + buildVarDecl(SemaRef, RefExpr->getExprLoc(), Type, ".devptr.temp"); DeclRefExpr *VDInitRefExpr = buildDeclRefExpr( - *this, VDInit, RefExpr->getType(), RefExpr->getExprLoc()); - AddInitializerToDecl(VDPrivate, - DefaultLvalueConversion(VDInitRefExpr).get(), - /*DirectInit=*/false); + SemaRef, VDInit, RefExpr->getType(), RefExpr->getExprLoc()); + SemaRef.AddInitializerToDecl( + VDPrivate, SemaRef.DefaultLvalueConversion(VDInitRefExpr).get(), + /*DirectInit=*/false); // If required, build a capture to implement the privatization initialized // with the current list item value. DeclRefExpr *Ref = nullptr; if (!VD) - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref); PrivateCopies.push_back(VDPrivateRefExpr); Inits.push_back(VDInitRefExpr); @@ -23732,12 +23915,13 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef VarList, return nullptr; return OMPUseDevicePtrClause::Create( - Context, Locs, MVLI.ProcessedVarList, PrivateCopies, Inits, + getASTContext(), Locs, MVLI.ProcessedVarList, PrivateCopies, Inits, MVLI.VarBaseDeclarations, MVLI.VarComponents); } -OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs) { +OMPClause * +SemaOpenMP::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs) { MappableVarListInfo MVLI(VarList); for (Expr *RefExpr : VarList) { @@ -23745,7 +23929,7 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange, + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange, /*AllowArraySection=*/true); if (Res.second) { // It will be analyzed later. @@ -23760,7 +23944,7 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, // with the current list item value. DeclRefExpr *Ref = nullptr; if (!VD) - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); MVLI.ProcessedVarList.push_back(VD ? 
RefExpr->IgnoreParens() : Ref); // We need to add a data sharing attribute for this variable to make sure it @@ -23775,7 +23959,8 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, Expr *Component = SimpleRefExpr; if (VD && (isa(RefExpr->IgnoreParenImpCasts()) || isa(RefExpr->IgnoreParenImpCasts()))) - Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get(); + Component = + SemaRef.DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get(); MVLI.VarComponents.back().emplace_back(Component, D, /*IsNonContiguous=*/false); } @@ -23783,20 +23968,21 @@ OMPClause *Sema::ActOnOpenMPUseDeviceAddrClause(ArrayRef VarList, if (MVLI.ProcessedVarList.empty()) return nullptr; - return OMPUseDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList, - MVLI.VarBaseDeclarations, - MVLI.VarComponents); + return OMPUseDeviceAddrClause::Create( + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + MVLI.VarComponents); } -OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs) { +OMPClause * +SemaOpenMP::ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs) { MappableVarListInfo MVLI(VarList); for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. MVLI.ProcessedVarList.push_back(RefExpr); @@ -23822,7 +24008,7 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_is_device_ptr) << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -23866,20 +24052,21 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef VarList, if (MVLI.ProcessedVarList.empty()) return nullptr; - return OMPIsDevicePtrClause::Create(Context, Locs, MVLI.ProcessedVarList, - MVLI.VarBaseDeclarations, - MVLI.VarComponents); + return OMPIsDevicePtrClause::Create( + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + MVLI.VarComponents); } -OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, - const OMPVarListLocTy &Locs) { +OMPClause * +SemaOpenMP::ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, + const OMPVarListLocTy &Locs) { MappableVarListInfo MVLI(VarList); for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP has_device_addr clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange, + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange, /*AllowArraySection=*/true); if (Res.second) { // It will be analyzed later. 
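An illustrative usage sketch, assumed rather than taken from the patch, of the device-pointer clauses whose semantic checks are moved into SemaOpenMP above (use_device_ptr and is_device_ptr shown; use_device_addr and has_device_addr are the address-based analogues). The function and variable names are invented for the example.

#include <cstddef>

// Assumed example: 'x' and 'y' are mapped arrays; use_device_ptr rewrites the
// host pointers to their device counterparts inside the region, and
// is_device_ptr asserts that the pointers handed to 'target' already refer to
// device memory, so no implicit map is generated for them.
void saxpy(float *x, float *y, float a, std::size_t n) {
#pragma omp target data map(to : x[0:n]) map(tofrom : y[0:n])
  {
#pragma omp target data use_device_ptr(x, y)
    {
#pragma omp target is_device_ptr(x, y)
      for (std::size_t i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }
  }
}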
@@ -23897,7 +24084,7 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_has_device_addr) << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); - reportOriginalDsa(*this, DSAStack, D, DVar); + reportOriginalDsa(SemaRef, DSAStack, D, DVar); continue; } @@ -23922,16 +24109,17 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, auto *VD = dyn_cast(D); if (VD && (isa(RefExpr->IgnoreParenImpCasts()) || isa(RefExpr->IgnoreParenImpCasts()))) - Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get(); + Component = + SemaRef.DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get(); OMPClauseMappableExprCommon::MappableComponent MC( Component, D, /*IsNonContiguous=*/false); DSAStack->addMappableExpressionComponents( D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr); // Record the expression we've just processed. - if (!VD && !CurContext->isDependentContext()) { + if (!VD && !SemaRef.CurContext->isDependentContext()) { DeclRefExpr *Ref = - buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true); + buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/true); assert(Ref && "has_device_addr capture failed"); MVLI.ProcessedVarList.push_back(Ref); } else @@ -23952,27 +24140,27 @@ OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef VarList, if (MVLI.ProcessedVarList.empty()) return nullptr; - return OMPHasDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList, - MVLI.VarBaseDeclarations, - MVLI.VarComponents); + return OMPHasDeviceAddrClause::Create( + getASTContext(), Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations, + MVLI.VarComponents); } -OMPClause *Sema::ActOnOpenMPAllocateClause( +OMPClause *SemaOpenMP::ActOnOpenMPAllocateClause( Expr *Allocator, ArrayRef VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { if (Allocator) { // OpenMP [2.11.4 allocate Clause, Description] // allocator is an expression of omp_allocator_handle_t type. - if (!findOMPAllocatorHandleT(*this, Allocator->getExprLoc(), DSAStack)) + if (!findOMPAllocatorHandleT(SemaRef, Allocator->getExprLoc(), DSAStack)) return nullptr; - ExprResult AllocatorRes = DefaultLvalueConversion(Allocator); + ExprResult AllocatorRes = SemaRef.DefaultLvalueConversion(Allocator); if (AllocatorRes.isInvalid()) return nullptr; - AllocatorRes = PerformImplicitConversion(AllocatorRes.get(), - DSAStack->getOMPAllocatorHandleT(), - Sema::AA_Initializing, - /*AllowExplicit=*/true); + AllocatorRes = SemaRef.PerformImplicitConversion( + AllocatorRes.get(), DSAStack->getOMPAllocatorHandleT(), + Sema::AA_Initializing, + /*AllowExplicit=*/true); if (AllocatorRes.isInvalid()) return nullptr; Allocator = AllocatorRes.get(); @@ -23982,9 +24170,9 @@ OMPClause *Sema::ActOnOpenMPAllocateClause( // target region must specify an allocator expression unless a requires // directive with the dynamic_allocators clause is present in the same // compilation unit. - if (LangOpts.OpenMPIsTargetDevice && + if (getLangOpts().OpenMPIsTargetDevice && !DSAStack->hasRequiresDeclWithClause()) - targetDiag(StartLoc, diag::err_expected_allocator_expression); + SemaRef.targetDiag(StartLoc, diag::err_expected_allocator_expression); } // Analyze and build list of variables. 
SmallVector Vars; @@ -23993,7 +24181,7 @@ OMPClause *Sema::ActOnOpenMPAllocateClause( SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) { // It will be analyzed later. Vars.push_back(RefExpr); @@ -24004,9 +24192,9 @@ OMPClause *Sema::ActOnOpenMPAllocateClause( auto *VD = dyn_cast(D); DeclRefExpr *Ref = nullptr; - if (!VD && !CurContext->isDependentContext()) - Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false); - Vars.push_back((VD || CurContext->isDependentContext()) + if (!VD && !SemaRef.CurContext->isDependentContext()) + Ref = buildCapture(SemaRef, D, SimpleRefExpr, /*WithInit=*/false); + Vars.push_back((VD || SemaRef.CurContext->isDependentContext()) ? RefExpr->IgnoreParens() : Ref); } @@ -24016,21 +24204,21 @@ OMPClause *Sema::ActOnOpenMPAllocateClause( if (Allocator) DSAStack->addInnerAllocatorExpr(Allocator); - return OMPAllocateClause::Create(Context, StartLoc, LParenLoc, Allocator, - ColonLoc, EndLoc, Vars); + return OMPAllocateClause::Create(getASTContext(), StartLoc, LParenLoc, + Allocator, ColonLoc, EndLoc, Vars); } -OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPNontemporalClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP nontemporal clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange); + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange); if (Res.second) // It will be analyzed later. Vars.push_back(RefExpr); @@ -24055,32 +24243,34 @@ OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPNontemporalClause::Create(Context, StartLoc, LParenLoc, EndLoc, - Vars); + return OMPNontemporalClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, Vars); } -StmtResult Sema::ActOnOpenMPScopeDirective(ArrayRef Clauses, - Stmt *AStmt, SourceLocation StartLoc, - SourceLocation EndLoc) { +StmtResult SemaOpenMP::ActOnOpenMPScopeDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { if (!AStmt) return StmtError(); - setFunctionHasBranchProtectedScope(); + SemaRef.setFunctionHasBranchProtectedScope(); - return OMPScopeDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); + return OMPScopeDirective::Create(getASTContext(), StartLoc, EndLoc, Clauses, + AStmt); } -OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPInclusiveClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP nontemporal clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange, + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange, /*AllowArraySection=*/true); if (Res.second) // It will be analyzed later. 
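A minimal sketch, assumed rather than taken from the patch, of the allocate-clause rules that ActOnOpenMPAllocateClause above enforces: the allocator expression must convert to omp_allocator_handle_t, and on a target construct an allocator (or a requires directive with dynamic_allocators) is mandatory. The function and variable names are invented.

#include <omp.h>

void scale(int n) {
  int tmp[64];
  // omp_high_bw_mem_alloc is a predefined omp_allocator_handle_t value, so it
  // satisfies the implicit-conversion check performed on the allocator
  // argument. A variable named in allocate() must also appear in a
  // data-sharing clause on the same directive, hence private(tmp).
#pragma omp parallel private(tmp) allocate(omp_high_bw_mem_alloc : tmp)
  {
    tmp[0] = n;
  }
  // On '#pragma omp target' the same clause without an allocator would reach
  // the err_expected_allocator_expression path above unless the translation
  // unit contains '#pragma omp requires dynamic_allocators'.
}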
@@ -24107,20 +24297,21 @@ OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPInclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); + return OMPInclusiveClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, Vars); } -OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef VarList, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPExclusiveClause(ArrayRef VarList, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { SmallVector Vars; for (Expr *RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP nontemporal clause."); SourceLocation ELoc; SourceRange ERange; Expr *SimpleRefExpr = RefExpr; - auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange, + auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange, /*AllowArraySection=*/true); if (Res.second) // It will be analyzed later. @@ -24150,7 +24341,8 @@ OMPClause *Sema::ActOnOpenMPExclusiveClause(ArrayRef VarList, if (Vars.empty()) return nullptr; - return OMPExclusiveClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); + return OMPExclusiveClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, Vars); } /// Tries to find omp_alloctrait_t type. @@ -24168,19 +24360,20 @@ static bool findOMPAlloctraitT(Sema &S, SourceLocation Loc, DSAStackTy *Stack) { return true; } -OMPClause *Sema::ActOnOpenMPUsesAllocatorClause( +OMPClause *SemaOpenMP::ActOnOpenMPUsesAllocatorClause( SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef Data) { + ASTContext &Context = getASTContext(); // OpenMP [2.12.5, target Construct] // allocator is an identifier of omp_allocator_handle_t type. - if (!findOMPAllocatorHandleT(*this, StartLoc, DSAStack)) + if (!findOMPAllocatorHandleT(SemaRef, StartLoc, DSAStack)) return nullptr; // OpenMP [2.12.5, target Construct] // allocator-traits-array is an identifier of const omp_alloctrait_t * type. if (llvm::any_of( Data, [](const UsesAllocatorsData &D) { return D.AllocatorTraits; }) && - !findOMPAlloctraitT(*this, StartLoc, DSAStack)) + !findOMPAlloctraitT(SemaRef, StartLoc, DSAStack)) return nullptr; llvm::SmallPtrSet, 4> PredefinedAllocators; for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) { @@ -24188,8 +24381,8 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause( StringRef Allocator = OMPAllocateDeclAttr::ConvertAllocatorTypeTyToStr(AllocatorKind); DeclarationName AllocatorName = &Context.Idents.get(Allocator); - PredefinedAllocators.insert(LookupSingleName( - TUScope, AllocatorName, StartLoc, Sema::LookupAnyName)); + PredefinedAllocators.insert(SemaRef.LookupSingleName( + SemaRef.TUScope, AllocatorName, StartLoc, Sema::LookupAnyName)); } SmallVector NewData; @@ -24206,7 +24399,7 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause( bool IsPredefinedAllocator = false; if (DRE) { OMPAllocateDeclAttr::AllocatorTypeTy AllocatorTy = - getAllocatorKind(*this, DSAStack, AllocatorExpr); + getAllocatorKind(SemaRef, DSAStack, AllocatorExpr); IsPredefinedAllocator = AllocatorTy != OMPAllocateDeclAttr::AllocatorTypeTy::OMPUserDefinedMemAlloc; @@ -24251,7 +24444,7 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause( } // No allocator traits - just convert it to rvalue. 
if (!D.AllocatorTraits) - AllocatorExpr = DefaultLvalueConversion(AllocatorExpr).get(); + AllocatorExpr = SemaRef.DefaultLvalueConversion(AllocatorExpr).get(); DSAStack->addUsesAllocatorsDecl( DRE->getDecl(), IsPredefinedAllocator @@ -24298,11 +24491,11 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause( NewD.LParenLoc = D.LParenLoc; NewD.RParenLoc = D.RParenLoc; } - return OMPUsesAllocatorsClause::Create(Context, StartLoc, LParenLoc, EndLoc, - NewData); + return OMPUsesAllocatorsClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, NewData); } -OMPClause *Sema::ActOnOpenMPAffinityClause( +OMPClause *SemaOpenMP::ActOnOpenMPAffinityClause( SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef Locators) { SmallVector Vars; @@ -24325,8 +24518,8 @@ OMPClause *Sema::ActOnOpenMPAffinityClause( ExprResult Res; { - Sema::TentativeAnalysisScope Trap(*this); - Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr); + Sema::TentativeAnalysisScope Trap(SemaRef); + Res = SemaRef.CreateBuiltinUnaryOp(ELoc, UO_AddrOf, SimpleExpr); } if (!Res.isUsable() && !isa(SimpleExpr) && !isa(SimpleExpr)) { @@ -24337,15 +24530,15 @@ OMPClause *Sema::ActOnOpenMPAffinityClause( Vars.push_back(SimpleExpr); } - return OMPAffinityClause::Create(Context, StartLoc, LParenLoc, ColonLoc, - EndLoc, Modifier, Vars); + return OMPAffinityClause::Create(getASTContext(), StartLoc, LParenLoc, + ColonLoc, EndLoc, Modifier, Vars); } -OMPClause *Sema::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, - SourceLocation KindLoc, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, + SourceLocation KindLoc, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { if (Kind == OMPC_BIND_unknown) { Diag(KindLoc, diag::err_omp_unexpected_clause_value) << getListOfPossibleValues(OMPC_bind, /*First=*/0, @@ -24354,39 +24547,40 @@ OMPClause *Sema::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind, return nullptr; } - return OMPBindClause::Create(Context, Kind, KindLoc, StartLoc, LParenLoc, - EndLoc); + return OMPBindClause::Create(getASTContext(), Kind, KindLoc, StartLoc, + LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPXDynCGroupMemClause(Expr *Size, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { +OMPClause *SemaOpenMP::ActOnOpenMPXDynCGroupMemClause(Expr *Size, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { Expr *ValExpr = Size; Stmt *HelperValStmt = nullptr; // OpenMP [2.5, Restrictions] // The ompx_dyn_cgroup_mem expression must evaluate to a positive integer // value. 
- if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_ompx_dyn_cgroup_mem, + if (!isNonNegativeIntegerValue(ValExpr, SemaRef, OMPC_ompx_dyn_cgroup_mem, /*StrictlyPositive=*/false)) return nullptr; OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective(); OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause( - DKind, OMPC_ompx_dyn_cgroup_mem, LangOpts.OpenMP); - if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) { - ValExpr = MakeFullExpr(ValExpr).get(); + DKind, OMPC_ompx_dyn_cgroup_mem, getLangOpts().OpenMP); + if (CaptureRegion != OMPD_unknown && + !SemaRef.CurContext->isDependentContext()) { + ValExpr = SemaRef.MakeFullExpr(ValExpr).get(); llvm::MapVector Captures; - ValExpr = tryBuildCapture(*this, ValExpr, Captures).get(); - HelperValStmt = buildPreInits(Context, Captures); + ValExpr = tryBuildCapture(SemaRef, ValExpr, Captures).get(); + HelperValStmt = buildPreInits(getASTContext(), Captures); } - return new (Context) OMPXDynCGroupMemClause( + return new (getASTContext()) OMPXDynCGroupMemClause( ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPDoacrossClause( +OMPClause *SemaOpenMP::ActOnOpenMPDoacrossClause( OpenMPDoacrossClauseModifier DepType, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { @@ -24405,7 +24599,7 @@ OMPClause *Sema::ActOnOpenMPDoacrossClause( DSAStackTy::OperatorOffsetTy OpsOffs; llvm::APSInt TotalDepCount(/*BitWidth=*/32); DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon( - *this, + SemaRef, DepType == OMPC_DOACROSS_source || DepType == OMPC_DOACROSS_source_omp_cur_iteration || DepType == OMPC_DOACROSS_sink_omp_cur_iteration, @@ -24413,22 +24607,587 @@ OMPClause *Sema::ActOnOpenMPDoacrossClause( Vars = VarOffset.Vars; OpsOffs = VarOffset.OpsOffs; TotalDepCount = VarOffset.TotalDepCount; - auto *C = OMPDoacrossClause::Create(Context, StartLoc, LParenLoc, EndLoc, - DepType, DepLoc, ColonLoc, Vars, + auto *C = OMPDoacrossClause::Create(getASTContext(), StartLoc, LParenLoc, + EndLoc, DepType, DepLoc, ColonLoc, Vars, TotalDepCount.getZExtValue()); if (DSAStack->isParentOrderedRegion()) DSAStack->addDoacrossDependClause(C, OpsOffs); return C; } -OMPClause *Sema::ActOnOpenMPXAttributeClause(ArrayRef Attrs, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc) { - return new (Context) OMPXAttributeClause(Attrs, StartLoc, LParenLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPXAttributeClause(ArrayRef Attrs, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { + return new (getASTContext()) + OMPXAttributeClause(Attrs, StartLoc, LParenLoc, EndLoc); } -OMPClause *Sema::ActOnOpenMPXBareClause(SourceLocation StartLoc, - SourceLocation EndLoc) { - return new (Context) OMPXBareClause(StartLoc, EndLoc); +OMPClause *SemaOpenMP::ActOnOpenMPXBareClause(SourceLocation StartLoc, + SourceLocation EndLoc) { + return new (getASTContext()) OMPXBareClause(StartLoc, EndLoc); +} + +ExprResult SemaOpenMP::ActOnOMPArraySectionExpr( + Expr *Base, SourceLocation LBLoc, Expr *LowerBound, + SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, + Expr *Stride, SourceLocation RBLoc) { + ASTContext &Context = getASTContext(); + if (Base->hasPlaceholderType() && + !Base->hasPlaceholderType(BuiltinType::OMPArraySection)) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(Base); + if (Result.isInvalid()) + return ExprError(); + 
Base = Result.get(); + } + if (LowerBound && LowerBound->getType()->isNonOverloadPlaceholderType()) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(LowerBound); + if (Result.isInvalid()) + return ExprError(); + Result = SemaRef.DefaultLvalueConversion(Result.get()); + if (Result.isInvalid()) + return ExprError(); + LowerBound = Result.get(); + } + if (Length && Length->getType()->isNonOverloadPlaceholderType()) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(Length); + if (Result.isInvalid()) + return ExprError(); + Result = SemaRef.DefaultLvalueConversion(Result.get()); + if (Result.isInvalid()) + return ExprError(); + Length = Result.get(); + } + if (Stride && Stride->getType()->isNonOverloadPlaceholderType()) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(Stride); + if (Result.isInvalid()) + return ExprError(); + Result = SemaRef.DefaultLvalueConversion(Result.get()); + if (Result.isInvalid()) + return ExprError(); + Stride = Result.get(); + } + + // Build an unanalyzed expression if either operand is type-dependent. + if (Base->isTypeDependent() || + (LowerBound && + (LowerBound->isTypeDependent() || LowerBound->isValueDependent())) || + (Length && (Length->isTypeDependent() || Length->isValueDependent())) || + (Stride && (Stride->isTypeDependent() || Stride->isValueDependent()))) { + return new (Context) OMPArraySectionExpr( + Base, LowerBound, Length, Stride, Context.DependentTy, VK_LValue, + OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); + } + + // Perform default conversions. + QualType OriginalTy = OMPArraySectionExpr::getBaseOriginalType(Base); + QualType ResultTy; + if (OriginalTy->isAnyPointerType()) { + ResultTy = OriginalTy->getPointeeType(); + } else if (OriginalTy->isArrayType()) { + ResultTy = OriginalTy->getAsArrayTypeUnsafe()->getElementType(); + } else { + return ExprError( + Diag(Base->getExprLoc(), diag::err_omp_typecheck_section_value) + << Base->getSourceRange()); + } + // C99 6.5.2.1p1 + if (LowerBound) { + auto Res = PerformOpenMPImplicitIntegerConversion(LowerBound->getExprLoc(), + LowerBound); + if (Res.isInvalid()) + return ExprError(Diag(LowerBound->getExprLoc(), + diag::err_omp_typecheck_section_not_integer) + << 0 << LowerBound->getSourceRange()); + LowerBound = Res.get(); + + if (LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || + LowerBound->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) + Diag(LowerBound->getExprLoc(), diag::warn_omp_section_is_char) + << 0 << LowerBound->getSourceRange(); + } + if (Length) { + auto Res = + PerformOpenMPImplicitIntegerConversion(Length->getExprLoc(), Length); + if (Res.isInvalid()) + return ExprError(Diag(Length->getExprLoc(), + diag::err_omp_typecheck_section_not_integer) + << 1 << Length->getSourceRange()); + Length = Res.get(); + + if (Length->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || + Length->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) + Diag(Length->getExprLoc(), diag::warn_omp_section_is_char) + << 1 << Length->getSourceRange(); + } + if (Stride) { + ExprResult Res = + PerformOpenMPImplicitIntegerConversion(Stride->getExprLoc(), Stride); + if (Res.isInvalid()) + return ExprError(Diag(Stride->getExprLoc(), + diag::err_omp_typecheck_section_not_integer) + << 1 << Stride->getSourceRange()); + Stride = Res.get(); + + if (Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || + Stride->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) + Diag(Stride->getExprLoc(), diag::warn_omp_section_is_char) + << 1 << Stride->getSourceRange(); + 
} + + // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly, + // C++ [expr.sub]p1: The type "T" shall be a completely-defined object + // type. Note that functions are not objects, and that (in C99 parlance) + // incomplete types are not object types. + if (ResultTy->isFunctionType()) { + Diag(Base->getExprLoc(), diag::err_omp_section_function_type) + << ResultTy << Base->getSourceRange(); + return ExprError(); + } + + if (SemaRef.RequireCompleteType(Base->getExprLoc(), ResultTy, + diag::err_omp_section_incomplete_type, Base)) + return ExprError(); + + if (LowerBound && !OriginalTy->isAnyPointerType()) { + Expr::EvalResult Result; + if (LowerBound->EvaluateAsInt(Result, Context)) { + // OpenMP 5.0, [2.1.5 Array Sections] + // The array section must be a subset of the original array. + llvm::APSInt LowerBoundValue = Result.Val.getInt(); + if (LowerBoundValue.isNegative()) { + Diag(LowerBound->getExprLoc(), + diag::err_omp_section_not_subset_of_array) + << LowerBound->getSourceRange(); + return ExprError(); + } + } + } + + if (Length) { + Expr::EvalResult Result; + if (Length->EvaluateAsInt(Result, Context)) { + // OpenMP 5.0, [2.1.5 Array Sections] + // The length must evaluate to non-negative integers. + llvm::APSInt LengthValue = Result.Val.getInt(); + if (LengthValue.isNegative()) { + Diag(Length->getExprLoc(), diag::err_omp_section_length_negative) + << toString(LengthValue, /*Radix=*/10, /*Signed=*/true) + << Length->getSourceRange(); + return ExprError(); + } + } + } else if (ColonLocFirst.isValid() && + (OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() && + !OriginalTy->isVariableArrayType()))) { + // OpenMP 5.0, [2.1.5 Array Sections] + // When the size of the array dimension is not known, the length must be + // specified explicitly. + Diag(ColonLocFirst, diag::err_omp_section_length_undefined) + << (!OriginalTy.isNull() && OriginalTy->isArrayType()); + return ExprError(); + } + + if (Stride) { + Expr::EvalResult Result; + if (Stride->EvaluateAsInt(Result, Context)) { + // OpenMP 5.0, [2.1.5 Array Sections] + // The stride must evaluate to a positive integer. + llvm::APSInt StrideValue = Result.Val.getInt(); + if (!StrideValue.isStrictlyPositive()) { + Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive) + << toString(StrideValue, /*Radix=*/10, /*Signed=*/true) + << Stride->getSourceRange(); + return ExprError(); + } + } + } + + if (!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) { + ExprResult Result = SemaRef.DefaultFunctionArrayLvalueConversion(Base); + if (Result.isInvalid()) + return ExprError(); + Base = Result.get(); + } + return new (Context) OMPArraySectionExpr( + Base, LowerBound, Length, Stride, Context.OMPArraySectionTy, VK_LValue, + OK_Ordinary, ColonLocFirst, ColonLocSecond, RBLoc); } + +ExprResult SemaOpenMP::ActOnOMPArrayShapingExpr( + Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, + ArrayRef Dims, ArrayRef Brackets) { + ASTContext &Context = getASTContext(); + if (Base->hasPlaceholderType()) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(Base); + if (Result.isInvalid()) + return ExprError(); + Result = SemaRef.DefaultLvalueConversion(Result.get()); + if (Result.isInvalid()) + return ExprError(); + Base = Result.get(); + } + QualType BaseTy = Base->getType(); + // Delay analysis of the types/expressions if instantiation/specialization is + // required. 
+ if (!BaseTy->isPointerType() && Base->isTypeDependent()) + return OMPArrayShapingExpr::Create(Context, Context.DependentTy, Base, + LParenLoc, RParenLoc, Dims, Brackets); + if (!BaseTy->isPointerType() || + (!Base->isTypeDependent() && + BaseTy->getPointeeType()->isIncompleteType())) + return ExprError(Diag(Base->getExprLoc(), + diag::err_omp_non_pointer_type_array_shaping_base) + << Base->getSourceRange()); + + SmallVector NewDims; + bool ErrorFound = false; + for (Expr *Dim : Dims) { + if (Dim->hasPlaceholderType()) { + ExprResult Result = SemaRef.CheckPlaceholderExpr(Dim); + if (Result.isInvalid()) { + ErrorFound = true; + continue; + } + Result = SemaRef.DefaultLvalueConversion(Result.get()); + if (Result.isInvalid()) { + ErrorFound = true; + continue; + } + Dim = Result.get(); + } + if (!Dim->isTypeDependent()) { + ExprResult Result = + PerformOpenMPImplicitIntegerConversion(Dim->getExprLoc(), Dim); + if (Result.isInvalid()) { + ErrorFound = true; + Diag(Dim->getExprLoc(), diag::err_omp_typecheck_shaping_not_integer) + << Dim->getSourceRange(); + continue; + } + Dim = Result.get(); + Expr::EvalResult EvResult; + if (!Dim->isValueDependent() && Dim->EvaluateAsInt(EvResult, Context)) { + // OpenMP 5.0, [2.1.4 Array Shaping] + // Each si is an integral type expression that must evaluate to a + // positive integer. + llvm::APSInt Value = EvResult.Val.getInt(); + if (!Value.isStrictlyPositive()) { + Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive) + << toString(Value, /*Radix=*/10, /*Signed=*/true) + << Dim->getSourceRange(); + ErrorFound = true; + continue; + } + } + } + NewDims.push_back(Dim); + } + if (ErrorFound) + return ExprError(); + return OMPArrayShapingExpr::Create(Context, Context.OMPArrayShapingTy, Base, + LParenLoc, RParenLoc, NewDims, Brackets); +} + +ExprResult SemaOpenMP::ActOnOMPIteratorExpr(Scope *S, + SourceLocation IteratorKwLoc, + SourceLocation LLoc, + SourceLocation RLoc, + ArrayRef Data) { + ASTContext &Context = getASTContext(); + SmallVector ID; + bool IsCorrect = true; + for (const OMPIteratorData &D : Data) { + TypeSourceInfo *TInfo = nullptr; + SourceLocation StartLoc; + QualType DeclTy; + if (!D.Type.getAsOpaquePtr()) { + // OpenMP 5.0, 2.1.6 Iterators + // In an iterator-specifier, if the iterator-type is not specified then + // the type of that iterator is of int type. + DeclTy = Context.IntTy; + StartLoc = D.DeclIdentLoc; + } else { + DeclTy = Sema::GetTypeFromParser(D.Type, &TInfo); + StartLoc = TInfo->getTypeLoc().getBeginLoc(); + } + + bool IsDeclTyDependent = DeclTy->isDependentType() || + DeclTy->containsUnexpandedParameterPack() || + DeclTy->isInstantiationDependentType(); + if (!IsDeclTyDependent) { + if (!DeclTy->isIntegralType(Context) && !DeclTy->isAnyPointerType()) { + // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ + // The iterator-type must be an integral or pointer type. + Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) + << DeclTy; + IsCorrect = false; + continue; + } + if (DeclTy.isConstant(Context)) { + // OpenMP 5.0, 2.1.6 Iterators, Restrictions, C/C++ + // The iterator-type must not be const qualified. + Diag(StartLoc, diag::err_omp_iterator_not_integral_or_pointer) + << DeclTy; + IsCorrect = false; + continue; + } + } + + // Iterator declaration. + assert(D.DeclIdent && "Identifier expected."); + // Always try to create iterator declarator to avoid extra error messages + // about unknown declarations use. 
+ auto *VD = + VarDecl::Create(Context, SemaRef.CurContext, StartLoc, D.DeclIdentLoc, + D.DeclIdent, DeclTy, TInfo, SC_None); + VD->setImplicit(); + if (S) { + // Check for conflicting previous declaration. + DeclarationNameInfo NameInfo(VD->getDeclName(), D.DeclIdentLoc); + LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration); + Previous.suppressDiagnostics(); + SemaRef.LookupName(Previous, S); + + SemaRef.FilterLookupForScope(Previous, SemaRef.CurContext, S, + /*ConsiderLinkage=*/false, + /*AllowInlineNamespace=*/false); + if (!Previous.empty()) { + NamedDecl *Old = Previous.getRepresentativeDecl(); + Diag(D.DeclIdentLoc, diag::err_redefinition) << VD->getDeclName(); + Diag(Old->getLocation(), diag::note_previous_definition); + } else { + SemaRef.PushOnScopeChains(VD, S); + } + } else { + SemaRef.CurContext->addDecl(VD); + } + + /// Act on the iterator variable declaration. + ActOnOpenMPIteratorVarDecl(VD); + + Expr *Begin = D.Range.Begin; + if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) { + ExprResult BeginRes = + SemaRef.PerformImplicitConversion(Begin, DeclTy, Sema::AA_Converting); + Begin = BeginRes.get(); + } + Expr *End = D.Range.End; + if (!IsDeclTyDependent && End && !End->isTypeDependent()) { + ExprResult EndRes = + SemaRef.PerformImplicitConversion(End, DeclTy, Sema::AA_Converting); + End = EndRes.get(); + } + Expr *Step = D.Range.Step; + if (!IsDeclTyDependent && Step && !Step->isTypeDependent()) { + if (!Step->getType()->isIntegralType(Context)) { + Diag(Step->getExprLoc(), diag::err_omp_iterator_step_not_integral) + << Step << Step->getSourceRange(); + IsCorrect = false; + continue; + } + std::optional Result = + Step->getIntegerConstantExpr(Context); + // OpenMP 5.0, 2.1.6 Iterators, Restrictions + // If the step expression of a range-specification equals zero, the + // behavior is unspecified. + if (Result && Result->isZero()) { + Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero) + << Step << Step->getSourceRange(); + IsCorrect = false; + continue; + } + } + if (!Begin || !End || !IsCorrect) { + IsCorrect = false; + continue; + } + OMPIteratorExpr::IteratorDefinition &IDElem = ID.emplace_back(); + IDElem.IteratorDecl = VD; + IDElem.AssignmentLoc = D.AssignLoc; + IDElem.Range.Begin = Begin; + IDElem.Range.End = End; + IDElem.Range.Step = Step; + IDElem.ColonLoc = D.ColonLoc; + IDElem.SecondColonLoc = D.SecColonLoc; + } + if (!IsCorrect) { + // Invalidate all created iterator declarations if error is found. + for (const OMPIteratorExpr::IteratorDefinition &D : ID) { + if (Decl *ID = D.IteratorDecl) + ID->setInvalidDecl(); + } + return ExprError(); + } + SmallVector Helpers; + if (!SemaRef.CurContext->isDependentContext()) { + // Build number of ityeration for each iteration range. + // Ni = ((Stepi > 0) ? 
((Endi + Stepi -1 - Begini)/Stepi) : + // ((Begini-Stepi-1-Endi) / -Stepi); + for (OMPIteratorExpr::IteratorDefinition &D : ID) { + // (Endi - Begini) + ExprResult Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Sub, + D.Range.End, D.Range.Begin); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + ExprResult St, St1; + if (D.Range.Step) { + St = D.Range.Step; + // (Endi - Begini) + Stepi + Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res.get(), + St.get()); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + // (Endi - Begini) + Stepi - 1 + Res = SemaRef.CreateBuiltinBinOp( + D.AssignmentLoc, BO_Sub, Res.get(), + SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 1).get()); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + // ((Endi - Begini) + Stepi - 1) / Stepi + Res = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res.get(), + St.get()); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + St1 = SemaRef.CreateBuiltinUnaryOp(D.AssignmentLoc, UO_Minus, + D.Range.Step); + // (Begini - Endi) + ExprResult Res1 = SemaRef.CreateBuiltinBinOp( + D.AssignmentLoc, BO_Sub, D.Range.Begin, D.Range.End); + if (!Res1.isUsable()) { + IsCorrect = false; + continue; + } + // (Begini - Endi) - Stepi + Res1 = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, Res1.get(), + St1.get()); + if (!Res1.isUsable()) { + IsCorrect = false; + continue; + } + // (Begini - Endi) - Stepi - 1 + Res1 = SemaRef.CreateBuiltinBinOp( + D.AssignmentLoc, BO_Sub, Res1.get(), + SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 1).get()); + if (!Res1.isUsable()) { + IsCorrect = false; + continue; + } + // ((Begini - Endi) - Stepi - 1) / (-Stepi) + Res1 = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Div, Res1.get(), + St1.get()); + if (!Res1.isUsable()) { + IsCorrect = false; + continue; + } + // Stepi > 0. + ExprResult CmpRes = SemaRef.CreateBuiltinBinOp( + D.AssignmentLoc, BO_GT, D.Range.Step, + SemaRef.ActOnIntegerConstant(D.AssignmentLoc, 0).get()); + if (!CmpRes.isUsable()) { + IsCorrect = false; + continue; + } + Res = SemaRef.ActOnConditionalOp(D.AssignmentLoc, D.AssignmentLoc, + CmpRes.get(), Res.get(), Res1.get()); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + } + Res = SemaRef.ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false); + if (!Res.isUsable()) { + IsCorrect = false; + continue; + } + + // Build counter update. + // Build counter. + auto *CounterVD = VarDecl::Create(Context, SemaRef.CurContext, + D.IteratorDecl->getBeginLoc(), + D.IteratorDecl->getBeginLoc(), nullptr, + Res.get()->getType(), nullptr, SC_None); + CounterVD->setImplicit(); + ExprResult RefRes = + SemaRef.BuildDeclRefExpr(CounterVD, CounterVD->getType(), VK_LValue, + D.IteratorDecl->getBeginLoc()); + // Build counter update. 
+ // I = Begini + counter * Stepi; + ExprResult UpdateRes; + if (D.Range.Step) { + UpdateRes = SemaRef.CreateBuiltinBinOp( + D.AssignmentLoc, BO_Mul, + SemaRef.DefaultLvalueConversion(RefRes.get()).get(), St.get()); + } else { + UpdateRes = SemaRef.DefaultLvalueConversion(RefRes.get()); + } + if (!UpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + UpdateRes = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Add, + D.Range.Begin, UpdateRes.get()); + if (!UpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + ExprResult VDRes = + SemaRef.BuildDeclRefExpr(cast(D.IteratorDecl), + cast(D.IteratorDecl)->getType(), + VK_LValue, D.IteratorDecl->getBeginLoc()); + UpdateRes = SemaRef.CreateBuiltinBinOp(D.AssignmentLoc, BO_Assign, + VDRes.get(), UpdateRes.get()); + if (!UpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + UpdateRes = + SemaRef.ActOnFinishFullExpr(UpdateRes.get(), /*DiscardedValue=*/true); + if (!UpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + ExprResult CounterUpdateRes = SemaRef.CreateBuiltinUnaryOp( + D.AssignmentLoc, UO_PreInc, RefRes.get()); + if (!CounterUpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + CounterUpdateRes = SemaRef.ActOnFinishFullExpr(CounterUpdateRes.get(), + /*DiscardedValue=*/true); + if (!CounterUpdateRes.isUsable()) { + IsCorrect = false; + continue; + } + OMPIteratorHelperData &HD = Helpers.emplace_back(); + HD.CounterVD = CounterVD; + HD.Upper = Res.get(); + HD.Update = UpdateRes.get(); + HD.CounterUpdate = CounterUpdateRes.get(); + } + } else { + Helpers.assign(ID.size(), {}); + } + if (!IsCorrect) { + // Invalidate all created iterator declarations if error is found. + for (const OMPIteratorExpr::IteratorDefinition &D : ID) { + if (Decl *ID = D.IteratorDecl) + ID->setInvalidDecl(); + } + return ExprError(); + } + return OMPIteratorExpr::Create(Context, Context.OMPIteratorTy, IteratorKwLoc, + LLoc, RLoc, ID, Helpers); +} + +SemaOpenMP::SemaOpenMP(Sema &S) + : SemaBase(S), VarDataSharingAttributesStack(nullptr) {} diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index a1d16cc2c633b..b3ffd32402bd0 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -31,11 +31,14 @@ #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Overload.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/SemaSYCL.h" #include "clang/Sema/Template.h" #include "clang/Sema/TemplateDeduction.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" @@ -1548,10 +1551,10 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New, // Don't allow overloading of destructors. (In theory we could, but it // would be a giant change to clang.) 
if (!isa(New)) { - Sema::CUDAFunctionTarget NewTarget = SemaRef.IdentifyCUDATarget(New), - OldTarget = SemaRef.IdentifyCUDATarget(Old); - if (NewTarget != Sema::CFT_InvalidTarget) { - assert((OldTarget != Sema::CFT_InvalidTarget) && + CUDAFunctionTarget NewTarget = SemaRef.CUDA().IdentifyTarget(New), + OldTarget = SemaRef.CUDA().IdentifyTarget(Old); + if (NewTarget != CUDAFunctionTarget::InvalidTarget) { + assert((OldTarget != CUDAFunctionTarget::InvalidTarget) && "Unexpected invalid target."); // Allow overloading of functions with same signature and different CUDA @@ -7116,7 +7119,7 @@ void Sema::AddOverloadCandidate( // inferred for the member automatically, based on the bases and fields of // the class. if (!(Caller && Caller->isImplicit()) && - !IsAllowedCUDACall(Caller, Function)) { + !CUDA().IsAllowedCall(Caller, Function)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_target; return; @@ -7634,7 +7637,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, // (CUDA B.1): Check for invalid calls between targets. if (getLangOpts().CUDA) - if (!IsAllowedCUDACall(getCurFunctionDecl(/*AllowLambda=*/true), Method)) { + if (!CUDA().IsAllowedCall(getCurFunctionDecl(/*AllowLambda=*/true), + Method)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_target; return; @@ -10456,7 +10460,7 @@ bool clang::isBetterOverloadCandidate( // If other rules cannot determine which is better, CUDA preference will be // used again to determine which is better. // - // TODO: Currently IdentifyCUDAPreference does not return correct values + // TODO: Currently IdentifyPreference does not return correct values // for functions called in global variable initializers due to missing // correct context about device/host. Therefore we can only enforce this // rule when there is a caller. We should enforce this rule for functions @@ -10468,14 +10472,14 @@ bool clang::isBetterOverloadCandidate( if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function && S.getLangOpts().GPUExcludeWrongSideOverloads) { if (FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true)) { - bool IsCallerImplicitHD = Sema::isCUDAImplicitHostDeviceFunction(Caller); + bool IsCallerImplicitHD = SemaCUDA::isImplicitHostDeviceFunction(Caller); bool IsCand1ImplicitHD = - Sema::isCUDAImplicitHostDeviceFunction(Cand1.Function); + SemaCUDA::isImplicitHostDeviceFunction(Cand1.Function); bool IsCand2ImplicitHD = - Sema::isCUDAImplicitHostDeviceFunction(Cand2.Function); - auto P1 = S.IdentifyCUDAPreference(Caller, Cand1.Function); - auto P2 = S.IdentifyCUDAPreference(Caller, Cand2.Function); - assert(P1 != Sema::CFP_Never && P2 != Sema::CFP_Never); + SemaCUDA::isImplicitHostDeviceFunction(Cand2.Function); + auto P1 = S.CUDA().IdentifyPreference(Caller, Cand1.Function); + auto P2 = S.CUDA().IdentifyPreference(Caller, Cand2.Function); + assert(P1 != SemaCUDA::CFP_Never && P2 != SemaCUDA::CFP_Never); // The implicit HD function may be a function in a system header which // is forced by pragma. In device compilation, if we prefer HD candidates // over wrong-sided candidates, overloading resolution may change, which @@ -10489,8 +10493,8 @@ bool clang::isBetterOverloadCandidate( auto EmitThreshold = (S.getLangOpts().CUDAIsDevice && IsCallerImplicitHD && (IsCand1ImplicitHD || IsCand2ImplicitHD)) - ? Sema::CFP_Never - : Sema::CFP_WrongSide; + ? 
SemaCUDA::CFP_Never + : SemaCUDA::CFP_WrongSide; auto Cand1Emittable = P1 > EmitThreshold; auto Cand2Emittable = P2 > EmitThreshold; if (Cand1Emittable && !Cand2Emittable) @@ -10774,8 +10778,8 @@ bool clang::isBetterOverloadCandidate( // to determine which is better. if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) { FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true); - return S.IdentifyCUDAPreference(Caller, Cand1.Function) > - S.IdentifyCUDAPreference(Caller, Cand2.Function); + return S.CUDA().IdentifyPreference(Caller, Cand1.Function) > + S.CUDA().IdentifyPreference(Caller, Cand2.Function); } // General member function overloading is handled above, so this only handles @@ -10907,15 +10911,15 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc, llvm::any_of(Candidates, [&](OverloadCandidate *Cand) { // Check viable function only. return Cand->Viable && Cand->Function && - S.IdentifyCUDAPreference(Caller, Cand->Function) == - Sema::CFP_SameSide; + S.CUDA().IdentifyPreference(Caller, Cand->Function) == + SemaCUDA::CFP_SameSide; }); if (ContainsSameSideCandidate) { auto IsWrongSideCandidate = [&](OverloadCandidate *Cand) { // Check viable function only to avoid unnecessary data copying/moving. return Cand->Viable && Cand->Function && - S.IdentifyCUDAPreference(Caller, Cand->Function) == - Sema::CFP_WrongSide; + S.CUDA().IdentifyPreference(Caller, Cand->Function) == + SemaCUDA::CFP_WrongSide; }; llvm::erase_if(Candidates, IsWrongSideCandidate); } @@ -11110,9 +11114,9 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD, if (Complain && S.getLangOpts().SYCLIsDevice && S.getLangOpts().SYCLAllowFuncPtr) { if (!FD->hasAttr()) { - S.SYCLDiagIfDeviceCode(Loc, - diag::err_sycl_taking_address_of_wrong_function, - Sema::DeviceDiagnosticReason::Sycl); + S.SYCL().DiagIfDeviceCode(Loc, + diag::err_sycl_taking_address_of_wrong_function, + Sema::DeviceDiagnosticReason::Sycl); } } @@ -11963,8 +11967,8 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) { FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true); FunctionDecl *Callee = Cand->Function; - Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller), - CalleeTarget = S.IdentifyCUDATarget(Callee); + CUDAFunctionTarget CallerTarget = S.CUDA().IdentifyTarget(Caller), + CalleeTarget = S.CUDA().IdentifyTarget(Callee); std::string FnDesc; std::pair FnKindPair = @@ -11974,32 +11978,32 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) { S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target) << (unsigned)FnKindPair.first << (unsigned)ocs_non_template << FnDesc /* Ignored */ - << CalleeTarget << CallerTarget; + << llvm::to_underlying(CalleeTarget) << llvm::to_underlying(CallerTarget); // This could be an implicit constructor for which we could not infer the // target due to a collsion. Diagnose that case. 
CXXMethodDecl *Meth = dyn_cast(Callee); if (Meth != nullptr && Meth->isImplicit()) { CXXRecordDecl *ParentClass = Meth->getParent(); - Sema::CXXSpecialMember CSM; + CXXSpecialMemberKind CSM; switch (FnKindPair.first) { default: return; case oc_implicit_default_constructor: - CSM = Sema::CXXDefaultConstructor; + CSM = CXXSpecialMemberKind::DefaultConstructor; break; case oc_implicit_copy_constructor: - CSM = Sema::CXXCopyConstructor; + CSM = CXXSpecialMemberKind::CopyConstructor; break; case oc_implicit_move_constructor: - CSM = Sema::CXXMoveConstructor; + CSM = CXXSpecialMemberKind::MoveConstructor; break; case oc_implicit_copy_assignment: - CSM = Sema::CXXCopyAssignment; + CSM = CXXSpecialMemberKind::CopyAssignment; break; case oc_implicit_move_assignment: - CSM = Sema::CXXMoveAssignment; + CSM = CXXSpecialMemberKind::MoveAssignment; break; }; @@ -12011,9 +12015,9 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) { } } - S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth, - /* ConstRHS */ ConstRHS, - /* Diagnose */ true); + S.CUDA().inferTargetForImplicitSpecialMember(ParentClass, CSM, Meth, + /* ConstRHS */ ConstRHS, + /* Diagnose */ true); } } @@ -13085,7 +13089,7 @@ class AddressOfFunctionResolver { if (S.getLangOpts().CUDA) { FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true); if (!(Caller && Caller->isImplicit()) && - !S.IsAllowedCUDACall(Caller, FunDecl)) + !S.CUDA().IsAllowedCall(Caller, FunDecl)) return false; } if (FunDecl->isMultiVersion()) { @@ -13205,8 +13209,8 @@ class AddressOfFunctionResolver { } void EliminateSuboptimalCudaMatches() { - S.EraseUnwantedCUDAMatches(S.getCurFunctionDecl(/*AllowLambda=*/true), - Matches); + S.CUDA().EraseUnwantedMatches(S.getCurFunctionDecl(/*AllowLambda=*/true), + Matches); } public: @@ -13360,8 +13364,8 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) { // Return positive for better, negative for worse, 0 for equal preference. auto CheckCUDAPreference = [&](FunctionDecl *FD1, FunctionDecl *FD2) { FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); - return static_cast(IdentifyCUDAPreference(Caller, FD1)) - - static_cast(IdentifyCUDAPreference(Caller, FD2)); + return static_cast(CUDA().IdentifyPreference(Caller, FD1)) - + static_cast(CUDA().IdentifyPreference(Caller, FD2)); }; auto CheckMoreConstrained = [&](FunctionDecl *FD1, @@ -14194,15 +14198,13 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, break; case OR_Deleted: { - CandidateSet->NoteCandidates( - PartialDiagnosticAt(Fn->getBeginLoc(), - SemaRef.PDiag(diag::err_ovl_deleted_call) - << ULE->getName() << Fn->getSourceRange()), - SemaRef, OCD_AllCandidates, Args); + FunctionDecl *FDecl = (*Best)->Function; + SemaRef.DiagnoseUseOfDeletedFunction(Fn->getBeginLoc(), + Fn->getSourceRange(), ULE->getName(), + *CandidateSet, FDecl, Args); // We emitted an error for the unavailable/deleted function call but keep // the call in the AST. - FunctionDecl *FDecl = (*Best)->Function; ExprResult Res = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl); if (Res.isInvalid()) @@ -14420,9 +14422,16 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, ArrayRef ArgsArray(Args, NumArgs); if (Input->isTypeDependent()) { + ExprValueKind VK = ExprValueKind::VK_PRValue; + // [C++26][expr.unary.op][expr.pre.incr] + // The * operator yields an lvalue of type + // The pre/post increment operators yied an lvalue. 
+ if (Opc == UO_PreDec || Opc == UO_PreInc || Opc == UO_Deref) + VK = VK_LValue; + if (Fns.empty()) - return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy, - VK_PRValue, OK_Ordinary, OpLoc, false, + return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy, VK, + OK_Ordinary, OpLoc, false, CurFPFeatureOverrides()); CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators @@ -14431,7 +14440,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, if (Fn.isInvalid()) return ExprError(); return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), ArgsArray, - Context.DependentTy, VK_PRValue, OpLoc, + Context.DependentTy, VK, OpLoc, CurFPFeatureOverrides()); } @@ -14524,7 +14533,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, // operator node. ExprResult InputRes = PerformImplicitConversion( Input, Best->BuiltinParamTypes[0], Best->Conversions[0], AA_Passing, - CCK_ForBuiltinOverloadedOp); + CheckedConversionKind::ForBuiltinOverloadedOp); if (InputRes.isInvalid()) return ExprError(); Input = InputRes.get(); @@ -14554,20 +14563,24 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, UnaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); - case OR_Deleted: + case OR_Deleted: { // CreateOverloadedUnaryOp fills the first element of ArgsArray with the // object whose method was called. Later in NoteCandidates size of ArgsArray // is passed further and it eventually ends up compared to number of // function candidate parameters which never includes the object parameter, // so slice ArgsArray to make sure apples are compared to apples. + StringLiteral *Msg = Best->Function->getDeletedMessage(); CandidateSet.NoteCandidates( PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper) << UnaryOperator::getOpcodeStr(Opc) + << (Msg != nullptr) + << (Msg ? Msg->getString() : StringRef()) << Input->getSourceRange()), *this, OCD_AllCandidates, ArgsArray.drop_front(), UnaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); } + } // Either we found no viable overloaded operator or we matched a // built-in operator. In either case, fall through to trying to @@ -15003,14 +15016,14 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc, // operator node. 
ExprResult ArgsRes0 = PerformImplicitConversion( Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0], - AA_Passing, CCK_ForBuiltinOverloadedOp); + AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp); if (ArgsRes0.isInvalid()) return ExprError(); Args[0] = ArgsRes0.get(); ExprResult ArgsRes1 = PerformImplicitConversion( Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1], - AA_Passing, CCK_ForBuiltinOverloadedOp); + AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp); if (ArgsRes1.isInvalid()) return ExprError(); Args[1] = ArgsRes1.get(); @@ -15084,13 +15097,14 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc, OpLoc); return ExprError(); - case OR_Deleted: + case OR_Deleted: { if (isImplicitlyDeleted(Best->Function)) { FunctionDecl *DeletedFD = Best->Function; DefaultedFunctionKind DFK = getDefaultedFunctionKind(DeletedFD); if (DFK.isSpecialMember()) { Diag(OpLoc, diag::err_ovl_deleted_special_oper) - << Args[0]->getType() << DFK.asSpecialMember(); + << Args[0]->getType() + << llvm::to_underlying(DFK.asSpecialMember()); } else { assert(DFK.isComparison()); Diag(OpLoc, diag::err_ovl_deleted_comparison) @@ -15102,16 +15116,20 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc, NoteDeletedFunction(DeletedFD); return ExprError(); } + + StringLiteral *Msg = Best->Function->getDeletedMessage(); CandidateSet.NoteCandidates( PartialDiagnosticAt( - OpLoc, PDiag(diag::err_ovl_deleted_oper) - << getOperatorSpelling(Best->Function->getDeclName() - .getCXXOverloadedOperator()) - << Args[0]->getSourceRange() - << Args[1]->getSourceRange()), + OpLoc, + PDiag(diag::err_ovl_deleted_oper) + << getOperatorSpelling(Best->Function->getDeclName() + .getCXXOverloadedOperator()) + << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef()) + << Args[0]->getSourceRange() << Args[1]->getSourceRange()), *this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); + } } // We matched a built-in operator; build it. @@ -15376,14 +15394,14 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, // operator node. ExprResult ArgsRes0 = PerformImplicitConversion( Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0], - AA_Passing, CCK_ForBuiltinOverloadedOp); + AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp); if (ArgsRes0.isInvalid()) return ExprError(); Args[0] = ArgsRes0.get(); ExprResult ArgsRes1 = PerformImplicitConversion( Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1], - AA_Passing, CCK_ForBuiltinOverloadedOp); + AA_Passing, CheckedConversionKind::ForBuiltinOverloadedOp); if (ArgsRes1.isInvalid()) return ExprError(); Args[1] = ArgsRes1.get(); @@ -15423,14 +15441,18 @@ ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, } return ExprError(); - case OR_Deleted: + case OR_Deleted: { + StringLiteral *Msg = Best->Function->getDeletedMessage(); CandidateSet.NoteCandidates( - PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_deleted_oper) - << "[]" << Args[0]->getSourceRange() - << Range), + PartialDiagnosticAt(LLoc, + PDiag(diag::err_ovl_deleted_oper) + << "[]" << (Msg != nullptr) + << (Msg ? Msg->getString() : StringRef()) + << Args[0]->getSourceRange() << Range), *this, OCD_AllCandidates, Args, "[]", LLoc); return ExprError(); } + } // We matched a built-in operator; build it. 
return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc); @@ -15644,11 +15666,9 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, *this, OCD_AmbiguousCandidates, Args); break; case OR_Deleted: - CandidateSet.NoteCandidates( - PartialDiagnosticAt(UnresExpr->getMemberLoc(), - PDiag(diag::err_ovl_deleted_member_call) - << DeclName << MemExprE->getSourceRange()), - *this, OCD_AllCandidates, Args); + DiagnoseUseOfDeletedFunction( + UnresExpr->getMemberLoc(), MemExprE->getSourceRange(), DeclName, + CandidateSet, Best->Function, Args, /*IsMember=*/true); break; } // Overload resolution fails, try to recover. @@ -15912,15 +15932,21 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, *this, OCD_AmbiguousCandidates, Args); break; - case OR_Deleted: + case OR_Deleted: { + // FIXME: Is this diagnostic here really necessary? It seems that + // 1. we don't have any tests for this diagnostic, and + // 2. we already issue err_deleted_function_use for this later on anyway. + StringLiteral *Msg = Best->Function->getDeletedMessage(); CandidateSet.NoteCandidates( PartialDiagnosticAt(Object.get()->getBeginLoc(), PDiag(diag::err_ovl_deleted_object_call) - << Object.get()->getType() + << Object.get()->getType() << (Msg != nullptr) + << (Msg ? Msg->getString() : StringRef()) << Object.get()->getSourceRange()), *this, OCD_AllCandidates, Args); break; } + } if (Best == CandidateSet.end()) return true; @@ -16119,13 +16145,17 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, *this, OCD_AmbiguousCandidates, Base); return ExprError(); - case OR_Deleted: + case OR_Deleted: { + StringLiteral *Msg = Best->Function->getDeletedMessage(); CandidateSet.NoteCandidates( PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper) - << "->" << Base->getSourceRange()), + << "->" << (Msg != nullptr) + << (Msg ? Msg->getString() : StringRef()) + << Base->getSourceRange()), *this, OCD_AllCandidates, Base); return ExprError(); } + } CheckMemberOperatorAccess(OpLoc, Base, nullptr, Best->FoundDecl); @@ -16539,3 +16569,17 @@ bool clang::shouldEnforceArgLimit(bool PartialOverloading, return false; return true; } + +void Sema::DiagnoseUseOfDeletedFunction(SourceLocation Loc, SourceRange Range, + DeclarationName Name, + OverloadCandidateSet &CandidateSet, + FunctionDecl *Fn, MultiExprArg Args, + bool IsMember) { + StringLiteral *Msg = Fn->getDeletedMessage(); + CandidateSet.NoteCandidates( + PartialDiagnosticAt(Loc, PDiag(diag::err_ovl_deleted_call) + << IsMember << Name << (Msg != nullptr) + << (Msg ? Msg->getString() : StringRef()) + << Range), + *this, OCD_AllCandidates, Args); +} diff --git a/clang/lib/Sema/SemaPseudoObject.cpp b/clang/lib/Sema/SemaPseudoObject.cpp index 528c261c4a297..c6a0a182d3583 100644 --- a/clang/lib/Sema/SemaPseudoObject.cpp +++ b/clang/lib/Sema/SemaPseudoObject.cpp @@ -613,9 +613,9 @@ bool ObjCPropertyOpBuilder::findGetter() { // Must build the getter selector the hard way. 
ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter(); assert(setter && "both setter and getter are null - cannot happen"); - IdentifierInfo *setterName = - setter->getSelector().getIdentifierInfoForSlot(0); - IdentifierInfo *getterName = + const IdentifierInfo *setterName = + setter->getSelector().getIdentifierInfoForSlot(0); + const IdentifierInfo *getterName = &S.Context.Idents.get(setterName->getName().substr(3)); GetterSelector = S.PP.getSelectorTable().getNullarySelector(getterName); @@ -640,9 +640,9 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) { SetterSelector = setter->getSelector(); return true; } else { - IdentifierInfo *getterName = - RefExpr->getImplicitPropertyGetter()->getSelector() - .getIdentifierInfoForSlot(0); + const IdentifierInfo *getterName = RefExpr->getImplicitPropertyGetter() + ->getSelector() + .getIdentifierInfoForSlot(0); SetterSelector = SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(), S.PP.getSelectorTable(), @@ -667,7 +667,8 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) { front = isLowercase(front) ? toUppercase(front) : toLowercase(front); SmallString<100> PropertyName = thisPropertyName; PropertyName[0] = front; - IdentifierInfo *AltMember = &S.PP.getIdentifierTable().get(PropertyName); + const IdentifierInfo *AltMember = + &S.PP.getIdentifierTable().get(PropertyName); if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration( AltMember, prop->getQueryKind())) if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) { @@ -1126,9 +1127,8 @@ static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT, return; // dictionary subscripting. // - (id)objectForKeyedSubscript:(id)key; - IdentifierInfo *KeyIdents[] = { - &S.Context.Idents.get("objectForKeyedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &S.Context.Idents.get("objectForKeyedSubscript")}; Selector GetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); ObjCMethodDecl *Getter = S.LookupMethodInObjectType(GetterSelector, ContainerT, true /*instance*/); @@ -1136,7 +1136,7 @@ static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT, return; QualType T = Getter->parameters()[0]->getType(); S.CheckObjCConversion(Key->getSourceRange(), T, Key, - Sema::CCK_ImplicitConversion); + CheckedConversionKind::Implicit); } bool ObjCSubscriptOpBuilder::findAtIndexGetter() { @@ -1169,16 +1169,14 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() { if (!arrayRef) { // dictionary subscripting. // - (id)objectForKeyedSubscript:(id)key; - IdentifierInfo *KeyIdents[] = { - &S.Context.Idents.get("objectForKeyedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &S.Context.Idents.get("objectForKeyedSubscript")}; AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); } else { // - (id)objectAtIndexedSubscript:(size_t)index; - IdentifierInfo *KeyIdents[] = { - &S.Context.Idents.get("objectAtIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &S.Context.Idents.get("objectAtIndexedSubscript")}; AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); } @@ -1274,18 +1272,16 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() { if (!arrayRef) { // dictionary subscripting. 
// - (void)setObject:(id)object forKeyedSubscript:(id)key; - IdentifierInfo *KeyIdents[] = { - &S.Context.Idents.get("setObject"), - &S.Context.Idents.get("forKeyedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &S.Context.Idents.get("setObject"), + &S.Context.Idents.get("forKeyedSubscript")}; AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents); } else { // - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index; - IdentifierInfo *KeyIdents[] = { - &S.Context.Idents.get("setObject"), - &S.Context.Idents.get("atIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &S.Context.Idents.get("setObject"), + &S.Context.Idents.get("atIndexedSubscript")}; AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents); } AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType, @@ -1474,7 +1470,7 @@ ExprResult MSPropertyOpBuilder::buildGet() { } UnqualifiedId GetterName; - IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId(); + const IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId(); GetterName.setIdentifier(II, RefExpr->getMemberLoc()); CXXScopeSpec SS; SS.Adopt(RefExpr->getQualifierLoc()); @@ -1503,7 +1499,7 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl, } UnqualifiedId SetterName; - IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId(); + const IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId(); SetterName.setIdentifier(II, RefExpr->getMemberLoc()); CXXScopeSpec SS; SS.Adopt(RefExpr->getQualifierLoc()); diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp index b1fb7f25066ab..ec137637236d3 100644 --- a/clang/lib/Sema/SemaSYCL.cpp +++ b/clang/lib/Sema/SemaSYCL.cpp @@ -8,6 +8,7 @@ // This implements Semantic Analysis for SYCL constructs. 
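The SemaSYCL.cpp hunks that follow move SYCL entry points from Sema onto a SemaSYCL class that keeps a Sema reference (SemaRef) and reaches shared facilities through getASTContext(), SemaRef.RequireCompleteType(), and so on. A minimal sketch of that delegation pattern with simplified stand-in types (not the real clang class hierarchy):

    namespace sketch {
    struct ASTContext {};

    struct Sema {
      ASTContext &Context;
      bool isUnevaluatedContext() const { return false; } // placeholder behaviour
    };

    // A language-specific "part" of Sema: no duplicated state, everything shared
    // is reached through the back-reference.
    class SemaSYCL {
    public:
      explicit SemaSYCL(Sema &S) : SemaRef(S) {}
      ASTContext &getASTContext() const { return SemaRef.Context; }
      bool inUnevaluatedContext() const { return SemaRef.isUnevaluatedContext(); }

    private:
      Sema &SemaRef;
    };
    } // namespace sketch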
//===----------------------------------------------------------------------===// +#include "clang/Sema/SemaSYCL.h" #include "TreeTransform.h" #include "clang/AST/AST.h" #include "clang/AST/Mangle.h" @@ -69,7 +70,7 @@ static constexpr llvm::StringLiteral LibstdcxxFailedAssertion = "__failed_assertion"; constexpr unsigned MaxKernelArgsSize = 2048; -bool Sema::isSyclType(QualType Ty, SYCLTypeAttr::SYCLType TypeName) { +bool SemaSYCL::isSyclType(QualType Ty, SYCLTypeAttr::SYCLType TypeName) { const auto *RD = Ty->getAsCXXRecordDecl(); if (!RD) return false; @@ -87,8 +88,8 @@ bool Sema::isSyclType(QualType Ty, SYCLTypeAttr::SYCLType TypeName) { } static bool isSyclAccessorType(QualType Ty) { - return Sema::isSyclType(Ty, SYCLTypeAttr::accessor) || - Sema::isSyclType(Ty, SYCLTypeAttr::local_accessor); + return SemaSYCL::isSyclType(Ty, SYCLTypeAttr::accessor) || + SemaSYCL::isSyclType(Ty, SYCLTypeAttr::local_accessor); } // FIXME: Accessor property lists should be modified to use compile-time @@ -105,25 +106,25 @@ static bool isAccessorPropertyType(QualType Ty, return false; } -static bool isSyclSpecialType(QualType Ty, Sema &S) { +static bool isSyclSpecialType(QualType Ty, SemaSYCL &S) { return S.isTypeDecoratedWithDeclAttribute(Ty); } -ExprResult Sema::ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT) { +ExprResult SemaSYCL::ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT) { TypeSourceInfo *TInfo = nullptr; - QualType QT = GetTypeFromParser(PT, &TInfo); + QualType QT = Sema::GetTypeFromParser(PT, &TInfo); assert(TInfo && "couldn't get type info from a type from the parser?"); SourceLocation TypeLoc = TInfo->getTypeLoc().getBeginLoc(); return BuildSYCLBuiltinNumFieldsExpr(TypeLoc, QT); } -ExprResult Sema::BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc, - QualType SourceTy) { +ExprResult SemaSYCL::BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc, + QualType SourceTy) { if (!SourceTy->isDependentType()) { - if (RequireCompleteType(Loc, SourceTy, - diag::err_sycl_type_trait_requires_complete_type, - /*__builtin_num_fields*/ 0)) + if (SemaRef.RequireCompleteType( + Loc, SourceTy, diag::err_sycl_type_trait_requires_complete_type, + /*__builtin_num_fields*/ 0)) return ExprError(); if (!SourceTy->isRecordType()) { @@ -132,24 +133,25 @@ ExprResult Sema::BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc, return ExprError(); } } - return new (Context) - SYCLBuiltinNumFieldsExpr(Loc, SourceTy, Context.getSizeType()); + return new (getASTContext()) + SYCLBuiltinNumFieldsExpr(Loc, SourceTy, getASTContext().getSizeType()); } -ExprResult Sema::ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx) { +ExprResult SemaSYCL::ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx) { TypeSourceInfo *TInfo = nullptr; - QualType QT = GetTypeFromParser(PT, &TInfo); + QualType QT = Sema::GetTypeFromParser(PT, &TInfo); assert(TInfo && "couldn't get type info from a type from the parser?"); SourceLocation TypeLoc = TInfo->getTypeLoc().getBeginLoc(); return BuildSYCLBuiltinFieldTypeExpr(TypeLoc, QT, Idx); } -ExprResult Sema::BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, - QualType SourceTy, Expr *Idx) { +ExprResult SemaSYCL::BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, + QualType SourceTy, + Expr *Idx) { // If the expression appears in an evaluated context, we want to give an // error so that users don't attempt to use the value of this expression. 
- if (!isUnevaluatedContext()) { + if (!SemaRef.isUnevaluatedContext()) { Diag(Loc, diag::err_sycl_builtin_type_trait_evaluated) << /*__builtin_field_type*/ 0; return ExprError(); @@ -162,9 +164,9 @@ ExprResult Sema::BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, QualType FieldTy = SourceTy; ExprValueKind ValueKind = VK_PRValue; if (!SourceTy->isDependentType()) { - if (RequireCompleteType(Loc, SourceTy, - diag::err_sycl_type_trait_requires_complete_type, - /*__builtin_field_type*/ 1)) + if (SemaRef.RequireCompleteType( + Loc, SourceTy, diag::err_sycl_type_trait_requires_complete_type, + /*__builtin_field_type*/ 1)) return ExprError(); if (!SourceTy->isRecordType()) { @@ -174,7 +176,8 @@ ExprResult Sema::BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, } if (!Idx->isValueDependent()) { - std::optional IdxVal = Idx->getIntegerConstantExpr(Context); + std::optional IdxVal = + Idx->getIntegerConstantExpr(getASTContext()); if (IdxVal) { RecordDecl *RD = SourceTy->getAsRecordDecl(); assert(RD && "Record type but no record decl?"); @@ -209,25 +212,25 @@ ExprResult Sema::BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc, } } } - return new (Context) + return new (getASTContext()) SYCLBuiltinFieldTypeExpr(Loc, SourceTy, Idx, FieldTy, ValueKind); } -ExprResult Sema::ActOnSYCLBuiltinNumBasesExpr(ParsedType PT) { +ExprResult SemaSYCL::ActOnSYCLBuiltinNumBasesExpr(ParsedType PT) { TypeSourceInfo *TInfo = nullptr; - QualType QT = GetTypeFromParser(PT, &TInfo); + QualType QT = Sema::GetTypeFromParser(PT, &TInfo); assert(TInfo && "couldn't get type info from a type from the parser?"); SourceLocation TypeLoc = TInfo->getTypeLoc().getBeginLoc(); return BuildSYCLBuiltinNumBasesExpr(TypeLoc, QT); } -ExprResult Sema::BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc, - QualType SourceTy) { +ExprResult SemaSYCL::BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc, + QualType SourceTy) { if (!SourceTy->isDependentType()) { - if (RequireCompleteType(Loc, SourceTy, - diag::err_sycl_type_trait_requires_complete_type, - /*__builtin_num_bases*/ 2)) + if (SemaRef.RequireCompleteType( + Loc, SourceTy, diag::err_sycl_type_trait_requires_complete_type, + /*__builtin_num_bases*/ 2)) return ExprError(); if (!SourceTy->isRecordType()) { @@ -236,24 +239,25 @@ ExprResult Sema::BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc, return ExprError(); } } - return new (Context) - SYCLBuiltinNumBasesExpr(Loc, SourceTy, Context.getSizeType()); + return new (getASTContext()) + SYCLBuiltinNumBasesExpr(Loc, SourceTy, getASTContext().getSizeType()); } -ExprResult Sema::ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx) { +ExprResult SemaSYCL::ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx) { TypeSourceInfo *TInfo = nullptr; - QualType QT = GetTypeFromParser(PT, &TInfo); + QualType QT = SemaRef.GetTypeFromParser(PT, &TInfo); assert(TInfo && "couldn't get type info from a type from the parser?"); SourceLocation TypeLoc = TInfo->getTypeLoc().getBeginLoc(); return BuildSYCLBuiltinBaseTypeExpr(TypeLoc, QT, Idx); } -ExprResult Sema::BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, - QualType SourceTy, Expr *Idx) { +ExprResult SemaSYCL::BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, + QualType SourceTy, + Expr *Idx) { // If the expression appears in an evaluated context, we want to give an // error so that users don't attempt to use the value of this expression. 
- if (!isUnevaluatedContext()) { + if (!SemaRef.isUnevaluatedContext()) { Diag(Loc, diag::err_sycl_builtin_type_trait_evaluated) << /*__builtin_base_type*/ 1; return ExprError(); @@ -265,9 +269,9 @@ ExprResult Sema::BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, // later to the real type. QualType BaseTy = SourceTy; if (!SourceTy->isDependentType()) { - if (RequireCompleteType(Loc, SourceTy, - diag::err_sycl_type_trait_requires_complete_type, - /*__builtin_base_type*/ 3)) + if (SemaRef.RequireCompleteType( + Loc, SourceTy, diag::err_sycl_type_trait_requires_complete_type, + /*__builtin_base_type*/ 3)) return ExprError(); if (!SourceTy->isRecordType()) { @@ -277,7 +281,8 @@ ExprResult Sema::BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, } if (!Idx->isValueDependent()) { - std::optional IdxVal = Idx->getIntegerConstantExpr(Context); + std::optional IdxVal = + Idx->getIntegerConstantExpr(getASTContext()); if (IdxVal) { CXXRecordDecl *RD = SourceTy->getAsCXXRecordDecl(); assert(RD && "Record type but no record decl?"); @@ -303,7 +308,8 @@ ExprResult Sema::BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, } } } - return new (Context) SYCLBuiltinBaseTypeExpr(Loc, SourceTy, Idx, BaseTy); + return new (getASTContext()) + SYCLBuiltinBaseTypeExpr(Loc, SourceTy, Idx, BaseTy); } // This information is from Section 4.13 of the SYCL spec @@ -366,7 +372,7 @@ static bool IsSyclMathFunc(unsigned BuiltinID) { return true; } -bool Sema::isDeclAllowedInSYCLDeviceCode(const Decl *D) { +bool SemaSYCL::isDeclAllowedInSYCLDeviceCode(const Decl *D) { if (const FunctionDecl *FD = dyn_cast(D)) { const IdentifierInfo *II = FD->getIdentifier(); @@ -378,7 +384,7 @@ bool Sema::isDeclAllowedInSYCLDeviceCode(const Decl *D) { return true; // Allow to use `::printf` only for CUDA. 
- if (Context.getTargetInfo().getTriple().isNVPTX()) { + if (getASTContext().getTargetInfo().getTriple().isNVPTX()) { if (FD->getBuiltinID() == Builtin::BIprintf) return true; } @@ -392,13 +398,16 @@ bool Sema::isDeclAllowedInSYCLDeviceCode(const Decl *D) { return false; } -static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) { - if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty)) +SemaSYCL::SemaSYCL(Sema &S) + : SemaBase(S), SyclIntHeader(nullptr), SyclIntFooter(nullptr) {} + +static bool isZeroSizedArray(SemaSYCL &S, QualType Ty) { + if (const auto *CAT = S.getASTContext().getAsConstantArrayType(Ty)) return CAT->isZeroSize(); return false; } -static void checkSYCLType(Sema &S, QualType Ty, SourceRange Loc, +static void checkSYCLType(SemaSYCL &S, QualType Ty, SourceRange Loc, llvm::DenseSet Visited, SourceRange UsedAtLoc = SourceRange()) { // Not all variable types are supported inside SYCL kernels, @@ -417,14 +426,14 @@ static void checkSYCLType(Sema &S, QualType Ty, SourceRange Loc, // zero length arrays if (isZeroSizedArray(S, Ty)) { - S.SYCLDiagIfDeviceCode(Loc.getBegin(), diag::err_typecheck_zero_array_size) + S.DiagIfDeviceCode(Loc.getBegin(), diag::err_typecheck_zero_array_size) << 1; Emitting = true; } // variable length arrays if (Ty->isVariableArrayType()) { - S.SYCLDiagIfDeviceCode(Loc.getBegin(), diag::err_vla_unsupported) << 0; + S.DiagIfDeviceCode(Loc.getBegin(), diag::err_vla_unsupported) << 0; Emitting = true; } @@ -438,14 +447,14 @@ static void checkSYCLType(Sema &S, QualType Ty, SourceRange Loc, Ty->isSpecificBuiltinType(BuiltinType::LongDouble) || Ty->isSpecificBuiltinType(BuiltinType::BFloat16) || (Ty->isSpecificBuiltinType(BuiltinType::Float128) && - !S.Context.getTargetInfo().hasFloat128Type())) { - S.SYCLDiagIfDeviceCode(Loc.getBegin(), diag::err_type_unsupported) + !S.getASTContext().getTargetInfo().hasFloat128Type())) { + S.DiagIfDeviceCode(Loc.getBegin(), diag::err_type_unsupported) << Ty.getUnqualifiedType().getCanonicalType(); Emitting = true; } if (Emitting && UsedAtLoc.isValid()) - S.SYCLDiagIfDeviceCode(UsedAtLoc.getBegin(), diag::note_used_here); + S.DiagIfDeviceCode(UsedAtLoc.getBegin(), diag::note_used_here); //--- now recurse --- // Pointers complicate recursion. Add this type to Visited. @@ -466,7 +475,7 @@ static void checkSYCLType(Sema &S, QualType Ty, SourceRange Loc, } } -void Sema::checkSYCLDeviceVarDecl(VarDecl *Var) { +void SemaSYCL::checkSYCLDeviceVarDecl(VarDecl *Var) { assert(getLangOpts().SYCLIsDevice && "Should only be called during SYCL compilation"); QualType Ty = Var->getType(); @@ -506,7 +515,7 @@ static bool isSYCLUndefinedAllowed(const FunctionDecl *Callee, // Helper function to report conflicting function attributes. // F - the function, A1 - function attribute, A2 - the attribute it conflicts // with. -static void reportConflictingAttrs(Sema &S, FunctionDecl *F, const Attr *A1, +static void reportConflictingAttrs(SemaSYCL &S, FunctionDecl *F, const Attr *A1, const Attr *A2) { S.Diag(F->getLocation(), diag::err_conflicting_sycl_kernel_attributes); S.Diag(A1->getLocation(), diag::note_conflicting_attribute); @@ -520,7 +529,7 @@ static int64_t getIntExprValue(const Expr *E, ASTContext &Ctx) { } // Collect function attributes related to SYCL. 
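checkSYCLType above reports through S.DiagIfDeviceCode(...), the SemaSYCL spelling of the previous SYCLDiagIfDeviceCode; the diagnostic is deferred and is intended to surface only if the surrounding function is actually emitted as device code. A small sketch of the call shape, assuming S is a valid SemaSYCL reference and reusing a diagnostic ID the hunk above already uses:

    #include "clang/Basic/DiagnosticSemaKinds.h"
    #include "clang/Basic/SourceLocation.h"
    #include "clang/Sema/SemaSYCL.h"

    // Sketch only: report a VLA in (potential) device code; the << 0 selects the
    // same %select alternative used in the hunk above.
    static void diagnoseVlaInDeviceCode(clang::SemaSYCL &S, clang::SourceLocation Loc) {
      S.DiagIfDeviceCode(Loc, clang::diag::err_vla_unsupported) << 0;
    }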
-static void collectSYCLAttributes(Sema &S, FunctionDecl *FD, +static void collectSYCLAttributes(SemaSYCL &S, FunctionDecl *FD, llvm::SmallVectorImpl &Attrs, bool DirectlyCalled) { if (!FD->hasAttrs()) @@ -557,14 +566,14 @@ static void collectSYCLAttributes(Sema &S, FunctionDecl *FD, } class DiagDeviceFunction : public RecursiveASTVisitor { - Sema &SemaRef; + SemaSYCL &SemaSYCLRef; const llvm::SmallPtrSetImpl &RecursiveFuncs; public: DiagDeviceFunction( - Sema &S, + SemaSYCL &S, const llvm::SmallPtrSetImpl &RecursiveFuncs) - : RecursiveASTVisitor(), SemaRef(S), RecursiveFuncs(RecursiveFuncs) {} + : RecursiveASTVisitor(), SemaSYCLRef(S), RecursiveFuncs(RecursiveFuncs) {} void CheckBody(Stmt *ToBeDiagnosed) { TraverseStmt(ToBeDiagnosed); } @@ -578,18 +587,18 @@ class DiagDeviceFunction : public RecursiveASTVisitor { // all functions used by kernel have already been parsed and have // definitions. if (RecursiveFuncs.count(Callee)) { - SemaRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) - << Sema::KernelCallRecursiveFunction; - SemaRef.Diag(Callee->getSourceRange().getBegin(), - diag::note_sycl_recursive_function_declared_here) - << Sema::KernelCallRecursiveFunction; + SemaSYCLRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelCallRecursiveFunction; + SemaSYCLRef.Diag(Callee->getSourceRange().getBegin(), + diag::note_sycl_recursive_function_declared_here) + << SemaSYCL::KernelCallRecursiveFunction; } if (const CXXMethodDecl *Method = dyn_cast(Callee)) if (Method->isVirtual() && - !SemaRef.getLangOpts().SYCLAllowVirtualFunctions) - SemaRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) - << Sema::KernelCallVirtualFunction; + !SemaSYCLRef.getLangOpts().SYCLAllowVirtualFunctions) + SemaSYCLRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelCallVirtualFunction; if (auto const *FD = dyn_cast(Callee)) { // FIXME: We need check all target specified attributes for error if @@ -598,41 +607,44 @@ class DiagDeviceFunction : public RecursiveASTVisitor { // currently. Erich is currently working on that in LLVM, once that is // committed we need to change this". 
if (FD->hasAttr()) { - SemaRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) - << Sema::KernelCallDllimportFunction; - SemaRef.Diag(FD->getLocation(), diag::note_callee_decl) << FD; + SemaSYCLRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelCallDllimportFunction; + SemaSYCLRef.Diag(FD->getLocation(), diag::note_callee_decl) << FD; } } // Specifically check if the math library function corresponding to this // builtin is supported for SYCL unsigned BuiltinID = Callee->getBuiltinID(); if (BuiltinID && !IsSyclMathFunc(BuiltinID)) { - StringRef Name = SemaRef.Context.BuiltinInfo.getName(BuiltinID); - SemaRef.Diag(e->getExprLoc(), diag::err_builtin_target_unsupported) + StringRef Name = + SemaSYCLRef.getASTContext().BuiltinInfo.getName(BuiltinID); + SemaSYCLRef.Diag(e->getExprLoc(), diag::err_builtin_target_unsupported) << Name << "SYCL device"; } - } else if (!SemaRef.getLangOpts().SYCLAllowFuncPtr && + } else if (!SemaSYCLRef.getLangOpts().SYCLAllowFuncPtr && !e->isTypeDependent() && !isa(e->getCallee())) { bool MaybeConstantExpr = false; Expr *NonDirectCallee = e->getCallee(); if (!NonDirectCallee->isValueDependent()) MaybeConstantExpr = - NonDirectCallee->isCXX11ConstantExpr(SemaRef.getASTContext()); + NonDirectCallee->isCXX11ConstantExpr(SemaSYCLRef.getASTContext()); if (!MaybeConstantExpr) - SemaRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) - << Sema::KernelCallFunctionPointer; + SemaSYCLRef.Diag(e->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelCallFunctionPointer; } return true; } bool VisitCXXTypeidExpr(CXXTypeidExpr *E) { - SemaRef.Diag(E->getExprLoc(), diag::err_sycl_restrict) << Sema::KernelRTTI; + SemaSYCLRef.Diag(E->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelRTTI; return true; } bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) { - SemaRef.Diag(E->getExprLoc(), diag::err_sycl_restrict) << Sema::KernelRTTI; + SemaSYCLRef.Diag(E->getExprLoc(), diag::err_sycl_restrict) + << SemaSYCL::KernelRTTI; return true; } @@ -667,7 +679,7 @@ class DiagDeviceFunction : public RecursiveASTVisitor { bool TraverseIfStmt(IfStmt *S) { if (std::optional ActiveStmt = - S->getNondiscardedCase(SemaRef.Context)) { + S->getNondiscardedCase(SemaSYCLRef.getASTContext())) { if (*ActiveStmt) return TraverseStmt(*ActiveStmt); return true; @@ -686,7 +698,7 @@ class DiagDeviceFunction : public RecursiveASTVisitor { class DeviceFunctionTracker { friend class SingleDeviceFunctionTracker; CallGraph CG; - Sema &SemaRef; + SemaSYCL &SemaSYCLRef; // The list of functions used on the device, kept so we can diagnose on them // later. 
llvm::SmallPtrSet DeviceFunctions; @@ -696,7 +708,7 @@ class DeviceFunctionTracker { for (CallGraphNode::CallRecord Record : CG.getRoot()->callees()) if (auto *FD = dyn_cast(Record.Callee->getDecl())) if (FD->hasBody() && FD->hasAttr()) - SemaRef.addSyclDeviceDecl(FD); + SemaSYCLRef.addSyclDeviceDecl(FD); } CallGraphNode *getNodeForKernel(FunctionDecl *Kernel) { @@ -712,14 +724,14 @@ class DeviceFunctionTracker { } public: - DeviceFunctionTracker(Sema &S) : SemaRef(S) { - CG.setSkipConstantExpressions(S.Context); + DeviceFunctionTracker(SemaSYCL &S) : SemaSYCLRef(S) { + CG.setSkipConstantExpressions(S.getASTContext()); CG.addToCallGraph(S.getASTContext().getTranslationUnitDecl()); CollectSyclExternalFuncs(); } ~DeviceFunctionTracker() { - DiagDeviceFunction Diagnoser{SemaRef, RecursiveFunctions}; + DiagDeviceFunction Diagnoser{SemaSYCLRef, RecursiveFunctions}; for (const FunctionDecl *FD : DeviceFunctions) if (const FunctionDecl *Def = FD->getDefinition()) Diagnoser.CheckBody(Def->getBody()); @@ -786,8 +798,8 @@ class SingleDeviceFunctionTracker { // cases later in finalizeSYCLDelayedAnalysis(). if (!CurrentDecl->isDefined() && !CurrentDecl->hasAttr() && !CurrentDecl->hasAttr()) - Parent.SemaRef.addFDToReachableFromSyclDevice(CurrentDecl, - CallStack.back()); + Parent.SemaSYCLRef.addFDToReachableFromSyclDevice(CurrentDecl, + CallStack.back()); // If this is a parallel_for_work_item that is declared in the // sycl namespace, mark it with the WorkItem scope attribute. @@ -798,9 +810,8 @@ class SingleDeviceFunctionTracker { CurrentDecl->getIdentifier()->getName() == "parallel_for_work_item" && isDeclaredInSYCLNamespace(CurrentDecl) && !CurrentDecl->hasAttr()) { - CurrentDecl->addAttr( - SYCLScopeAttr::CreateImplicit(Parent.SemaRef.getASTContext(), - SYCLScopeAttr::Level::WorkItem)); + CurrentDecl->addAttr(SYCLScopeAttr::CreateImplicit( + Parent.SemaSYCLRef.getASTContext(), SYCLScopeAttr::Level::WorkItem)); } // We previously thought we could skip this function if we'd seen it before, @@ -824,8 +835,8 @@ class SingleDeviceFunctionTracker { // Collect attributes for functions that aren't the root kernel. if (!CallStack.empty()) { bool DirectlyCalled = CallStack.size() == 1; - collectSYCLAttributes(Parent.SemaRef, CurrentDecl, CollectedAttributes, - DirectlyCalled); + collectSYCLAttributes(Parent.SemaSYCLRef, CurrentDecl, + CollectedAttributes, DirectlyCalled); } // Calculate the kernel body. Note the 'isSYCLKernelBodyFunction' only @@ -882,7 +893,7 @@ class SingleDeviceFunctionTracker { // Always inline the KernelBody in the kernel entry point. For ESIMD // inlining is handled later down the pipeline. 
if (KernelBody && - Parent.SemaRef.getLangOpts().SYCLForceInlineKernelLambda && + Parent.SemaSYCLRef.getLangOpts().SYCLForceInlineKernelLambda && !KernelBody->hasAttr() && !KernelBody->hasAttr() && !KernelBody->hasAttr()) { @@ -948,7 +959,7 @@ class MarkWIScopeFnVisitor : public RecursiveASTVisitor { // not a direct call - continue search return true; QualType Ty = Ctx.getRecordType(Call->getRecordDecl()); - if (!Sema::isSyclType(Ty, SYCLTypeAttr::group)) + if (!SemaSYCL::isSyclType(Ty, SYCLTypeAttr::group)) // not a member of sycl::group - continue search return true; auto Name = Callee->getName(); @@ -967,7 +978,7 @@ class MarkWIScopeFnVisitor : public RecursiveASTVisitor { }; static bool isSYCLPrivateMemoryVar(VarDecl *VD) { - return Sema::isSyclType(VD->getType(), SYCLTypeAttr::private_memory); + return SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::private_memory); } static void addScopeAttrToLocalVars(CXXMethodDecl &F) { @@ -1046,7 +1057,7 @@ static ParamDesc makeParamDesc(ASTContext &Ctx, StringRef Name, QualType Ty) { /// \return the target of given SYCL accessor type static target getAccessTarget(QualType FieldTy, const ClassTemplateSpecializationDecl *AccTy) { - if (Sema::isSyclType(FieldTy, SYCLTypeAttr::local_accessor)) + if (SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::local_accessor)) return local; return static_cast( @@ -1066,7 +1077,7 @@ static QualType calculateKernelNameType(ASTContext &Ctx, // Gets a name for the OpenCL kernel function, calculated from the first // template argument of the kernel caller function. static std::pair -constructKernelName(Sema &S, const FunctionDecl *KernelCallerFunc, +constructKernelName(SemaSYCL &S, const FunctionDecl *KernelCallerFunc, MangleContext &MC) { QualType KernelNameType = calculateKernelNameType(S.getASTContext(), KernelCallerFunc); @@ -1108,7 +1119,7 @@ static ParmVarDecl *getSyclKernelHandlerArg(FunctionDecl *KernelCallerFunc) { // Specialization constants in SYCL 2020 are not captured by lambda and // accessed through new optional lambda argument kernel_handler auto IsHandlerLambda = [](ParmVarDecl *PVD) { - return Sema::isSyclType(PVD->getType(), SYCLTypeAttr::kernel_handler); + return SemaSYCL::isSyclType(PVD->getType(), SYCLTypeAttr::kernel_handler); }; assert(llvm::count_if(KernelCallerFunc->parameters(), IsHandlerLambda) <= 1 && @@ -1149,7 +1160,7 @@ template <> struct bind_param { using type = FieldDecl *; }; template using bind_param_t = typename bind_param::type; class KernelObjVisitor { - Sema &SemaRef; + SemaSYCL &SemaSYCLRef; template void VisitUnionImpl(const CXXRecordDecl *Owner, ParentTy &Parent, @@ -1230,7 +1241,7 @@ class KernelObjVisitor { for (const auto &Base : Range) { QualType BaseTy = Base.getType(); // Handle accessor class as base - if (isSyclSpecialType(BaseTy, SemaRef)) + if (isSyclSpecialType(BaseTy, SemaSYCLRef)) (void)std::initializer_list{ (Handlers.handleSyclSpecialType(Owner, Base, BaseTy), 0)...}; else @@ -1289,7 +1300,7 @@ class KernelObjVisitor { return; const ConstantArrayType *CAT = - SemaRef.getASTContext().getAsConstantArrayType(ArrayTy); + SemaSYCLRef.getASTContext().getAsConstantArrayType(ArrayTy); assert(CAT && "Should only be called on constant-size array."); QualType ET = CAT->getElementType(); uint64_t ElemCount = CAT->getSize().getZExtValue(); @@ -1308,7 +1319,7 @@ class KernelObjVisitor { template void visitField(const CXXRecordDecl *Owner, FieldDecl *Field, QualType FieldTy, HandlerTys &... 
Handlers) { - if (isSyclSpecialType(FieldTy, SemaRef)) + if (isSyclSpecialType(FieldTy, SemaSYCLRef)) KF_FOR_EACH(handleSyclSpecialType, Field, FieldTy); else if (FieldTy->isStructureOrClassType()) { if (KF_FOR_EACH(handleStructType, Field, FieldTy)) { @@ -1333,7 +1344,7 @@ class KernelObjVisitor { } public: - KernelObjVisitor(Sema &S) : SemaRef(S) {} + KernelObjVisitor(SemaSYCL &S) : SemaSYCLRef(S) {} template void VisitRecordBases(const CXXRecordDecl *KernelFunctor, @@ -1443,15 +1454,15 @@ class SyclKernelFieldHandlerBase { // universally required data). class SyclKernelFieldHandler : public SyclKernelFieldHandlerBase { protected: - Sema &SemaRef; - SyclKernelFieldHandler(Sema &S) : SemaRef(S) {} + SemaSYCL &SemaSYCLRef; + SyclKernelFieldHandler(SemaSYCL &S) : SemaSYCLRef(S) {} // Returns 'true' if the thing we're visiting (Based on the FD/QualType pair) // is an element of an array. FD will always be the array field. When // traversing the array field, Ty will be the type of the array field or the // type of array element (or some decomposed type from array). bool isArrayElement(const FieldDecl *FD, QualType Ty) const { - return !SemaRef.getASTContext().hasSameType(FD->getType(), Ty); + return !SemaSYCLRef.getASTContext().hasSameType(FD->getType(), Ty); } }; @@ -1611,7 +1622,7 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { bool checkNotCopyableToKernel(const FieldDecl *FD, QualType FieldTy) { if (FieldTy->isArrayType()) { if (const auto *CAT = - SemaRef.getASTContext().getAsConstantArrayType(FieldTy)) { + SemaSYCLRef.getASTContext().getAsConstantArrayType(FieldTy)) { QualType ET = CAT->getElementType(); return checkNotCopyableToKernel(FD, ET); } @@ -1625,24 +1636,24 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { bool checkPropertyListType(TemplateArgument PropList, SourceLocation Loc) { if (PropList.getKind() != TemplateArgument::ArgKind::Type) - return SemaRef.Diag( + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_template_param); QualType PropListTy = PropList.getAsType(); - if (!Sema::isSyclType(PropListTy, SYCLTypeAttr::accessor_property_list)) - return SemaRef.Diag( + if (!SemaSYCL::isSyclType(PropListTy, SYCLTypeAttr::accessor_property_list)) + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_template_param); const auto *AccPropListDecl = cast(PropListTy->getAsRecordDecl()); if (AccPropListDecl->getTemplateArgs().size() != 1) - return SemaRef.Diag(Loc, - diag::err_sycl_invalid_property_list_param_number) + return SemaSYCLRef.Diag(Loc, + diag::err_sycl_invalid_property_list_param_number) << "accessor_property_list"; const auto TemplArg = AccPropListDecl->getTemplateArgs()[0]; if (TemplArg.getKind() != TemplateArgument::ArgKind::Pack) - return SemaRef.Diag( + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_list_template_param) << /*accessor_property_list*/ 0 << /*parameter pack*/ 0; @@ -1650,7 +1661,7 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { for (TemplateArgument::pack_iterator Prop = TemplArg.pack_begin(); Prop != TemplArg.pack_end(); ++Prop) { if (Prop->getKind() != TemplateArgument::ArgKind::Type) - return SemaRef.Diag( + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_list_template_param) << /*accessor_property_list pack argument*/ 1 << /*type*/ 1; @@ -1666,20 +1677,20 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { const auto *PropDecl = cast(PropTy->getAsRecordDecl()); if (PropDecl->getTemplateArgs().size() 
!= 1) - return SemaRef.Diag(Loc, - diag::err_sycl_invalid_property_list_param_number) + return SemaSYCLRef.Diag(Loc, + diag::err_sycl_invalid_property_list_param_number) << "buffer_location"; const auto BufferLoc = PropDecl->getTemplateArgs()[0]; if (BufferLoc.getKind() != TemplateArgument::ArgKind::Integral) - return SemaRef.Diag( + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_list_template_param) << /*buffer_location*/ 2 << /*non-negative integer*/ 2; int LocationID = static_cast(BufferLoc.getAsIntegral().getExtValue()); if (LocationID < 0) - return SemaRef.Diag( + return SemaSYCLRef.Diag( Loc, diag::err_sycl_invalid_accessor_property_list_template_param) << /*buffer_location*/ 2 << /*non-negative integer*/ 2; @@ -1688,16 +1699,16 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { } bool checkSyclSpecialType(QualType Ty, SourceRange Loc) { - assert(isSyclSpecialType(Ty, SemaRef) && + assert(isSyclSpecialType(Ty, SemaSYCLRef) && "Should only be called on sycl special class types."); // Annotated pointers and annotated arguments must be captured // directly by the SYCL kernel. - if ((Sema::isSyclType(Ty, SYCLTypeAttr::annotated_ptr) || - Sema::isSyclType(Ty, SYCLTypeAttr::annotated_arg)) && + if ((SemaSYCL::isSyclType(Ty, SYCLTypeAttr::annotated_ptr) || + SemaSYCL::isSyclType(Ty, SYCLTypeAttr::annotated_arg)) && (StructFieldDepth > 0 || StructBaseDepth > 0)) - return SemaRef.Diag(Loc.getBegin(), - diag::err_bad_kernel_param_data_members) + return SemaSYCLRef.Diag(Loc.getBegin(), + diag::err_bad_kernel_param_data_members) << Ty << /*Struct*/ 1; const RecordDecl *RecD = Ty->getAsRecordDecl(); @@ -1709,7 +1720,7 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { // Parameter packs are used by properties so they are always valid. if (TA.getKind() != TemplateArgument::Pack) { llvm::DenseSet Visited; - checkSYCLType(SemaRef, TA.getAsType(), Loc, Visited); + checkSYCLType(SemaSYCLRef, TA.getAsType(), Loc, Visited); } if (TAL.size() > 5) @@ -1719,7 +1730,7 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { } public: - SyclKernelFieldChecker(Sema &S) + SyclKernelFieldChecker(SemaSYCL &S) : SyclKernelFieldHandler(S), Diag(S.getASTContext().getDiagnostics()) {} static constexpr const bool VisitNthArrayElement = false; bool isValid() { return !IsInvalid; } @@ -1737,7 +1748,7 @@ class SyclKernelFieldChecker : public SyclKernelFieldHandler { if (RD->isLambda()) { for (const LambdaCapture &LC : RD->captures()) if (LC.capturesThis() && LC.isImplicit()) { - SemaRef.Diag(LC.getLocation(), diag::err_implicit_this_capture); + SemaSYCLRef.Diag(LC.getLocation(), diag::err_implicit_this_capture); IsInvalid = true; } } @@ -1809,7 +1820,7 @@ class SyclKernelUnionChecker : public SyclKernelFieldHandler { DiagnosticsEngine &Diag; public: - SyclKernelUnionChecker(Sema &S) + SyclKernelUnionChecker(SemaSYCL &S) : SyclKernelFieldHandler(S), Diag(S.getASTContext().getDiagnostics()) {} bool isValid() { return !IsInvalid; } static constexpr const bool VisitUnionBody = true; @@ -1856,7 +1867,7 @@ class SyclKernelDecompMarker : public SyclKernelFieldHandler { static constexpr const bool VisitUnionBody = false; static constexpr const bool VisitNthArrayElement = false; - SyclKernelDecompMarker(Sema &S) : SyclKernelFieldHandler(S) { + SyclKernelDecompMarker(SemaSYCL &S) : SyclKernelFieldHandler(S) { // In order to prevent checking this over and over, just add a dummy-base // entry. 
CollectionStack.push_back(true); @@ -1894,14 +1905,14 @@ class SyclKernelDecompMarker : public SyclKernelFieldHandler { if (CollectionStack.pop_back_val()) { if (!RD->hasAttr()) RD->addAttr(SYCLRequiresDecompositionAttr::CreateImplicit( - SemaRef.getASTContext())); + SemaSYCLRef.getASTContext())); CollectionStack.back() = true; PointerStack.pop_back(); } else if (PointerStack.pop_back_val()) { PointerStack.back() = true; if (!RD->hasAttr()) - RD->addAttr( - SYCLGenerateNewTypeAttr::CreateImplicit(SemaRef.getASTContext())); + RD->addAttr(SYCLGenerateNewTypeAttr::CreateImplicit( + SemaSYCLRef.getASTContext())); } return true; } @@ -1924,14 +1935,14 @@ class SyclKernelDecompMarker : public SyclKernelFieldHandler { if (CollectionStack.pop_back_val()) { if (!RD->hasAttr()) RD->addAttr(SYCLRequiresDecompositionAttr::CreateImplicit( - SemaRef.getASTContext())); + SemaSYCLRef.getASTContext())); CollectionStack.back() = true; PointerStack.pop_back(); } else if (PointerStack.pop_back_val()) { PointerStack.back() = true; if (!RD->hasAttr()) - RD->addAttr( - SYCLGenerateNewTypeAttr::CreateImplicit(SemaRef.getASTContext())); + RD->addAttr(SYCLGenerateNewTypeAttr::CreateImplicit( + SemaSYCLRef.getASTContext())); } return true; } @@ -1953,20 +1964,20 @@ class SyclKernelDecompMarker : public SyclKernelFieldHandler { // times. if (!FD->hasAttr()) FD->addAttr(SYCLRequiresDecompositionAttr::CreateImplicit( - SemaRef.getASTContext())); + SemaSYCLRef.getASTContext())); CollectionStack.back() = true; PointerStack.pop_back(); } else if (PointerStack.pop_back_val()) { if (!FD->hasAttr()) - FD->addAttr( - SYCLGenerateNewTypeAttr::CreateImplicit(SemaRef.getASTContext())); + FD->addAttr(SYCLGenerateNewTypeAttr::CreateImplicit( + SemaSYCLRef.getASTContext())); PointerStack.back() = true; } return true; } }; -static QualType ModifyAddressSpace(Sema &SemaRef, QualType Ty) { +static QualType ModifyAddressSpace(SemaSYCL &SemaSYCLRef, QualType Ty) { // USM allows to use raw pointers instead of buffers/accessors, but these // pointers point to the specially allocated memory. For pointer fields, // except for function pointer fields, we add a kernel argument with the @@ -1981,9 +1992,9 @@ static QualType ModifyAddressSpace(Sema &SemaRef, QualType Ty) { if (!PointeeTy->isFunctionType() && AS != LangAS::sycl_global_device && AS != LangAS::sycl_global_host) Quals.setAddressSpace(LangAS::sycl_global); - PointeeTy = SemaRef.getASTContext().getQualifiedType( + PointeeTy = SemaSYCLRef.getASTContext().getQualifiedType( PointeeTy.getUnqualifiedType(), Quals); - return SemaRef.getASTContext().getPointerType(PointeeTy); + return SemaSYCLRef.getASTContext().getPointerType(PointeeTy); } // This visitor is used to traverse a non-decomposed record/array to @@ -1996,7 +2007,7 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { IdentifierInfo *getModifiedName(IdentifierInfo *Id) { std::string Name = Id ? (Twine("__generated_") + Id->getName()).str() : "__generated_"; - return &SemaRef.getASTContext().Idents.get(Name); + return &SemaSYCLRef.getASTContext().Idents.get(Name); } // Create Decl for the new type we are generating. @@ -2004,7 +2015,7 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { // the visitor traverses kernel object record fields. 
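ModifyAddressSpace above rebuilds pointer-typed kernel arguments so that non-function pointees land in the sycl_global address space (unless they are already device- or host-global). A condensed sketch of that qualifier rewrite, assuming Ctx is a valid ASTContext:

    #include "clang/AST/ASTContext.h"
    #include "clang/Basic/AddressSpaces.h"

    // Sketch: re-qualify the pointee with LangAS::sycl_global and rebuild the pointer.
    static clang::QualType toSyclGlobalPointer(clang::ASTContext &Ctx, clang::QualType PtrTy) {
      clang::QualType Pointee = PtrTy->getPointeeType();
      clang::Qualifiers Quals = Pointee.getQualifiers();
      Quals.setAddressSpace(clang::LangAS::sycl_global);
      Pointee = Ctx.getQualifiedType(Pointee.getUnqualifiedType(), Quals);
      return Ctx.getPointerType(Pointee);
    }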
void createNewType(const CXXRecordDecl *RD) { auto *ModifiedRD = CXXRecordDecl::Create( - SemaRef.getASTContext(), RD->getTagKind(), + SemaSYCLRef.getASTContext(), RD->getTagKind(), const_cast(RD->getDeclContext()), SourceLocation(), SourceLocation(), getModifiedName(RD->getIdentifier())); ModifiedRD->startDefinition(); @@ -2017,7 +2028,7 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { void addField(const FieldDecl *FD, QualType FieldTy) { assert(!ModifiedRecords.empty() && "ModifiedRecords should have at least 1 record"); - ASTContext &Ctx = SemaRef.getASTContext(); + ASTContext &Ctx = SemaSYCLRef.getASTContext(); auto *Field = FieldDecl::Create( Ctx, ModifiedRecords.back(), SourceLocation(), SourceLocation(), getModifiedName(FD->getIdentifier()), FieldTy, @@ -2032,9 +2043,10 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { void createBaseSpecifier(const CXXRecordDecl *Parent, const CXXRecordDecl *RD, const CXXBaseSpecifier &BS) { - TypeSourceInfo *TInfo = SemaRef.getASTContext().getTrivialTypeSourceInfo( - QualType(RD->getTypeForDecl(), 0), SourceLocation()); - CXXBaseSpecifier *ModifiedBase = SemaRef.CheckBaseSpecifier( + TypeSourceInfo *TInfo = + SemaSYCLRef.getASTContext().getTrivialTypeSourceInfo( + QualType(RD->getTypeForDecl(), 0), SourceLocation()); + CXXBaseSpecifier *ModifiedBase = SemaSYCLRef.SemaRef.CheckBaseSpecifier( const_cast(Parent), SourceRange(), BS.isVirtual(), BS.getAccessSpecifier(), TInfo, SourceLocation()); ModifiedBases.push_back(ModifiedBase); @@ -2062,12 +2074,12 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { public: static constexpr const bool VisitInsideSimpleContainersWithPointer = true; static constexpr const bool VisitNthArrayElement = false; - SyclKernelPointerHandler(Sema &S, const CXXRecordDecl *RD) + SyclKernelPointerHandler(SemaSYCL &S, const CXXRecordDecl *RD) : SyclKernelFieldHandler(S) { createNewType(RD); } - SyclKernelPointerHandler(Sema &S) : SyclKernelFieldHandler(S) {} + SyclKernelPointerHandler(SemaSYCL &S) : SyclKernelFieldHandler(S) {} bool enterStruct(const CXXRecordDecl *, FieldDecl *, QualType Ty) final { createNewType(Ty->getAsCXXRecordDecl()); @@ -2107,9 +2119,9 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { QualType ModifiedArrayElement = ModifiedArrayElementsOrArray.pop_back_val(); const ConstantArrayType *CAT = - SemaRef.getASTContext().getAsConstantArrayType(ArrayTy); + SemaSYCLRef.getASTContext().getAsConstantArrayType(ArrayTy); assert(CAT && "Should only be called on constant-size array."); - QualType ModifiedArray = SemaRef.getASTContext().getConstantArrayType( + QualType ModifiedArray = SemaSYCLRef.getASTContext().getConstantArrayType( ModifiedArrayElement, CAT->getSize(), const_cast(CAT->getSizeExpr()), CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers()); @@ -2129,7 +2141,7 @@ class SyclKernelPointerHandler : public SyclKernelFieldHandler { } bool handlePointerType(FieldDecl *FD, QualType FieldTy) final { - QualType ModifiedPointerType = ModifyAddressSpace(SemaRef, FieldTy); + QualType ModifiedPointerType = ModifyAddressSpace(SemaSYCLRef, FieldTy); if (!isArrayElement(FD, FieldTy)) addField(FD, ModifiedPointerType); else @@ -2202,19 +2214,19 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // This only happens with the accessor types. 
StringRef Name = "_arg__base"; ParamDesc newParamDesc = - makeParamDesc(SemaRef.getASTContext(), Name, FieldTy); + makeParamDesc(SemaSYCLRef.getASTContext(), Name, FieldTy); addParam(newParamDesc, FieldTy); } // Add a parameter with specified name and type void addParam(StringRef Name, QualType ParamTy) { ParamDesc newParamDesc = - makeParamDesc(SemaRef.getASTContext(), Name, ParamTy); + makeParamDesc(SemaSYCLRef.getASTContext(), Name, ParamTy); addParam(newParamDesc, ParamTy); } void addParam(ParamDesc newParamDesc, QualType FieldTy) { // Create a new ParmVarDecl based on the new info. - ASTContext &Ctx = SemaRef.getASTContext(); + ASTContext &Ctx = SemaSYCLRef.getASTContext(); auto *NewParam = ParmVarDecl::Create( Ctx, KernelDecl, SourceLocation(), SourceLocation(), std::get<1>(newParamDesc), std::get<0>(newParamDesc), @@ -2253,7 +2265,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { void handleNoAliasProperty(ParmVarDecl *Param, QualType PropTy, SourceLocation Loc) { - ASTContext &Ctx = SemaRef.getASTContext(); + ASTContext &Ctx = SemaSYCLRef.getASTContext(); Param->addAttr(RestrictAttr::CreateImplicit(Ctx, Loc)); } @@ -2264,11 +2276,11 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // If we have more than 1 buffer_location properties on a single // accessor - emit an error if (Param->hasAttr()) { - SemaRef.Diag(Loc, diag::err_sycl_compiletime_property_duplication) + SemaSYCLRef.Diag(Loc, diag::err_sycl_compiletime_property_duplication) << "buffer_location"; return; } - ASTContext &Ctx = SemaRef.getASTContext(); + ASTContext &Ctx = SemaSYCLRef.getASTContext(); const auto *PropDecl = cast(PropTy->getAsRecordDecl()); const auto BufferLoc = PropDecl->getTemplateArgs()[0]; @@ -2283,21 +2295,21 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { handleAccessorPropertyList(Params.back(), RecordDecl, Loc); // If "accessor" type check if read only - if (Sema::isSyclType(FieldTy, SYCLTypeAttr::accessor)) { + if (SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::accessor)) { // Get access mode of accessor. const auto *AccessorSpecializationDecl = cast(RecordDecl); const TemplateArgument &AccessModeArg = AccessorSpecializationDecl->getTemplateArgs().get(2); if (isReadOnlyAccessor(AccessModeArg)) - Params.back()->addAttr( - SYCLAccessorReadonlyAttr::CreateImplicit(SemaRef.getASTContext())); + Params.back()->addAttr(SYCLAccessorReadonlyAttr::CreateImplicit( + SemaSYCLRef.getASTContext())); } // Add implicit attribute to parameter decl when it is a read only // SYCL accessor. Params.back()->addAttr( - SYCLAccessorPtrAttr::CreateImplicit(SemaRef.getASTContext())); + SYCLAccessorPtrAttr::CreateImplicit(SemaSYCLRef.getASTContext())); } // All special SYCL objects must have __init method. We extract types for @@ -2324,7 +2336,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // Propagate add_ir_attributes_kernel_parameter attribute. if (const auto *AddIRAttr = Param->getAttr()) - Params.back()->addAttr(AddIRAttr->clone(SemaRef.getASTContext())); + Params.back()->addAttr(AddIRAttr->clone(SemaSYCLRef.getASTContext())); // FIXME: This code is temporary, and will be removed once __init_esimd // is removed and property list refactored. @@ -2374,8 +2386,8 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // pointers in 'right' address space. PointerHandler.getNewRecordType() // returns this generated type. 
QualType GenerateNewRecordType(const CXXRecordDecl *RD) { - SyclKernelPointerHandler PointerHandler(SemaRef, RD); - KernelObjVisitor Visitor{SemaRef}; + SyclKernelPointerHandler PointerHandler(SemaSYCLRef, RD); + KernelObjVisitor Visitor{SemaSYCLRef}; Visitor.VisitRecordBases(RD, PointerHandler); Visitor.VisitRecordFields(RD, PointerHandler); return PointerHandler.getNewRecordType(); @@ -2387,29 +2399,29 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // a new array with all pointers in the required address space. QualType GenerateNewArrayType(FieldDecl *FD, QualType FieldTy) { const auto *Owner = dyn_cast(FD->getParent()); - SyclKernelPointerHandler PointerHandler(SemaRef); - KernelObjVisitor Visitor{SemaRef}; + SyclKernelPointerHandler PointerHandler(SemaSYCLRef); + KernelObjVisitor Visitor{SemaSYCLRef}; Visitor.visitArray(Owner, FD, FieldTy, PointerHandler); return PointerHandler.getNewArrayType(); } public: static constexpr const bool VisitInsideSimpleContainers = false; - SyclKernelDeclCreator(Sema &S, SourceLocation Loc, bool IsInline, + SyclKernelDeclCreator(SemaSYCL &S, SourceLocation Loc, bool IsInline, bool IsSIMDKernel, FunctionDecl *SYCLKernel) : SyclKernelFieldHandler(S), KernelDecl( createKernelDecl(S.getASTContext(), Loc, IsInline, IsSIMDKernel)), - FuncContext(SemaRef, KernelDecl) { + FuncContext(SemaSYCLRef.SemaRef, KernelDecl) { S.addSyclOpenCLKernel(SYCLKernel, KernelDecl); if (const auto *AddIRAttrFunc = SYCLKernel->getAttr()) - KernelDecl->addAttr(AddIRAttrFunc->clone(SemaRef.getASTContext())); + KernelDecl->addAttr(AddIRAttrFunc->clone(SemaSYCLRef.getASTContext())); } ~SyclKernelDeclCreator() { - ASTContext &Ctx = SemaRef.getASTContext(); + ASTContext &Ctx = SemaSYCLRef.getASTContext(); FunctionProtoType::ExtProtoInfo Info(CC_OpenCLKernel); SmallVector ArgTys; @@ -2426,9 +2438,9 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // to TransformStmt in replaceWithLocalClone can diagnose something that got // diagnosed on the actual kernel. KernelDecl->addAttr( - SYCLKernelAttr::CreateImplicit(SemaRef.getASTContext())); + SYCLKernelAttr::CreateImplicit(SemaSYCLRef.getASTContext())); - SemaRef.addSyclDeviceDecl(KernelDecl); + SemaSYCLRef.addSyclDeviceDecl(KernelDecl); } bool enterStruct(const CXXRecordDecl *, FieldDecl *, QualType) final { @@ -2489,13 +2501,13 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { RecordDecl *wrapField(FieldDecl *Field, QualType FieldTy) { RecordDecl *WrapperClass = - SemaRef.getASTContext().buildImplicitRecord("__wrapper_class"); + SemaSYCLRef.getASTContext().buildImplicitRecord("__wrapper_class"); WrapperClass->startDefinition(); Field = FieldDecl::Create( - SemaRef.getASTContext(), WrapperClass, SourceLocation(), + SemaSYCLRef.getASTContext(), WrapperClass, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy, - SemaRef.getASTContext().getTrivialTypeSourceInfo(FieldTy, - SourceLocation()), + SemaSYCLRef.getASTContext().getTrivialTypeSourceInfo(FieldTy, + SourceLocation()), /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit); Field->setAccess(AS_public); WrapperClass->addDecl(Field); @@ -2504,7 +2516,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { }; bool handlePointerType(FieldDecl *FD, QualType FieldTy) final { - QualType ModTy = ModifyAddressSpace(SemaRef, FieldTy); + QualType ModTy = ModifyAddressSpace(SemaSYCLRef, FieldTy); // When the kernel is generated, struct type kernel arguments are // decomposed; i.e. 
the parameters of the kernel are the fields of the // struct, and not the struct itself. This causes an error in the backend @@ -2513,7 +2525,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // struct are wrapped in a generated '__wrapper_class'. if (StructDepth) { RecordDecl *WrappedPointer = wrapField(FD, ModTy); - ModTy = SemaRef.getASTContext().getRecordType(WrappedPointer); + ModTy = SemaSYCLRef.getASTContext().getRecordType(WrappedPointer); } addParam(FD, ModTy); @@ -2529,7 +2541,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // Arrays are wrapped in a struct since they cannot be passed directly. RecordDecl *WrappedArray = wrapField(FD, ArrayTy); - addParam(FD, SemaRef.getASTContext().getRecordType(WrappedArray)); + addParam(FD, SemaSYCLRef.getASTContext().getRecordType(WrappedArray)); return true; } @@ -2572,7 +2584,7 @@ class SyclKernelDeclCreator : public SyclKernelFieldHandler { // Generate kernel argument to initialize specialization constants. void handleSyclKernelHandlerType() { - ASTContext &Context = SemaRef.getASTContext(); + ASTContext &Context = SemaSYCLRef.getASTContext(); StringRef Name = "_arg__specialization_constants_buffer"; addParam(Name, Context.getPointerType(Context.getAddrSpaceQualType( Context.CharTy, LangAS::sycl_global))); @@ -2661,14 +2673,14 @@ class ESIMDKernelDiagnostics : public SyclKernelFieldHandler { const CXXRecordDecl *RecordDecl = FieldTy->getAsCXXRecordDecl(); if (IsESIMD && !isSyclAccessorType(FieldTy)) - return SemaRef.Diag(KernelLoc, - diag::err_sycl_esimd_not_supported_for_type) + return SemaSYCLRef.Diag(KernelLoc, + diag::err_sycl_esimd_not_supported_for_type) << RecordDecl; return true; } public: - ESIMDKernelDiagnostics(Sema &S, SourceLocation Loc, bool IsESIMD) + ESIMDKernelDiagnostics(SemaSYCL &S, SourceLocation Loc, bool IsESIMD) : SyclKernelFieldHandler(S), KernelLoc(Loc), IsESIMD(IsESIMD) {} bool handleSyclSpecialType(FieldDecl *FD, QualType FieldTy) final { @@ -2688,7 +2700,7 @@ class SyclKernelArgsSizeChecker : public SyclKernelFieldHandler { void addParam(QualType ArgTy) { SizeOfParams += - SemaRef.getASTContext().getTypeSizeInChars(ArgTy).getQuantity(); + SemaSYCLRef.getASTContext().getTypeSizeInChars(ArgTy).getQuantity(); } bool handleSpecialType(QualType FieldTy) { @@ -2706,12 +2718,12 @@ class SyclKernelArgsSizeChecker : public SyclKernelFieldHandler { public: static constexpr const bool VisitInsideSimpleContainers = false; - SyclKernelArgsSizeChecker(Sema &S, SourceLocation Loc, bool IsESIMD) + SyclKernelArgsSizeChecker(SemaSYCL &S, SourceLocation Loc, bool IsESIMD) : SyclKernelFieldHandler(S), KernelLoc(Loc), IsESIMD(IsESIMD) {} ~SyclKernelArgsSizeChecker() { if (SizeOfParams > MaxKernelArgsSize) - SemaRef.Diag(KernelLoc, diag::warn_sycl_kernel_too_big_args) + SemaSYCLRef.Diag(KernelLoc, diag::warn_sycl_kernel_too_big_args) << SizeOfParams << MaxKernelArgsSize; } @@ -2775,10 +2787,11 @@ class SyclOptReportCreator : public SyclKernelFieldHandler { if (KernelArgParent && KernelArgDescription == "decomposed struct/class") NameToEmitInDescription = KernelArgParent->getName(); - unsigned KernelArgSize = - SemaRef.getASTContext().getTypeSizeInChars(KernelArgType).getQuantity(); + unsigned KernelArgSize = SemaSYCLRef.getASTContext() + .getTypeSizeInChars(KernelArgType) + .getQuantity(); - SemaRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( + SemaSYCLRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( DC.getKernelDecl(), NameToEmitInDescription, IsCompilerGeneratedType ? 
"Compiler generated" : KernelArgType.getAsString(), @@ -2802,9 +2815,10 @@ class SyclOptReportCreator : public SyclKernelFieldHandler { void addParam(const CXXBaseSpecifier &, QualType KernelArgType, StringRef KernelArgDescription, bool IsCompilerGeneratedType = false) { - unsigned KernelArgSize = - SemaRef.getASTContext().getTypeSizeInChars(KernelArgType).getQuantity(); - SemaRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( + unsigned KernelArgSize = SemaSYCLRef.getASTContext() + .getTypeSizeInChars(KernelArgType) + .getQuantity(); + SemaSYCLRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( DC.getKernelDecl(), KernelArgType.getAsString(), IsCompilerGeneratedType ? "Compiler generated" : KernelArgType.getAsString(), @@ -2814,9 +2828,10 @@ class SyclOptReportCreator : public SyclKernelFieldHandler { // Handles specialization constants. void addParam(QualType KernelArgType, std::string KernelArgDescription) { - unsigned KernelArgSize = - SemaRef.getASTContext().getTypeSizeInChars(KernelArgType).getQuantity(); - SemaRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( + unsigned KernelArgSize = SemaSYCLRef.getASTContext() + .getTypeSizeInChars(KernelArgType) + .getQuantity(); + SemaSYCLRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( DC.getKernelDecl(), "", KernelArgType.getAsString(), KernelInvocationLoc, KernelArgSize, getKernelArgDesc(KernelArgDescription), ""); @@ -2824,7 +2839,8 @@ class SyclOptReportCreator : public SyclKernelFieldHandler { public: static constexpr const bool VisitInsideSimpleContainers = false; - SyclOptReportCreator(Sema &S, SyclKernelDeclCreator &DC, SourceLocation Loc) + SyclOptReportCreator(SemaSYCL &S, SyclKernelDeclCreator &DC, + SourceLocation Loc) : SyclKernelFieldHandler(S), DC(DC), KernelInvocationLoc(Loc) {} bool handleSyclSpecialType(FieldDecl *FD, QualType FieldTy) final { @@ -2838,10 +2854,10 @@ class SyclOptReportCreator : public SyclKernelFieldHandler { std::string KernelArgDescription = "base class " + FieldTy.getAsString(); for (const auto *Param : DC.getParamVarDeclsForCurrentField()) { QualType KernelArgType = Param->getType(); - unsigned KernelArgSize = SemaRef.getASTContext() + unsigned KernelArgSize = SemaSYCLRef.getASTContext() .getTypeSizeInChars(KernelArgType) .getQuantity(); - SemaRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( + SemaSYCLRef.getDiagnostics().getSYCLOptReport().AddKernelArgs( DC.getKernelDecl(), FieldTy.getAsString(), KernelArgType.getAsString(), KernelInvocationLoc, KernelArgSize, getKernelArgDesc(KernelArgDescription), ""); @@ -2948,7 +2964,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { LocalClone->setIsUsed(); std::pair MappingPair = std::make_pair(OriginalParam, LocalClone); - KernelBodyTransform KBT(MappingPair, SemaRef); + KernelBodyTransform KBT(MappingPair, SemaSYCLRef.SemaRef); return KBT.TransformStmt(FunctionBody).get(); } @@ -2957,7 +2973,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // statements in advance to allocate it, so we cannot do this as we go along. 
CompoundStmt *createKernelBody() { // Push the Kernel function scope to ensure the scope isn't empty - SemaRef.PushFunctionScope(); + SemaSYCLRef.SemaRef.PushFunctionScope(); // Initialize kernel object local clone assert(CollectionInitExprs.size() == 1 && @@ -2984,7 +3000,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { BodyStmts.insert(BodyStmts.end(), FinalizeStmts.begin(), FinalizeStmts.end()); - return CompoundStmt::Create(SemaRef.getASTContext(), BodyStmts, + return CompoundStmt::Create(SemaSYCLRef.getASTContext(), BodyStmts, FPOptionsOverride(), {}, {}); } @@ -2996,7 +3012,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // Mark kernel object with work-group scope attribute to avoid work-item // scope memory allocation. KernelObjClone->addAttr(SYCLScopeAttr::CreateImplicit( - SemaRef.getASTContext(), SYCLScopeAttr::Level::WorkGroup)); + SemaSYCLRef.getASTContext(), SYCLScopeAttr::Level::WorkGroup)); assert(CallOperator && "non callable object is passed as kernel obj"); // Mark the function that it "works" in a work group scope: @@ -3010,9 +3026,9 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // SYCL headers. if (!CallOperator->hasAttr()) { CallOperator->addAttr(SYCLScopeAttr::CreateImplicit( - SemaRef.getASTContext(), SYCLScopeAttr::Level::WorkGroup)); + SemaSYCLRef.getASTContext(), SYCLScopeAttr::Level::WorkGroup)); // Search and mark wait_for calls: - MarkWIScopeFnVisitor MarkWIScope(SemaRef.getASTContext()); + MarkWIScopeFnVisitor MarkWIScope(SemaSYCLRef.getASTContext()); MarkWIScope.TraverseDecl(CallOperator); // Now mark local variables declared in the PFWG lambda with work group // scope attribute @@ -3026,8 +3042,8 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { DeclCreator.getParamVarDeclsForCurrentField()[0]; QualType ParamType = KernelParameter->getOriginalType(); - Expr *DRE = SemaRef.BuildDeclRefExpr(KernelParameter, ParamType, VK_LValue, - KernelCallerSrcLoc); + Expr *DRE = SemaSYCLRef.SemaRef.BuildDeclRefExpr( + KernelParameter, ParamType, VK_LValue, KernelCallerSrcLoc); return DRE; } @@ -3038,8 +3054,8 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { DeclCreator.getParamVarDeclsForCurrentField()[0]; QualType ParamType = KernelParameter->getOriginalType(); - Expr *DRE = SemaRef.BuildDeclRefExpr(KernelParameter, ParamType, VK_LValue, - KernelCallerSrcLoc); + Expr *DRE = SemaSYCLRef.SemaRef.BuildDeclRefExpr( + KernelParameter, ParamType, VK_LValue, KernelCallerSrcLoc); // Struct Type kernel arguments are decomposed. The pointer fields are // then wrapped inside a compiler generated struct. 
Therefore when @@ -3052,13 +3068,13 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { ParamType = Pointer->getType(); } - DRE = ImplicitCastExpr::Create(SemaRef.Context, ParamType, + DRE = ImplicitCastExpr::Create(SemaSYCLRef.getASTContext(), ParamType, CK_LValueToRValue, DRE, /*BasePath=*/nullptr, VK_PRValue, FPOptionsOverride()); if (PointerTy->getPointeeType().getAddressSpace() != ParamType->getPointeeType().getAddressSpace()) - DRE = ImplicitCastExpr::Create(SemaRef.Context, PointerTy, + DRE = ImplicitCastExpr::Create(SemaSYCLRef.getASTContext(), PointerTy, CK_AddressSpaceConversion, DRE, nullptr, VK_PRValue, FPOptionsOverride()); @@ -3069,8 +3085,8 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { ParmVarDecl *KernelParameter = DeclCreator.getParamVarDeclsForCurrentField()[0]; QualType ParamType = KernelParameter->getOriginalType(); - Expr *DRE = SemaRef.BuildDeclRefExpr(KernelParameter, ParamType, VK_LValue, - KernelCallerSrcLoc); + Expr *DRE = SemaSYCLRef.SemaRef.BuildDeclRefExpr( + KernelParameter, ParamType, VK_LValue, KernelCallerSrcLoc); // Unwrap the array. CXXRecordDecl *WrapperStruct = ParamType->getAsCXXRecordDecl(); @@ -3083,7 +3099,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // array, returns an element initializer. InitializedEntity getFieldEntity(FieldDecl *FD, QualType Ty) { if (isArrayElement(FD, Ty)) - return InitializedEntity::InitializeElement(SemaRef.getASTContext(), + return InitializedEntity::InitializeElement(SemaSYCLRef.getASTContext(), ArrayInfos.back().second, ArrayInfos.back().first); return InitializedEntity::InitializeMember(FD, &VarEntity); @@ -3102,35 +3118,42 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { void addFieldInit(FieldDecl *FD, QualType Ty, MultiExprArg ParamRef, InitializationKind InitKind, InitializedEntity Entity) { - InitializationSequence InitSeq(SemaRef, Entity, InitKind, ParamRef); - ExprResult Init = InitSeq.Perform(SemaRef, Entity, InitKind, ParamRef); + InitializationSequence InitSeq(SemaSYCLRef.SemaRef, Entity, InitKind, + ParamRef); + ExprResult Init = + InitSeq.Perform(SemaSYCLRef.SemaRef, Entity, InitKind, ParamRef); InitListExpr *ParentILE = CollectionInitExprs.back(); - ParentILE->updateInit(SemaRef.getASTContext(), ParentILE->getNumInits(), + ParentILE->updateInit(SemaSYCLRef.getASTContext(), ParentILE->getNumInits(), Init.get()); } void addBaseInit(const CXXBaseSpecifier &BS, QualType Ty, InitializationKind InitKind) { InitializedEntity Entity = InitializedEntity::InitializeBase( - SemaRef.Context, &BS, /*IsInheritedVirtualBase*/ false, &VarEntity); - InitializationSequence InitSeq(SemaRef, Entity, InitKind, std::nullopt); - ExprResult Init = InitSeq.Perform(SemaRef, Entity, InitKind, std::nullopt); + SemaSYCLRef.getASTContext(), &BS, /*IsInheritedVirtualBase*/ false, + &VarEntity); + InitializationSequence InitSeq(SemaSYCLRef.SemaRef, Entity, InitKind, + std::nullopt); + ExprResult Init = + InitSeq.Perform(SemaSYCLRef.SemaRef, Entity, InitKind, std::nullopt); InitListExpr *ParentILE = CollectionInitExprs.back(); - ParentILE->updateInit(SemaRef.getASTContext(), ParentILE->getNumInits(), + ParentILE->updateInit(SemaSYCLRef.getASTContext(), ParentILE->getNumInits(), Init.get()); } void addBaseInit(const CXXBaseSpecifier &BS, QualType Ty, InitializationKind InitKind, MultiExprArg Args) { InitializedEntity Entity = InitializedEntity::InitializeBase( - SemaRef.Context, &BS, /*IsInheritedVirtualBase*/ false, &VarEntity); - InitializationSequence InitSeq(SemaRef, 
Entity, InitKind, Args); - ExprResult Init = InitSeq.Perform(SemaRef, Entity, InitKind, Args); + SemaSYCLRef.getASTContext(), &BS, /*IsInheritedVirtualBase*/ false, + &VarEntity); + InitializationSequence InitSeq(SemaSYCLRef.SemaRef, Entity, InitKind, Args); + ExprResult Init = + InitSeq.Perform(SemaSYCLRef.SemaRef, Entity, InitKind, Args); InitListExpr *ParentILE = CollectionInitExprs.back(); - ParentILE->updateInit(SemaRef.getASTContext(), ParentILE->getNumInits(), + ParentILE->updateInit(SemaSYCLRef.getASTContext(), ParentILE->getNumInits(), Init.get()); } @@ -3139,14 +3162,17 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { InitializationKind::CreateCopy(KernelCallerSrcLoc, KernelCallerSrcLoc); InitializedEntity Entity = InitializedEntity::InitializeBase( - SemaRef.Context, &BS, /*IsInheritedVirtualBase*/ false, &VarEntity); + SemaSYCLRef.getASTContext(), &BS, /*IsInheritedVirtualBase*/ false, + &VarEntity); Expr *ParamRef = createParamReferenceExpr(); - InitializationSequence InitSeq(SemaRef, Entity, InitKind, ParamRef); - ExprResult Init = InitSeq.Perform(SemaRef, Entity, InitKind, ParamRef); + InitializationSequence InitSeq(SemaSYCLRef.SemaRef, Entity, InitKind, + ParamRef); + ExprResult Init = + InitSeq.Perform(SemaSYCLRef.SemaRef, Entity, InitKind, ParamRef); InitListExpr *ParentILE = CollectionInitExprs.back(); - ParentILE->updateInit(SemaRef.getASTContext(), ParentILE->getNumInits(), + ParentILE->updateInit(SemaSYCLRef.getASTContext(), ParentILE->getNumInits(), Init.get()); } @@ -3157,23 +3183,25 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { } Expr *createGetAddressOf(Expr *E) { - return UnaryOperator::Create(SemaRef.Context, E, UO_AddrOf, - SemaRef.Context.getPointerType(E->getType()), - VK_PRValue, OK_Ordinary, KernelCallerSrcLoc, - false, SemaRef.CurFPFeatureOverrides()); + return UnaryOperator::Create( + SemaSYCLRef.getASTContext(), E, UO_AddrOf, + SemaSYCLRef.getASTContext().getPointerType(E->getType()), VK_PRValue, + OK_Ordinary, KernelCallerSrcLoc, false, + SemaSYCLRef.SemaRef.CurFPFeatureOverrides()); } Expr *createDerefOp(Expr *E) { - return UnaryOperator::Create(SemaRef.Context, E, UO_Deref, + return UnaryOperator::Create(SemaSYCLRef.getASTContext(), E, UO_Deref, E->getType()->getPointeeType(), VK_LValue, OK_Ordinary, KernelCallerSrcLoc, false, - SemaRef.CurFPFeatureOverrides()); + SemaSYCLRef.SemaRef.CurFPFeatureOverrides()); } Expr *createReinterpretCastExpr(Expr *E, QualType To) { return CXXReinterpretCastExpr::Create( - SemaRef.Context, To, VK_PRValue, CK_BitCast, E, - /*Path=*/nullptr, SemaRef.Context.getTrivialTypeSourceInfo(To), + SemaSYCLRef.getASTContext(), To, VK_PRValue, CK_BitCast, E, + /*Path=*/nullptr, + SemaSYCLRef.getASTContext().getTrivialTypeSourceInfo(To), SourceLocation(), SourceLocation(), SourceRange()); } @@ -3185,7 +3213,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { Expr *RCE = createReinterpretCastExpr( createGetAddressOf(createParamReferenceExpr()), - SemaRef.Context.getPointerType(Ty)); + SemaSYCLRef.getASTContext().getPointerType(Ty)); Expr *Initializer = createDerefOp(RCE); addFieldInit(FD, Ty, Initializer); } @@ -3198,7 +3226,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // } Expr *RCE = createReinterpretCastExpr( createGetAddressOf(createParamReferenceExpr()), - SemaRef.Context.getPointerType(Ty)); + SemaSYCLRef.getASTContext().getPointerType(Ty)); Expr *Initializer = createDerefOp(RCE); InitializationKind InitKind = 
InitializationKind::CreateCopy(KernelCallerSrcLoc, KernelCallerSrcLoc); @@ -3207,7 +3235,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { MemberExpr *buildMemberExpr(Expr *Base, ValueDecl *Member) { DeclAccessPair MemberDAP = DeclAccessPair::make(Member, AS_none); - MemberExpr *Result = SemaRef.BuildMemberExpr( + MemberExpr *Result = SemaSYCLRef.SemaRef.BuildMemberExpr( Base, /*IsArrow */ false, KernelCallerSrcLoc, NestedNameSpecifierLoc(), KernelCallerSrcLoc, Member, MemberDAP, /*HadMultipleCandidates*/ false, @@ -3238,46 +3266,47 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { DeclCreator.getParamVarDeclsForCurrentField(); for (size_t I = 0; I < NumParams; ++I) { QualType ParamType = KernelParameters[I]->getOriginalType(); - ParamDREs[I] = SemaRef.BuildDeclRefExpr(KernelParameters[I], ParamType, - VK_LValue, KernelCallerSrcLoc); + ParamDREs[I] = SemaSYCLRef.SemaRef.BuildDeclRefExpr( + KernelParameters[I], ParamType, VK_LValue, KernelCallerSrcLoc); } MemberExpr *MethodME = buildMemberExpr(MemberExprBases.back(), Method); QualType ResultTy = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); - ResultTy = ResultTy.getNonLValueExprType(SemaRef.Context); + ResultTy = ResultTy.getNonLValueExprType(SemaSYCLRef.getASTContext()); llvm::SmallVector ParamStmts; const auto *Proto = cast(Method->getType()); - SemaRef.GatherArgumentsForCall(KernelCallerSrcLoc, Method, Proto, 0, - ParamDREs, ParamStmts); + SemaSYCLRef.SemaRef.GatherArgumentsForCall(KernelCallerSrcLoc, Method, + Proto, 0, ParamDREs, ParamStmts); // [kernel_obj or wrapper object].accessor.__init(_ValueType*, // range, range, id) AddTo.push_back(CXXMemberCallExpr::Create( - SemaRef.Context, MethodME, ParamStmts, ResultTy, VK, KernelCallerSrcLoc, - FPOptionsOverride())); + SemaSYCLRef.getASTContext(), MethodME, ParamStmts, ResultTy, VK, + KernelCallerSrcLoc, FPOptionsOverride())); } // Creates an empty InitListExpr of the correct number of child-inits // of this to append into. 
void addCollectionInitListExpr(const CXXRecordDecl *RD) { const ASTRecordLayout &Info = - SemaRef.getASTContext().getASTRecordLayout(RD); + SemaSYCLRef.getASTContext().getASTRecordLayout(RD); uint64_t NumInitExprs = Info.getFieldCount() + RD->getNumBases(); addCollectionInitListExpr(QualType(RD->getTypeForDecl(), 0), NumInitExprs); } InitListExpr *createInitListExpr(const CXXRecordDecl *RD) { const ASTRecordLayout &Info = - SemaRef.getASTContext().getASTRecordLayout(RD); + SemaSYCLRef.getASTContext().getASTRecordLayout(RD); uint64_t NumInitExprs = Info.getFieldCount() + RD->getNumBases(); return createInitListExpr(QualType(RD->getTypeForDecl(), 0), NumInitExprs); } InitListExpr *createInitListExpr(QualType InitTy, uint64_t NumChildInits) { - InitListExpr *ILE = new (SemaRef.getASTContext()) InitListExpr( - SemaRef.getASTContext(), KernelCallerSrcLoc, {}, KernelCallerSrcLoc); - ILE->reserveInits(SemaRef.getASTContext(), NumChildInits); + InitListExpr *ILE = new (SemaSYCLRef.getASTContext()) + InitListExpr(SemaSYCLRef.getASTContext(), KernelCallerSrcLoc, {}, + KernelCallerSrcLoc); + ILE->reserveInits(SemaSYCLRef.getASTContext(), NumChildInits); ILE->setType(InitTy); return ILE; @@ -3289,7 +3318,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { InitListExpr *ILE = createInitListExpr(InitTy, NumChildInits); InitListExpr *ParentILE = CollectionInitExprs.back(); - ParentILE->updateInit(SemaRef.getASTContext(), ParentILE->getNumInits(), + ParentILE->updateInit(SemaSYCLRef.getASTContext(), ParentILE->getNumInits(), ILE); CollectionInitExprs.push_back(ILE); @@ -3342,10 +3371,10 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // Generate __init call for kernel handler argument void handleSpecialType(QualType KernelHandlerTy) { - DeclRefExpr *KernelHandlerCloneRef = - DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(), - KernelCallerSrcLoc, KernelHandlerClone, false, - DeclarationNameInfo(), KernelHandlerTy, VK_LValue); + DeclRefExpr *KernelHandlerCloneRef = DeclRefExpr::Create( + SemaSYCLRef.getASTContext(), NestedNameSpecifierLoc(), + KernelCallerSrcLoc, KernelHandlerClone, false, DeclarationNameInfo(), + KernelHandlerTy, VK_LValue); const auto *RecordDecl = KernelHandlerClone->getType()->getAsCXXRecordDecl(); MemberExprBases.push_back(KernelHandlerCloneRef); @@ -3366,21 +3395,23 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { InitializedEntity::InitializeVariable(KernelHandlerClone); InitializationKind InitKind = InitializationKind::CreateDefault(KernelCallerSrcLoc); - InitializationSequence InitSeq(SemaRef, VarEntity, InitKind, std::nullopt); - ExprResult Init = InitSeq.Perform(SemaRef, VarEntity, InitKind, std::nullopt); + InitializationSequence InitSeq(SemaSYCLRef.SemaRef, VarEntity, InitKind, + std::nullopt); + ExprResult Init = + InitSeq.Perform(SemaSYCLRef.SemaRef, VarEntity, InitKind, std::nullopt); KernelHandlerClone->setInit( - SemaRef.MaybeCreateExprWithCleanups(Init.get())); + SemaSYCLRef.SemaRef.MaybeCreateExprWithCleanups(Init.get())); KernelHandlerClone->setInitStyle(VarDecl::CallInit); } Expr *createArraySubscriptExpr(uint64_t Index, Expr *ArrayRef) { - QualType SizeT = SemaRef.getASTContext().getSizeType(); + QualType SizeT = SemaSYCLRef.getASTContext().getSizeType(); llvm::APInt IndexVal{ - static_cast(SemaRef.getASTContext().getTypeSize(SizeT)), + static_cast(SemaSYCLRef.getASTContext().getTypeSize(SizeT)), Index, SizeT->isSignedIntegerType()}; auto IndexLiteral = IntegerLiteral::Create( - SemaRef.getASTContext(), 
IndexVal, SizeT, KernelCallerSrcLoc); - ExprResult IndexExpr = SemaRef.CreateBuiltinArraySubscriptExpr( + SemaSYCLRef.getASTContext(), IndexVal, SizeT, KernelCallerSrcLoc); + ExprResult IndexExpr = SemaSYCLRef.SemaRef.CreateBuiltinArraySubscriptExpr( ArrayRef, KernelCallerSrcLoc, IndexLiteral, KernelCallerSrcLoc); assert(!IndexExpr.isInvalid()); return IndexExpr.get(); @@ -3399,7 +3430,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { void addArrayElementInit(FieldDecl *FD, QualType T) { Expr *RCE = createReinterpretCastExpr( createGetAddressOf(ArrayParamBases.pop_back_val()), - SemaRef.Context.getPointerType(T)); + SemaSYCLRef.getASTContext().getPointerType(T)); Expr *Initializer = createDerefOp(RCE); addFieldInit(FD, T, Initializer); } @@ -3411,7 +3442,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // generate required array subscript expressions. void createArrayInit(FieldDecl *FD, QualType T) { const ConstantArrayType *CAT = - SemaRef.getASTContext().getAsConstantArrayType(T); + SemaSYCLRef.getASTContext().getAsConstantArrayType(T); if (!CAT) { addArrayElementInit(FD, T); @@ -3452,7 +3483,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { public: static constexpr const bool VisitInsideSimpleContainers = false; - SyclKernelBodyCreator(Sema &S, SyclKernelDeclCreator &DC, + SyclKernelBodyCreator(SemaSYCL &S, SyclKernelDeclCreator &DC, const CXXRecordDecl *KernelObj, FunctionDecl *KernelCallerFunc, bool IsSIMDKernel, CXXMethodDecl *CallOperator) @@ -3466,13 +3497,13 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { CollectionInitExprs.push_back(createInitListExpr(KernelObj)); annotateHierarchicalParallelismAPICalls(); - Stmt *DS = new (S.Context) DeclStmt(DeclGroupRef(KernelObjClone), - KernelCallerSrcLoc, KernelCallerSrcLoc); + Stmt *DS = new (S.getASTContext()) DeclStmt( + DeclGroupRef(KernelObjClone), KernelCallerSrcLoc, KernelCallerSrcLoc); BodyStmts.push_back(DS); DeclRefExpr *KernelObjCloneRef = DeclRefExpr::Create( - S.Context, NestedNameSpecifierLoc(), KernelCallerSrcLoc, KernelObjClone, - false, DeclarationNameInfo(), QualType(KernelObj->getTypeForDecl(), 0), - VK_LValue); + S.getASTContext(), NestedNameSpecifierLoc(), KernelCallerSrcLoc, + KernelObjClone, false, DeclarationNameInfo(), + QualType(KernelObj->getTypeForDecl(), 0), VK_LValue); MemberExprBases.push_back(KernelObjCloneRef); } @@ -3541,13 +3572,13 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { void handleSyclKernelHandlerType(ParmVarDecl *KernelHandlerArg) { // Create and default initialize local clone of kernel handler - createKernelHandlerClone(SemaRef.getASTContext(), + createKernelHandlerClone(SemaSYCLRef.getASTContext(), DeclCreator.getKernelDecl(), KernelHandlerArg); // Add declaration statement to openCL kernel body - Stmt *DS = - new (SemaRef.Context) DeclStmt(DeclGroupRef(KernelHandlerClone), - KernelCallerSrcLoc, KernelCallerSrcLoc); + Stmt *DS = new (SemaSYCLRef.getASTContext()) + DeclStmt(DeclGroupRef(KernelHandlerClone), KernelCallerSrcLoc, + KernelCallerSrcLoc); BodyStmts.push_back(DS); // Generate @@ -3555,7 +3586,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // call if target does not have native support for specialization constants. // Here, specialization_constants_buffer is the compiler generated kernel // argument of type char*. 
- if (!isDefaultSPIRArch(SemaRef.Context)) + if (!isDefaultSPIRArch(SemaSYCLRef.getASTContext())) handleSpecialType(KernelHandlerArg->getType()); } @@ -3582,11 +3613,12 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { CXXCastPath BasePath; QualType DerivedTy(RD->getTypeForDecl(), 0); QualType BaseTy = BS.getType(); - SemaRef.CheckDerivedToBaseConversion(DerivedTy, BaseTy, KernelCallerSrcLoc, - SourceRange(), &BasePath, - /*IgnoreBaseAccess*/ true); + SemaSYCLRef.SemaRef.CheckDerivedToBaseConversion( + DerivedTy, BaseTy, KernelCallerSrcLoc, SourceRange(), &BasePath, + /*IgnoreBaseAccess*/ true); auto Cast = ImplicitCastExpr::Create( - SemaRef.Context, BaseTy, CK_DerivedToBase, MemberExprBases.back(), + SemaSYCLRef.getASTContext(), BaseTy, CK_DerivedToBase, + MemberExprBases.back(), /* CXXCastPath=*/&BasePath, VK_LValue, FPOptionsOverride()); MemberExprBases.push_back(Cast); addCollectionInitListExpr(BaseTy->getAsCXXRecordDecl()); @@ -3604,7 +3636,7 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { bool enterArray(FieldDecl *FD, QualType ArrayType, QualType ElementType) final { const ConstantArrayType *CAT = - SemaRef.getASTContext().getAsConstantArrayType(ArrayType); + SemaSYCLRef.getASTContext().getAsConstantArrayType(ArrayType); assert(CAT && "Should only be called on constant-size array."); uint64_t ArraySize = CAT->getSize().getZExtValue(); addCollectionInitListExpr(ArrayType, ArraySize); @@ -3648,12 +3680,12 @@ class SyclKernelBodyCreator : public SyclKernelFieldHandler { // Kernels are only the unnamed-lambda feature if the feature is enabled, AND // the first template argument has been corrected by the library to match the // functor type. -static bool IsSYCLUnnamedKernel(Sema &SemaRef, const FunctionDecl *FD) { - if (!SemaRef.getLangOpts().SYCLUnnamedLambda) +static bool IsSYCLUnnamedKernel(SemaSYCL &SemaSYCLRef, const FunctionDecl *FD) { + if (!SemaSYCLRef.getLangOpts().SYCLUnnamedLambda) return false; QualType FunctorTy = GetSYCLKernelObjectType(FD); - QualType TmplArgTy = calculateKernelNameType(SemaRef.Context, FD); - return SemaRef.Context.hasSameType(FunctorTy, TmplArgTy); + QualType TmplArgTy = calculateKernelNameType(SemaSYCLRef.getASTContext(), FD); + return SemaSYCLRef.getASTContext().hasSameType(FunctorTy, TmplArgTy); } class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { @@ -3666,12 +3698,12 @@ class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { int64_t offsetOf(const FieldDecl *FD, QualType ArgTy) const { return isArrayElement(FD, ArgTy) ? 
0 - : SemaRef.getASTContext().getFieldOffset(FD) / 8; + : SemaSYCLRef.getASTContext().getFieldOffset(FD) / 8; } int64_t offsetOf(const CXXRecordDecl *RD, const CXXRecordDecl *Base) const { const ASTRecordLayout &Layout = - SemaRef.getASTContext().getASTRecordLayout(RD); + SemaSYCLRef.getASTContext().getASTRecordLayout(RD); return Layout.getBaseClassOffset(Base).getQuantity(); } @@ -3682,20 +3714,21 @@ class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { void addParam(QualType ArgTy, SYCLIntegrationHeader::kernel_param_kind_t Kind, uint64_t OffsetAdj) { uint64_t Size; - Size = SemaRef.getASTContext().getTypeSizeInChars(ArgTy).getQuantity(); + Size = SemaSYCLRef.getASTContext().getTypeSizeInChars(ArgTy).getQuantity(); Header.addParamDesc(Kind, static_cast(Size), static_cast(CurOffset + OffsetAdj)); } public: static constexpr const bool VisitInsideSimpleContainers = false; - SyclKernelIntHeaderCreator(bool IsESIMD, Sema &S, SYCLIntegrationHeader &H, + SyclKernelIntHeaderCreator(bool IsESIMD, SemaSYCL &S, + SYCLIntegrationHeader &H, const CXXRecordDecl *KernelObj, QualType NameType, FunctionDecl *KernelFunc) : SyclKernelFieldHandler(S), Header(H) { // The header needs to access the kernel object size. - int64_t ObjSize = SemaRef.getASTContext() + int64_t ObjSize = SemaSYCLRef.getASTContext() .getTypeSizeInChars(KernelObj->getTypeForDecl()) .getQuantity(); Header.startKernel(KernelFunc, NameType, KernelObj->getLocation(), IsESIMD, @@ -3732,18 +3765,18 @@ class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { Header.addParamDesc(SYCLIntegrationHeader::kind_accessor, Info, CurOffset + offsetOf(FD, FieldTy)); - } else if (Sema::isSyclType(FieldTy, SYCLTypeAttr::stream)) { + } else if (SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::stream)) { addParam(FD, FieldTy, SYCLIntegrationHeader::kind_stream); - } else if (Sema::isSyclType(FieldTy, SYCLTypeAttr::sampler) || - Sema::isSyclType(FieldTy, SYCLTypeAttr::annotated_ptr) || - Sema::isSyclType(FieldTy, SYCLTypeAttr::annotated_arg)) { + } else if (SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::sampler) || + SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::annotated_ptr) || + SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::annotated_arg)) { CXXMethodDecl *InitMethod = getMethodByName(ClassTy, InitMethodName); assert(InitMethod && "type must have __init method"); const ParmVarDecl *InitArg = InitMethod->getParamDecl(0); assert(InitArg && "Init method must have arguments"); QualType T = InitArg->getType(); SYCLIntegrationHeader::kernel_param_kind_t ParamKind = - Sema::isSyclType(FieldTy, SYCLTypeAttr::sampler) + SemaSYCL::isSyclType(FieldTy, SYCLTypeAttr::sampler) ? SYCLIntegrationHeader::kind_sampler : (T->isPointerType() ? SYCLIntegrationHeader::kind_pointer : SYCLIntegrationHeader::kind_std_layout); @@ -3795,7 +3828,7 @@ class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { // The compiler generated kernel argument used to initialize SYCL 2020 // specialization constants, `specialization_constants_buffer`, should // have corresponding entry in integration header. - ASTContext &Context = SemaRef.getASTContext(); + ASTContext &Context = SemaSYCLRef.getASTContext(); // Offset is zero since kernel_handler argument is not part of // kernel object (i.e. 
it is not captured) addParam(Context.getPointerType(Context.CharTy), @@ -3832,7 +3865,8 @@ class SyclKernelIntHeaderCreator : public SyclKernelFieldHandler { } bool nextElement(QualType ET, uint64_t Index) final { - int64_t Size = SemaRef.getASTContext().getTypeSizeInChars(ET).getQuantity(); + int64_t Size = + SemaSYCLRef.getASTContext().getTypeSizeInChars(ET).getQuantity(); CurOffset = ArrayBaseOffsets.back() + Size * Index; return true; } @@ -3851,7 +3885,7 @@ class SyclKernelIntFooterCreator : public SyclKernelFieldHandler { SYCLIntegrationFooter &Footer; public: - SyclKernelIntFooterCreator(Sema &S, SYCLIntegrationFooter &F) + SyclKernelIntFooterCreator(SemaSYCL &S, SYCLIntegrationFooter &F) : SyclKernelFieldHandler(S), Footer(F) { (void)Footer; // workaround for unused field warning } @@ -3862,7 +3896,7 @@ class SyclKernelIntFooterCreator : public SyclKernelFieldHandler { class SYCLKernelNameTypeVisitor : public TypeVisitor, public ConstTemplateArgumentVisitor { - Sema &S; + SemaSYCL &S; SourceLocation KernelInvocationFuncLoc; QualType KernelNameType; using InnerTypeVisitor = TypeVisitor; @@ -3877,7 +3911,7 @@ class SYCLKernelNameTypeVisitor } public: - SYCLKernelNameTypeVisitor(Sema &S, SourceLocation KernelInvocationFuncLoc, + SYCLKernelNameTypeVisitor(SemaSYCL &S, SourceLocation KernelInvocationFuncLoc, QualType KernelNameType, bool IsUnnamedKernel) : S(S), KernelInvocationFuncLoc(KernelInvocationFuncLoc), KernelNameType(KernelNameType), IsUnnamedKernel(IsUnnamedKernel) {} @@ -4044,8 +4078,8 @@ class SYCLKernelNameTypeVisitor } }; -void Sema::CheckSYCLKernelCall(FunctionDecl *KernelFunc, - ArrayRef Args) { +void SemaSYCL::CheckSYCLKernelCall(FunctionDecl *KernelFunc, + ArrayRef Args) { QualType KernelNameType = calculateKernelNameType(getASTContext(), KernelFunc); SYCLKernelNameTypeVisitor KernelNameTypeVisitor( @@ -4055,7 +4089,7 @@ void Sema::CheckSYCLKernelCall(FunctionDecl *KernelFunc, // FIXME: In place until the library works around its 'host' invocation // issues. - if (!LangOpts.SYCLIsDevice) + if (!SemaRef.LangOpts.SYCLIsDevice) return; const CXXRecordDecl *KernelObj = @@ -4079,11 +4113,11 @@ void Sema::CheckSYCLKernelCall(FunctionDecl *KernelFunc, QualType KernelParamTy = KernelFunc->getParamDecl(0)->getType(); if (KernelParamTy->isReferenceType()) { // passing by reference, so emit warning if not using SYCL 2020 - if (LangOpts.getSYCLVersion() < LangOptions::SYCL_2020) + if (SemaRef.LangOpts.getSYCLVersion() < LangOptions::SYCL_2020) Diag(KernelFunc->getLocation(), diag::warn_sycl_pass_by_reference_future); } else { // passing by value. emit warning if using SYCL 2020 or greater - if (LangOpts.getSYCLVersion() > LangOptions::SYCL_2017) + if (SemaRef.LangOpts.getSYCLVersion() > LangOptions::SYCL_2017) Diag(KernelFunc->getLocation(), diag::warn_sycl_pass_by_value_deprecated); } @@ -4113,7 +4147,7 @@ void Sema::CheckSYCLKernelCall(FunctionDecl *KernelFunc, // For a wrapped parallel_for, copy attributes from original // kernel to wrapped kernel. -void Sema::copySYCLKernelAttrs(CXXMethodDecl *CallOperator) { +void SemaSYCL::copySYCLKernelAttrs(CXXMethodDecl *CallOperator) { // Get the operator() function of the wrapper. 
assert(CallOperator && "invalid kernel object"); @@ -4161,7 +4195,7 @@ void Sema::copySYCLKernelAttrs(CXXMethodDecl *CallOperator) { } } -void Sema::SetSYCLKernelNames() { +void SemaSYCL::SetSYCLKernelNames() { std::unique_ptr MangleCtx( getASTContext().createMangleContext()); // We assume the list of KernelDescs is the complete list of kernels needing @@ -4178,9 +4212,10 @@ void Sema::SetSYCLKernelNames() { StableName); // Set name of generated kernel. - Pair.second->setDeclName(&Context.Idents.get(KernelName)); + Pair.second->setDeclName(&getASTContext().Idents.get(KernelName)); // Update the AsmLabel for this generated kernel. - Pair.second->addAttr(AsmLabelAttr::CreateImplicit(Context, KernelName)); + Pair.second->addAttr( + AsmLabelAttr::CreateImplicit(getASTContext(), KernelName)); } } @@ -4206,8 +4241,8 @@ void Sema::SetSYCLKernelNames() { // } // // -void Sema::ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, - MangleContext &MC) { +void SemaSYCL::ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, + MangleContext &MC) { // The first argument to the KernelCallerFunc is the lambda object. const CXXRecordDecl *KernelObj = GetSYCLKernelObjectType(KernelCallerFunc)->getAsCXXRecordDecl(); @@ -4254,7 +4289,8 @@ void Sema::ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, CallOperator); SyclKernelIntHeaderCreator int_header( IsSIMDKernel, *this, getSyclIntegrationHeader(), KernelObj, - calculateKernelNameType(Context, KernelCallerFunc), KernelCallerFunc); + calculateKernelNameType(getASTContext(), KernelCallerFunc), + KernelCallerFunc); SyclKernelIntFooterCreator int_footer(*this, getSyclIntegrationFooter()); SyclOptReportCreator opt_report(*this, kernel_decl, KernelObj->getLocation()); @@ -4317,11 +4353,11 @@ static SourceLocation GetSubGroupLoc(const FunctionDecl *FD) { return SourceLocation{}; } -static void CheckSYCL2020SubGroupSizes(Sema &S, FunctionDecl *SYCLKernel, +static void CheckSYCL2020SubGroupSizes(SemaSYCL &S, FunctionDecl *SYCLKernel, const FunctionDecl *FD) { // If they are the same, no error. - if (CalcEffectiveSubGroup(S.Context, S.getLangOpts(), SYCLKernel) == - CalcEffectiveSubGroup(S.Context, S.getLangOpts(), FD)) + if (CalcEffectiveSubGroup(S.getASTContext(), S.getLangOpts(), SYCLKernel) == + CalcEffectiveSubGroup(S.getASTContext(), S.getLangOpts(), FD)) return; // No need to validate __spirv routines here since they @@ -4370,7 +4406,7 @@ static void CheckSYCL2020SubGroupSizes(Sema &S, FunctionDecl *SYCLKernel, // self-documentation purposes that it would be nice to be able to repeat these // on subsequent functions. 
static void CheckSYCL2020Attributes( - Sema &S, FunctionDecl *SYCLKernel, FunctionDecl *KernelBody, + SemaSYCL &S, FunctionDecl *SYCLKernel, FunctionDecl *KernelBody, const llvm::SmallPtrSetImpl &CalledFuncs) { if (KernelBody) { @@ -4416,7 +4452,7 @@ static void CheckSYCL2020Attributes( } static void PropagateAndDiagnoseDeviceAttr( - Sema &S, const SingleDeviceFunctionTracker &Tracker, Attr *A, + SemaSYCL &S, const SingleDeviceFunctionTracker &Tracker, Attr *A, FunctionDecl *SYCLKernel, FunctionDecl *KernelBody) { switch (A->getKind()) { case attr::Kind::IntelReqdSubGroupSize: { @@ -4446,9 +4482,9 @@ static void PropagateAndDiagnoseDeviceAttr( case attr::Kind::SYCLReqdWorkGroupSize: { auto *RWGSA = cast(A); if (auto *Existing = SYCLKernel->getAttr()) { - if (S.AnyWorkGroupSizesDiffer(Existing->getXDim(), Existing->getYDim(), - Existing->getZDim(), RWGSA->getXDim(), - RWGSA->getYDim(), RWGSA->getZDim())) { + if (S.SemaRef.AnyWorkGroupSizesDiffer( + Existing->getXDim(), Existing->getYDim(), Existing->getZDim(), + RWGSA->getXDim(), RWGSA->getYDim(), RWGSA->getZDim())) { S.Diag(SYCLKernel->getLocation(), diag::err_conflicting_sycl_kernel_attributes); S.Diag(Existing->getLocation(), diag::note_conflicting_attribute); @@ -4457,7 +4493,7 @@ static void PropagateAndDiagnoseDeviceAttr( } } else if (auto *Existing = SYCLKernel->getAttr()) { - if (S.CheckMaxAllowedWorkGroupSize( + if (S.SemaRef.CheckMaxAllowedWorkGroupSize( RWGSA->getXDim(), RWGSA->getYDim(), RWGSA->getZDim(), Existing->getXDim(), Existing->getYDim(), Existing->getZDim())) { S.Diag(SYCLKernel->getLocation(), @@ -4476,9 +4512,9 @@ static void PropagateAndDiagnoseDeviceAttr( case attr::Kind::SYCLWorkGroupSizeHint: { auto *WGSH = cast(A); if (auto *Existing = SYCLKernel->getAttr()) { - if (S.AnyWorkGroupSizesDiffer(Existing->getXDim(), Existing->getYDim(), - Existing->getZDim(), WGSH->getXDim(), - WGSH->getYDim(), WGSH->getZDim())) { + if (S.SemaRef.AnyWorkGroupSizesDiffer( + Existing->getXDim(), Existing->getYDim(), Existing->getZDim(), + WGSH->getXDim(), WGSH->getYDim(), WGSH->getZDim())) { S.Diag(SYCLKernel->getLocation(), diag::err_conflicting_sycl_kernel_attributes); S.Diag(Existing->getLocation(), diag::note_conflicting_attribute); @@ -4492,7 +4528,7 @@ static void PropagateAndDiagnoseDeviceAttr( case attr::Kind::SYCLIntelMaxWorkGroupSize: { auto *SIMWGSA = cast(A); if (auto *Existing = SYCLKernel->getAttr()) { - if (S.CheckMaxAllowedWorkGroupSize( + if (S.SemaRef.CheckMaxAllowedWorkGroupSize( Existing->getXDim(), Existing->getYDim(), Existing->getZDim(), SIMWGSA->getXDim(), SIMWGSA->getYDim(), SIMWGSA->getZDim())) { S.Diag(SYCLKernel->getLocation(), @@ -4547,7 +4583,7 @@ static void PropagateAndDiagnoseDeviceAttr( } } -void Sema::MarkDevices() { +void SemaSYCL::MarkDevices() { // This Tracker object ensures that the SyclDeviceDecls collection includes // the SYCL_EXTERNAL functions, and manages the diagnostics for all of the // functions in the kernel. 
@@ -4566,7 +4602,7 @@ void Sema::MarkDevices() { for (auto *A : T.GetCollectedAttributes()) PropagateAndDiagnoseDeviceAttr(*this, T, A, T.GetSYCLKernel(), T.GetKernelBody()); - CheckSYCLAddIRAttributesFunctionAttrConflicts(T.GetSYCLKernel()); + SemaRef.CheckSYCLAddIRAttributesFunctionAttrConflicts(T.GetSYCLKernel()); } } @@ -4575,19 +4611,20 @@ void Sema::MarkDevices() { // ----------------------------------------------------------------------------- Sema::SemaDiagnosticBuilder -Sema::SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID, +SemaSYCL::DiagIfDeviceCode(SourceLocation Loc, unsigned DiagID, DeviceDiagnosticReason Reason) { assert(getLangOpts().SYCLIsDevice && "Should only be called during SYCL compilation"); - FunctionDecl *FD = dyn_cast(getCurLexicalContext()); + FunctionDecl *FD = dyn_cast(SemaRef.getCurLexicalContext()); SemaDiagnosticBuilder::Kind DiagKind = [this, FD, Reason] { if (DiagnosingSYCLKernel) return SemaDiagnosticBuilder::K_ImmediateWithCallStack; if (!FD) return SemaDiagnosticBuilder::K_Nop; - if (getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted) { + if (SemaRef.getEmissionStatus(FD) == + Sema::FunctionEmissionStatus::Emitted) { // Skip the diagnostic if we know it won't be emitted. - if ((getEmissionReason(FD) & Reason) == + if ((SemaRef.getEmissionReason(FD) & Reason) == Sema::DeviceDiagnosticReason::None) return SemaDiagnosticBuilder::K_Nop; @@ -4595,10 +4632,10 @@ Sema::SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID, } return SemaDiagnosticBuilder::K_Deferred; }(); - return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, *this, Reason); + return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, SemaRef, Reason); } -void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt, +void SemaSYCL::deepTypeCheckForDevice(SourceLocation UsedAt, llvm::DenseSet Visited, ValueDecl *DeclToCheck) { assert(getLangOpts().SYCLIsDevice && @@ -4610,18 +4647,18 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt, auto Check = [&](QualType TypeToCheck, const ValueDecl *D) { bool ErrorFound = false; if (isZeroSizedArray(*this, TypeToCheck)) { - SYCLDiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1; + DiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1; ErrorFound = true; } // Checks for other types can also be done here. if (ErrorFound) { if (NeedToEmitNotes) { if (auto *FD = dyn_cast(D)) - SYCLDiagIfDeviceCode(FD->getLocation(), - diag::note_illegal_field_declared_here) + DiagIfDeviceCode(FD->getLocation(), + diag::note_illegal_field_declared_here) << FD->getType()->isPointerType() << FD->getType(); else - SYCLDiagIfDeviceCode(D->getLocation(), diag::note_declared_at); + DiagIfDeviceCode(D->getLocation(), diag::note_declared_at); } } @@ -4652,8 +4689,8 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt, auto EmitHistory = [&]() { // The first element is always nullptr. 
for (uint64_t Index = 1; Index < History.size(); ++Index) { - SYCLDiagIfDeviceCode(History[Index]->getLocation(), - diag::note_within_field_of_type) + DiagIfDeviceCode(History[Index]->getLocation(), + diag::note_within_field_of_type) << History[Index]->getType(); } }; @@ -4690,10 +4727,10 @@ void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt, } while (!StackForRecursion.empty()); } -void Sema::finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller, - const FunctionDecl *Callee, - SourceLocation Loc, - DeviceDiagnosticReason Reason) { +void SemaSYCL::finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller, + const FunctionDecl *Callee, + SourceLocation Loc, + DeviceDiagnosticReason Reason) { Callee = Callee->getMostRecentDecl(); // If the reason for the emission of this diagnostic is not SYCL-specific, @@ -4716,14 +4753,14 @@ void Sema::finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller, // this undefined function is used to trigger a compiling error. if (!Callee->isDefined() && !Callee->getBuiltinID() && !Callee->isReplaceableGlobalAllocationFunction() && - !isSYCLUndefinedAllowed(Callee, getSourceManager())) { - Diag(Loc, diag::err_sycl_restrict) << Sema::KernelCallUndefinedFunction; + !isSYCLUndefinedAllowed(Callee, SemaRef.getSourceManager())) { + Diag(Loc, diag::err_sycl_restrict) << SemaSYCL::KernelCallUndefinedFunction; Diag(Callee->getLocation(), diag::note_previous_decl) << Callee; Diag(Caller->getLocation(), diag::note_called_by) << Caller; } } -bool Sema::checkAllowedSYCLInitializer(VarDecl *VD) { +bool SemaSYCL::checkAllowedSYCLInitializer(VarDecl *VD) { assert(getLangOpts().SYCLIsDevice && "Should only be called during SYCL compilation"); @@ -4732,8 +4769,8 @@ bool Sema::checkAllowedSYCLInitializer(VarDecl *VD) { const Expr *Init = VD->getInit(); bool ValueDependent = Init && Init->isValueDependent(); - bool isConstantInit = - Init && !ValueDependent && Init->isConstantInitializer(Context, false); + bool isConstantInit = Init && !ValueDependent && + Init->isConstantInitializer(getASTContext(), false); if (!VD->isConstexpr() && Init && !ValueDependent && !isConstantInit) return false; @@ -5317,8 +5354,9 @@ void SYCLIntegrationHeader::emit(raw_ostream &O) { for (const KernelDesc &K : KernelDescs) { const size_t N = K.Params.size(); - PresumedLoc PLoc = S.Context.getSourceManager().getPresumedLoc( - S.Context.getSourceManager() + PresumedLoc PLoc = S.getASTContext().getSourceManager().getPresumedLoc( + S.getASTContext() + .getSourceManager() .getExpansionRange(K.KernelLocation) .getEnd()); if (K.IsUnnamedKernel) { @@ -5383,10 +5421,10 @@ void SYCLIntegrationHeader::emit(raw_ostream &O) { O << " return 0;\n"; O << "#endif\n"; O << " }\n"; - StringRef ReturnType = - (S.Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong) - ? "long" - : "long long"; + StringRef ReturnType = (S.getASTContext().getTargetInfo().getInt64Type() == + TargetInfo::SignedLong) + ? 
"long" + : "long long"; O << " // Returns the size of the kernel object in bytes.\n"; O << " __SYCL_DLL_LOCAL\n"; O << " static constexpr " << ReturnType << " getKernelSize() { return " @@ -5445,7 +5483,7 @@ void SYCLIntegrationHeader::addSpecConstant(StringRef IDName, QualType IDType) { SpecConsts.emplace_back(std::make_pair(IDType, IDName.str())); } -SYCLIntegrationHeader::SYCLIntegrationHeader(Sema &S) : S(S) {} +SYCLIntegrationHeader::SYCLIntegrationHeader(SemaSYCL &S) : S(S) {} void SYCLIntegrationFooter::addVarDecl(const VarDecl *VD) { // Variable template declaration can result in an error case of 'nullptr' @@ -5462,8 +5500,8 @@ void SYCLIntegrationFooter::addVarDecl(const VarDecl *VD) { if (isa(VD)) return; // Step 1: ensure that this is of the correct type template specialization. - if (!Sema::isSyclType(VD->getType(), SYCLTypeAttr::specialization_id) && - !Sema::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe) && + if (!SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::specialization_id) && + !SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe) && !S.isTypeDecoratedWithDeclAttribute( VD->getType())) { // Handle the case where this could be a deduced type, such as a deduction @@ -5659,8 +5697,8 @@ bool SYCLIntegrationFooter::emit(raw_ostream &OS) { // Skip if this isn't a SpecIdType, DeviceGlobal, or HostPipe. This // can happen if it was a deduced type. - if (!Sema::isSyclType(VD->getType(), SYCLTypeAttr::specialization_id) && - !Sema::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe) && + if (!SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::specialization_id) && + !SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe) && !S.isTypeDecoratedWithDeclAttribute( VD->getType())) continue; @@ -5691,7 +5729,7 @@ bool SYCLIntegrationFooter::emit(raw_ostream &OS) { DeviceGlobOS << SYCLUniqueStableIdExpr::ComputeName(S.getASTContext(), VD); DeviceGlobOS << "\");\n"; - } else if (Sema::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe)) { + } else if (SemaSYCL::isSyclType(VD->getType(), SYCLTypeAttr::host_pipe)) { HostPipesEmitted = true; HostPipesOS << "host_pipe_map::add("; HostPipesOS << "(void *)&"; @@ -5766,3 +5804,64 @@ bool SYCLIntegrationFooter::emit(raw_ostream &OS) { return true; } + +ExprResult SemaSYCL::BuildUniqueStableIdExpr(SourceLocation OpLoc, + SourceLocation LParen, + SourceLocation RParen, Expr *E) { + if (!E->isInstantiationDependent()) { + // Special handling to get us better error messages for a member variable. 
+    if (auto *ME = dyn_cast<MemberExpr>(E->IgnoreUnlessSpelledInSource())) {
+      if (isa<FieldDecl>(ME->getMemberDecl()))
+        Diag(E->getExprLoc(), diag::err_unique_stable_id_global_storage);
+      else
+        Diag(E->getExprLoc(), diag::err_unique_stable_id_expected_var);
+      return ExprError();
+    }
+
+    auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreUnlessSpelledInSource());
+
+    if (!DRE || !isa_and_nonnull<VarDecl>(DRE->getDecl())) {
+      Diag(E->getExprLoc(), diag::err_unique_stable_id_expected_var);
+      return ExprError();
+    }
+
+    auto *Var = cast<VarDecl>(DRE->getDecl());
+
+    if (!Var->hasGlobalStorage()) {
+      Diag(E->getExprLoc(), diag::err_unique_stable_id_global_storage);
+      return ExprError();
+    }
+  }
+
+  return SYCLUniqueStableIdExpr::Create(getASTContext(), OpLoc, LParen, RParen,
+                                        E);
+}
+
+ExprResult SemaSYCL::ActOnUniqueStableIdExpr(SourceLocation OpLoc,
+                                             SourceLocation LParen,
+                                             SourceLocation RParen, Expr *E) {
+  return BuildUniqueStableIdExpr(OpLoc, LParen, RParen, E);
+}
+
+ExprResult SemaSYCL::BuildUniqueStableNameExpr(SourceLocation OpLoc,
+                                               SourceLocation LParen,
+                                               SourceLocation RParen,
+                                               TypeSourceInfo *TSI) {
+  return SYCLUniqueStableNameExpr::Create(getASTContext(), OpLoc, LParen,
+                                          RParen, TSI);
+}
+
+ExprResult SemaSYCL::ActOnUniqueStableNameExpr(SourceLocation OpLoc,
+                                               SourceLocation LParen,
+                                               SourceLocation RParen,
+                                               ParsedType ParsedTy) {
+  TypeSourceInfo *TSI = nullptr;
+  QualType Ty = SemaRef.GetTypeFromParser(ParsedTy, &TSI);
+
+  if (Ty.isNull())
+    return ExprError();
+  if (!TSI)
+    TSI = getASTContext().getTrivialTypeSourceInfo(Ty, LParen);
+
+  return BuildUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
+}
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index a9711a7397bc7..a4c28adb7153f 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -33,10 +33,14 @@
 #include "clang/Sema/Ownership.h"
 #include "clang/Sema/Scope.h"
 #include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaCUDA.h"
 #include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaSYCL.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
@@ -2275,11 +2279,9 @@ Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
     // Otherwise, if we have any useful type information, check that
     // the type declares the appropriate method.
   } else if (iface || !objectType->qual_empty()) {
-    IdentifierInfo *selectorIdents[] = {
-      &Context.Idents.get("countByEnumeratingWithState"),
-      &Context.Idents.get("objects"),
-      &Context.Idents.get("count")
-    };
+    const IdentifierInfo *selectorIdents[] = {
+        &Context.Idents.get("countByEnumeratingWithState"),
+        &Context.Idents.get("objects"), &Context.Idents.get("count")};
     Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);
 
     ObjCMethodDecl *method = nullptr;
@@ -3097,7 +3099,7 @@ StmtResult Sema::BuildCXXForRangeStmt(
   // In OpenMP loop region loop control variable must be private. Perform
   // analysis of first part (if any).
if (getLangOpts().OpenMP >= 50 && BeginDeclStmt.isUsable()) - ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get()); + OpenMP().ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get()); return new (Context) CXXForRangeStmt( InitStmt, RangeDS, cast_or_null(BeginDeclStmt.get()), @@ -4575,13 +4577,13 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, // Exceptions aren't allowed in CUDA device code. if (getLangOpts().CUDA) - CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions) - << "try" << CurrentCUDATarget(); + CUDA().DiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions) + << "try" << llvm::to_underlying(CUDA().CurrentTarget()); // Exceptions aren't allowed in SYCL device code. if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(TryLoc, diag::err_sycl_restrict) - << KernelUseExceptions; + SYCL().DiagIfDeviceCode(TryLoc, diag::err_sycl_restrict) + << SemaSYCL::KernelUseExceptions; if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try"; @@ -4693,8 +4695,8 @@ StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc, // Exceptions aren't allowed in SYCL device code. if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(TryLoc, diag::err_sycl_restrict) - << KernelUseExceptions; + SYCL().DiagIfDeviceCode(TryLoc, diag::err_sycl_restrict) + << SemaSYCL::KernelUseExceptions; FSI->setHasSEHTry(TryLoc); @@ -4712,7 +4714,7 @@ StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc, // Reject __try on unsupported targets. if (!Context.getTargetInfo().isSEHTrySupported()) { if (getLangOpts().SYCLIsDevice) - SYCLDiagIfDeviceCode(TryLoc, diag::err_seh_try_unsupported); + SYCL().DiagIfDeviceCode(TryLoc, diag::err_seh_try_unsupported); else Diag(TryLoc, diag::err_seh_try_unsupported); } @@ -4836,7 +4838,8 @@ buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI, assert(Cap.isVariableCapture() && "unknown kind of capture"); if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) - S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel); + S.OpenMP().setOpenMPCaptureKind(Field, Cap.getVariable(), + RSI->OpenMPLevel); Captures.push_back(CapturedStmt::Capture( Cap.getLocation(), diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index 2013799b5eb81..f4b6e1ceb6f02 100644 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -33,6 +33,7 @@ #include "clang/Sema/Overload.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/SemaCUDA.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" #include "clang/Sema/TemplateDeduction.h" @@ -970,9 +971,10 @@ void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn, static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S, SourceLocation Loc, - IdentifierInfo *Name) { - NamedDecl *PrevDecl = SemaRef.LookupSingleName( - S, Name, Loc, Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration); + const IdentifierInfo *Name) { + NamedDecl *PrevDecl = + SemaRef.LookupSingleName(S, Name, Loc, Sema::LookupOrdinaryName, + RedeclarationKind::ForVisibleRedeclaration); if (PrevDecl && PrevDecl->isTemplateParameter()) SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl); } @@ -1578,7 +1580,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, CheckFunctionOrTemplateParamDeclarator(S, D); - IdentifierInfo *ParamName = D.getIdentifier(); 
+  const IdentifierInfo *ParamName = D.getIdentifier();
   bool IsParameterPack = D.hasEllipsis();
   NonTypeTemplateParmDecl *Param = NonTypeTemplateParmDecl::Create(
       Context, Context.getTranslationUnitDecl(), D.getBeginLoc(),
@@ -1630,26 +1632,20 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
 /// ActOnTemplateTemplateParameter - Called when a C++ template template
 /// parameter (e.g. T in template