diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp
index 2554eba74301e..1c6bdc820f75e 100644
--- a/llvm/lib/Support/Mustache.cpp
+++ b/llvm/lib/Support/Mustache.cpp
@@ -619,8 +619,7 @@ void ASTNode::render(const json::Value &Data, raw_ostream &OS) {
     return;
   }
   case InvertSection: {
-    bool IsLambda =
-        SectionLambdas.find(AccessorValue[0]) != SectionLambdas.end();
+    bool IsLambda = SectionLambdas.contains(AccessorValue[0]);
     if (!isFalsey(Context) || IsLambda)
       return;
     renderChild(Context, OS);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index d99a30083fb88..c3e411e76efd9 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -3798,7 +3798,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
       return false;
     for (auto &[S, U, L] : ScalarUserAndIdx) {
       for (auto *U : S->users()) {
-        if (UserToExtractIdx.find(U) != UserToExtractIdx.end()) {
+        if (UserToExtractIdx.contains(U)) {
           auto *FMul = cast(U);
           auto *Op0 = FMul->getOperand(0);
           auto *Op1 = FMul->getOperand(1);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 88acfe13357dc..34ba53cbe0f9e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -1016,7 +1016,7 @@ class AMDGPULowerModuleLDS {
         auto NewGV = uniquifyGVPerKernel(M, GV, F);
         Changed |= (NewGV != GV);
         int BarId = (NumAbsolutes + 1);
-        if (Kernel2BarId.find(F) != Kernel2BarId.end()) {
+        if (Kernel2BarId.contains(F)) {
           BarId = (Kernel2BarId[F] + 1);
         }
         Kernel2BarId[F] = BarId;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index ca093be61d113..9a5ffe1c2dad8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -1256,7 +1256,7 @@ bool AMDGPUSwLowerLDS::run() {
   }
   for (Function *Func : FuncLDSAccessInfo.NonKernelsWithLDSArgument) {
     auto &K = FuncLDSAccessInfo.NonKernelToLDSAccessMap;
-    if (K.find(Func) != K.end())
+    if (K.contains(Func))
       continue;
     SetVector Vec;
     lowerNonKernelLDSAccesses(Func, Vec, NKLDSParams);
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index e3da8d3005629..d5d4749242d00 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -208,7 +208,7 @@ class RegionPressureMap {
 
   // Retrieve the LiveReg for a given RegionIdx
   GCNRPTracker::LiveRegSet &getLiveRegsForRegionIdx(unsigned RegionIdx) {
-    assert(IdxToInstruction.find(RegionIdx) != IdxToInstruction.end());
+    assert(IdxToInstruction.contains(RegionIdx));
     MachineInstr *Key = IdxToInstruction[RegionIdx];
     return RegionLiveRegMap[Key];
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
index 0d7bfe3d1161b..41cd68c0c0e72 100644
--- a/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -533,7 +533,7 @@ bool HexagonOptAddrMode::processAddBases(NodeAddr AddSN,
       [](const MachineInstr *MI,
          const DenseSet &ProcessedAddiInsts) -> bool {
         // If we've already processed this Addi, just return
-        if (ProcessedAddiInsts.find(MI) != ProcessedAddiInsts.end()) {
+        if (ProcessedAddiInsts.contains(MI)) {
           LLVM_DEBUG(dbgs() << "\t\t\tAddi already found in ProcessedAddiInsts: "
                             << *MI << "\n\t\t\tSkipping...");
           return true;
diff --git a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
index 5e8e1c55d91c6..9c9c099bc5fc4 100644
--- a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
+++ b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
@@ -216,7 +216,7 @@ class SPIRVIRMapping {
       erase(MI);
       return nullptr;
     }
-    assert(Defs.find(MI) != Defs.end() && Defs.find(MI)->second == HandleMF);
+    assert(Defs.contains(MI) && Defs.find(MI)->second == HandleMF);
     return MI;
   }
   Register find(SPIRV::IRHandle Handle, const MachineFunction *MF) {
diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
index d396dbf75eebc..44df2239c475a 100644
--- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
@@ -439,9 +439,7 @@ class ValueToShadowMap {
 
   // Returns true if the value already has a shadow (including if the value is a
   // constant). If true, calling getShadow() is valid.
-  bool hasShadow(Value *V) const {
-    return isa<Constant>(V) || (Map.find(V) != Map.end());
-  }
+  bool hasShadow(Value *V) const { return isa<Constant>(V) || Map.contains(V); }
 
   // Returns the shadow value for a given value. Asserts that the value has
   // a shadow value. Lazily creates shadows for constant values.
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 4ee34c3c88e73..1aceb5098eafb 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1036,7 +1036,7 @@ class LowerMatrixIntrinsics {
     for (Instruction &I : *BB) {
       if (match(&I, m_Intrinsic<Intrinsic::lifetime_end>()))
         LifetimeEnds.push_back(cast<IntrinsicInst>(&I));
-      if (ShapeMap.find(&I) == ShapeMap.end())
+      if (!ShapeMap.contains(&I))
        continue;
      if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
        MaybeFusableInsts.push_back(cast<CallInst>(&I));
@@ -1354,7 +1354,7 @@ class LowerMatrixIntrinsics {
     ToRemove.push_back(Inst);
     Value *Flattened = nullptr;
     for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
-      if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
+      if (!ShapeMap.contains(U.getUser())) {
        if (!Flattened)
          Flattened = Matrix.embedInVector(Builder);
        U.set(Flattened);
@@ -1401,7 +1401,7 @@ class LowerMatrixIntrinsics {
     // the returned cost is < 0, the argument is cheaper to use in the
     // dot-product lowering.
     auto GetCostForArg = [this, &CanBeFlattened](Value *Op, unsigned N) {
-      if (ShapeMap.find(Op) == ShapeMap.end())
+      if (!ShapeMap.contains(Op))
        return InstructionCost::getInvalid();

      if (!isa<Instruction>(Op))
@@ -1420,7 +1420,7 @@ class LowerMatrixIntrinsics {
         return EmbedCost;
       }

-      if (match(Op, m_BinOp()) && ShapeMap.find(Op) != ShapeMap.end()) {
+      if (match(Op, m_BinOp()) && ShapeMap.contains(Op)) {
        InstructionCost OriginalCost =
            TTI.getArithmeticInstrCost(cast<Instruction>(Op)->getOpcode(), EltTy) *