diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index f0d0ee554f12b..2b2ccac2972bf 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -1195,6 +1195,12 @@ std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                             const Value *LHS, const Value *RHS,
                                             const Instruction *ContextI,
                                             const DataLayout &DL);
+
+// Call \p InsertAffected on all Values whose known bits / value may be affected
+// by the condition \p Cond. Used by AssumptionCache and DomConditionCache.
+void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
+                                   function_ref<void(Value *)> InsertAffected);
+
 } // end namespace llvm
 
 #endif // LLVM_ANALYSIS_VALUETRACKING_H
diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp
index 1b7277df0e0cd..9c86ef13f6df0 100644
--- a/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/llvm/lib/Analysis/AssumptionCache.cpp
@@ -61,19 +61,13 @@ findAffectedValues(CallBase *CI, TargetTransformInfo *TTI,
   // Note: This code must be kept in-sync with the code in
   // computeKnownBitsFromAssume in ValueTracking.
 
-  auto AddAffected = [&Affected](Value *V, unsigned Idx =
-                                               AssumptionCache::ExprResultIdx) {
-    if (isa<Argument>(V) || isa<GlobalValue>(V)) {
+  auto InsertAffected = [&Affected](Value *V) {
+    Affected.push_back({V, AssumptionCache::ExprResultIdx});
+  };
+
+  auto AddAffectedVal = [&Affected](Value *V, unsigned Idx) {
+    if (isa<Argument>(V) || isa<GlobalValue>(V) || isa<Instruction>(V)) {
       Affected.push_back({V, Idx});
-    } else if (auto *I = dyn_cast<Instruction>(V)) {
-      Affected.push_back({I, Idx});
-
-      // Peek through unary operators to find the source of the condition.
-      Value *Op;
-      if (match(I, m_PtrToInt(m_Value(Op)))) {
-        if (isa<Argument>(Op) || isa<GlobalValue>(Op))
-          Affected.push_back({Op, Idx});
-      }
     }
   };
 
@@ -82,64 +76,23 @@ findAffectedValues(CallBase *CI, TargetTransformInfo *TTI,
     if (Bundle.getTagName() == "separate_storage") {
       assert(Bundle.Inputs.size() == 2 &&
              "separate_storage must have two args");
-      AddAffected(getUnderlyingObject(Bundle.Inputs[0]), Idx);
-      AddAffected(getUnderlyingObject(Bundle.Inputs[1]), Idx);
+      AddAffectedVal(getUnderlyingObject(Bundle.Inputs[0]), Idx);
+      AddAffectedVal(getUnderlyingObject(Bundle.Inputs[1]), Idx);
     } else if (Bundle.Inputs.size() > ABA_WasOn &&
                Bundle.getTagName() != IgnoreBundleTag)
-      AddAffected(Bundle.Inputs[ABA_WasOn], Idx);
+      AddAffectedVal(Bundle.Inputs[ABA_WasOn], Idx);
   }
 
-  Value *Cond = CI->getArgOperand(0), *A, *B;
-  AddAffected(Cond);
-  if (match(Cond, m_Not(m_Value(A))))
-    AddAffected(A);
-
-  CmpInst::Predicate Pred;
-  if (match(Cond, m_Cmp(Pred, m_Value(A), m_Value(B)))) {
-    AddAffected(A);
-    AddAffected(B);
-
-    if (Pred == ICmpInst::ICMP_EQ) {
-      if (match(B, m_ConstantInt())) {
-        Value *X;
-        // (X & C) or (X | C) or (X ^ C).
-        // (X << C) or (X >>_s C) or (X >>_u C).
-        if (match(A, m_BitwiseLogic(m_Value(X), m_ConstantInt())) ||
-            match(A, m_Shift(m_Value(X), m_ConstantInt())))
-          AddAffected(X);
-      }
-    } else if (Pred == ICmpInst::ICMP_NE) {
-      Value *X;
-      // Handle (X & pow2 != 0).
-      if (match(A, m_And(m_Value(X), m_Power2())) && match(B, m_Zero()))
-        AddAffected(X);
-    } else if (Pred == ICmpInst::ICMP_ULT) {
-      Value *X;
-      // Handle (A + C1) u< C2, which is the canonical form of A > C3 && A < C4,
-      // and recognized by LVI at least.
-      if (match(A, m_Add(m_Value(X), m_ConstantInt())) &&
-          match(B, m_ConstantInt()))
-        AddAffected(X);
-    } else if (CmpInst::isFPPredicate(Pred)) {
-      // fcmp fneg(x), y
-      // fcmp fabs(x), y
-      // fcmp fneg(fabs(x)), y
-      if (match(A, m_FNeg(m_Value(A))))
-        AddAffected(A);
-      if (match(A, m_FAbs(m_Value(A))))
-        AddAffected(A);
-    }
-  } else if (match(Cond, m_Intrinsic<Intrinsic::is_fpclass>(m_Value(A),
-                                                            m_Value(B)))) {
-    AddAffected(A);
-  }
+  Value *Cond = CI->getArgOperand(0);
+  findValuesAffectedByCondition(Cond, /*IsAssume=*/true, InsertAffected);
 
   if (TTI) {
     const Value *Ptr;
     unsigned AS;
     std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(Cond);
     if (Ptr)
-      AddAffected(const_cast<Value *>(Ptr->stripInBoundsOffsets()));
+      AddAffectedVal(const_cast<Value *>(Ptr->stripInBoundsOffsets()),
+                     AssumptionCache::ExprResultIdx);
   }
 }
 
diff --git a/llvm/lib/Analysis/DomConditionCache.cpp b/llvm/lib/Analysis/DomConditionCache.cpp
index da05e02b4b57f..66bd15b47901d 100644
--- a/llvm/lib/Analysis/DomConditionCache.cpp
+++ b/llvm/lib/Analysis/DomConditionCache.cpp
@@ -7,75 +7,13 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/DomConditionCache.h"
-#include "llvm/IR/PatternMatch.h"
-
+#include "llvm/Analysis/ValueTracking.h"
 using namespace llvm;
-using namespace llvm::PatternMatch;
 
-// TODO: This code is very similar to findAffectedValues() in
-// AssumptionCache, but currently specialized to just the patterns that
-// computeKnownBits() supports, and without the notion of result elem indices
-// that are AC specific. Deduplicate this code once we have a clearer picture
-// of how much they can be shared.
 static void findAffectedValues(Value *Cond,
                                SmallVectorImpl<Value *> &Affected) {
-  auto AddAffected = [&Affected](Value *V) {
-    if (isa<Argument>(V) || isa<GlobalValue>(V)) {
-      Affected.push_back(V);
-    } else if (auto *I = dyn_cast<Instruction>(V)) {
-      Affected.push_back(I);
-
-      // Peek through unary operators to find the source of the condition.
-      Value *Op;
-      if (match(I, m_PtrToInt(m_Value(Op)))) {
-        if (isa<Argument>(Op) || isa<GlobalValue>(Op))
-          Affected.push_back(Op);
-      }
-    }
-  };
-
-  SmallVector<Value *, 8> Worklist;
-  SmallPtrSet<Value *, 8> Visited;
-  Worklist.push_back(Cond);
-  while (!Worklist.empty()) {
-    Value *V = Worklist.pop_back_val();
-    if (!Visited.insert(V).second)
-      continue;
-
-    CmpInst::Predicate Pred;
-    Value *A, *B;
-    if (match(V, m_LogicalOp(m_Value(A), m_Value(B)))) {
-      Worklist.push_back(A);
-      Worklist.push_back(B);
-    } else if (match(V, m_ICmp(Pred, m_Value(A), m_Constant()))) {
-      AddAffected(A);
-
-      if (ICmpInst::isEquality(Pred)) {
-        Value *X;
-        // (X & C) or (X | C) or (X ^ C).
-        // (X << C) or (X >>_s C) or (X >>_u C).
-        if (match(A, m_BitwiseLogic(m_Value(X), m_ConstantInt())) ||
-            match(A, m_Shift(m_Value(X), m_ConstantInt())))
-          AddAffected(X);
-      } else {
-        Value *X;
-        // Handle (A + C1) u< C2, which is the canonical form of
-        // A > C3 && A < C4.
-        if (match(A, m_Add(m_Value(X), m_ConstantInt())))
-          AddAffected(X);
-        // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported by
-        // computeKnownFPClass().
-        if ((Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGT) &&
-            match(A, m_ElementWiseBitCast(m_Value(X))))
-          Affected.push_back(X);
-      }
-    } else if (match(Cond, m_CombineOr(m_FCmp(Pred, m_Value(A), m_Constant()),
-                                       m_Intrinsic<Intrinsic::is_fpclass>(
-                                           m_Value(A), m_Constant())))) {
-      // Handle patterns that computeKnownFPClass() support.
-      AddAffected(A);
-    }
-  }
+  auto InsertAffected = [&Affected](Value *V) { Affected.push_back(V); };
+  findValuesAffectedByCondition(Cond, /*IsAssume=*/false, InsertAffected);
 }
 
 void DomConditionCache::registerBranch(BranchInst *BI) {
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e591ac504e9f0..69b2710420391 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -9099,3 +9099,108 @@ ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
 
   return CR;
 }
+
+static void
+addValueAffectedByCondition(Value *V,
+                            function_ref<void(Value *)> InsertAffected) {
+  assert(V != nullptr);
+  if (isa<Argument>(V) || isa<GlobalValue>(V)) {
+    InsertAffected(V);
+  } else if (auto *I = dyn_cast<Instruction>(V)) {
+    InsertAffected(V);
+
+    // Peek through unary operators to find the source of the condition.
+    Value *Op;
+    if (match(I, m_PtrToInt(m_Value(Op)))) {
+      if (isa<Argument>(Op) || isa<GlobalValue>(Op))
+        InsertAffected(Op);
+    }
+  }
+}
+
+void llvm::findValuesAffectedByCondition(
+    Value *Cond, bool IsAssume, function_ref<void(Value *)> InsertAffected) {
+  auto AddAffected = [&InsertAffected](Value *V) {
+    addValueAffectedByCondition(V, InsertAffected);
+  };
+
+  auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
+    if (IsAssume) {
+      AddAffected(LHS);
+      AddAffected(RHS);
+    } else if (match(RHS, m_Constant()))
+      AddAffected(LHS);
+  };
+
+  SmallVector<Value *, 8> Worklist;
+  SmallPtrSet<Value *, 8> Visited;
+  Worklist.push_back(Cond);
+  while (!Worklist.empty()) {
+    Value *V = Worklist.pop_back_val();
+    if (!Visited.insert(V).second)
+      continue;
+
+    CmpInst::Predicate Pred;
+    Value *A, *B, *X;
+
+    if (IsAssume) {
+      AddAffected(V);
+      if (match(V, m_Not(m_Value(X))))
+        AddAffected(X);
+    }
+
+    if (match(V, m_LogicalOp(m_Value(A), m_Value(B)))) {
+      // assume(A && B) is split to -> assume(A); assume(B);
+      // assume(!(A || B)) is split to -> assume(!A); assume(!B);
+      // Finally, assume(A || B) / assume(!(A && B)) generally don't provide
+      // enough information to be worth handling (intersection of information as
+      // opposed to union).
+      if (!IsAssume) {
+        Worklist.push_back(A);
+        Worklist.push_back(B);
+      }
+    } else if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
+      AddCmpOperands(A, B);
+
+      if (ICmpInst::isEquality(Pred)) {
+        if (match(B, m_ConstantInt())) {
+          // (X & C) or (X | C) or (X ^ C).
+          // (X << C) or (X >>_s C) or (X >>_u C).
+          if (match(A, m_BitwiseLogic(m_Value(X), m_ConstantInt())) ||
+              match(A, m_Shift(m_Value(X), m_ConstantInt())))
+            AddAffected(X);
+        }
+      } else {
+        // Handle (A + C1) u< C2, which is the canonical form of
+        // A > C3 && A < C4.
+        if (match(A, m_Add(m_Value(X), m_ConstantInt())) &&
+            match(B, m_ConstantInt()))
+          AddAffected(X);
+
+        // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported
+        // by computeKnownFPClass().
+        if (match(A, m_ElementWiseBitCast(m_Value(X)))) {
+          if (Pred == ICmpInst::ICMP_SLT && match(B, m_Zero()))
+            InsertAffected(X);
+          else if (Pred == ICmpInst::ICMP_SGT && match(B, m_AllOnes()))
+            InsertAffected(X);
+        }
+      }
+    } else if (match(Cond, m_FCmp(Pred, m_Value(A), m_Value(B)))) {
+      AddCmpOperands(A, B);
+
+      // fcmp fneg(x), y
+      // fcmp fabs(x), y
+      // fcmp fneg(fabs(x)), y
+      if (match(A, m_FNeg(m_Value(A))))
+        AddAffected(A);
+      if (match(A, m_FAbs(m_Value(A))))
+        AddAffected(A);
+
+    } else if (match(V, m_Intrinsic<Intrinsic::is_fpclass>(m_Value(A),
+                                                           m_Value()))) {
+      // Handle patterns that computeKnownFPClass() support.
+      AddAffected(A);
+    }
+  }
+}
diff --git a/llvm/test/Analysis/ValueTracking/numsignbits-from-assume.ll b/llvm/test/Analysis/ValueTracking/numsignbits-from-assume.ll
index 00c66eeb59957..95ac98532da62 100644
--- a/llvm/test/Analysis/ValueTracking/numsignbits-from-assume.ll
+++ b/llvm/test/Analysis/ValueTracking/numsignbits-from-assume.ll
@@ -51,7 +51,7 @@ define i32 @computeNumSignBits_sub1(i32 %in) {
 
 define i32 @computeNumSignBits_sub2(i32 %in) {
 ; CHECK-LABEL: @computeNumSignBits_sub2(
-; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[IN:%.*]], -1
+; CHECK-NEXT:    [[SUB:%.*]] = add nsw i32 [[IN:%.*]], -1
 ; CHECK-NEXT:    [[COND:%.*]] = icmp ult i32 [[SUB]], 43
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[COND]])
 ; CHECK-NEXT:    [[SH:%.*]] = shl nuw nsw i32 [[SUB]], 3
diff --git a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
index d40cd7fd503ec..d6706d76056ee 100644
--- a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
+++ b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
@@ -185,10 +185,9 @@ define i1 @test8(float %x) {
 ; CHECK-NEXT:    [[COND:%.*]] = fcmp oeq float [[ABS]], 0x7FF0000000000000
 ; CHECK-NEXT:    br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    [[RET1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 575)
-; CHECK-NEXT:    ret i1 [[RET1]]
+; CHECK-NEXT:    ret i1 true
 ; CHECK:       if.else:
-; CHECK-NEXT:    [[RET2:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 575)
+; CHECK-NEXT:    [[RET2:%.*]] = call i1 @llvm.is.fpclass.f32(float [[X]], i32 59)
 ; CHECK-NEXT:    ret i1 [[RET2]]
 ;
   %abs = call float @llvm.fabs.f32(float %x)
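
Illustrative note, not part of the patch: the sketch below shows how a client of the new helper can collect affected values, mirroring the DomConditionCache call site in the diff above; the collectAffected wrapper name is invented for this example.

// Minimal sketch, assuming a tree that already contains the
// findValuesAffectedByCondition() declaration added by this patch.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Hypothetical helper: gather every value whose known bits may be refined by
// a dominating branch on Cond. IsAssume=false matches the DomConditionCache
// usage, so compare operands are only added when the other side is a
// constant, and logical and/or conditions are walked recursively.
static SmallVector<Value *, 4> collectAffected(Value *Cond) {
  SmallVector<Value *, 4> Affected;
  findValuesAffectedByCondition(Cond, /*IsAssume=*/false,
                                [&](Value *V) { Affected.push_back(V); });
  return Affected;
}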