[InstrProf] Single byte counters in coverage #75425
Conversation
@llvm/pr-subscribers-pgo @llvm/pr-subscribers-clang-codegen Author: None (gulfemsavrun)

Changes: This patch inserts 1-byte counters instead of 8-byte counters into llvm profiles for source-based code coverage. The original idea was proposed as block-cov for PGO, and this patch repurposes that idea for coverage: https://groups.google.com/g/llvm-dev/c/r03Z6JoN7d4

The current 8-byte counter mechanism adds counters to minimal regions and infers the counters in the remaining regions by adding or subtracting counters. For example, it infers the counter in the if.else region of an if statement by subtracting the if.then counter from the if.entry counter. Whenever there is a control-flow merge, it adds the counters from all the incoming regions. However, we are not going to be able to infer counters by subtracting two execution counts when using single-byte counters. Therefore, this patch conservatively inserts additional counters for the cases where we need to add or subtract counters.

Patch is 56.62 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/75425.diff

18 Files Affected:
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 810b28f25fa18b..d794afa66c2153 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -33,6 +33,10 @@ using namespace CodeGen;
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
@@ -1275,7 +1279,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getTrueExpr());
+ else
+ CGF.incrementProfileCounter(E);
Visit(E->getTrueExpr());
eval.end(CGF);
@@ -1290,6 +1297,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getFalseExpr());
Visit(E->getFalseExpr());
eval.end(CGF);
@@ -1298,6 +1307,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
E->getType());
CGF.EmitBlock(ContBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index e532794b71bdb4..d1d66089280fbe 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -28,6 +28,10 @@ using namespace CodeGen;
// Complex Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
/// Return the complex type that we are meant to emit.
@@ -1319,7 +1323,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getTrueExpr());
+ else
+ CGF.incrementProfileCounter(E);
+
ComplexPairTy LHS = Visit(E->getTrueExpr());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
@@ -1327,9 +1335,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getFalseExpr());
ComplexPairTy RHS = Visit(E->getFalseExpr());
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBlock(ContBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
eval.end(CGF);
// Create a PHI node for the real part.
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 41ad2ddac30d2d..3fdd614ad86af6 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -52,6 +52,10 @@ using llvm::Value;
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
namespace {
/// Determine whether the given binary operation may overflow.
@@ -4807,8 +4811,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// If the dead side doesn't have labels we need, just emit the Live part.
if (!CGF.ContainsLabel(dead)) {
- if (CondExprBool)
+ if (CondExprBool) {
+ if (llvm::EnableSingleByteCoverage) {
+ CGF.incrementProfileCounter(lhsExpr);
+ CGF.incrementProfileCounter(rhsExpr);
+ }
CGF.incrementProfileCounter(E);
+ }
Value *Result = Visit(live);
// If the live part is a throw expression, it acts like it has a void
@@ -4887,7 +4896,12 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
- CGF.incrementProfileCounter(E, StepV);
+ if (llvm::EnableSingleByteCoverage) {
+ CGF.incrementProfileCounter(lhsExpr);
+ CGF.incrementProfileCounter(rhsExpr);
+ CGF.incrementProfileCounter(E);
+ } else
+ CGF.incrementProfileCounter(E, StepV);
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
@@ -4908,7 +4922,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
CGF.getProfileCount(lhsExpr));
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(lhsExpr);
+ else
+ CGF.incrementProfileCounter(E);
+
eval.begin(CGF);
Value *LHS = Visit(lhsExpr);
eval.end(CGF);
@@ -4917,6 +4935,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
Builder.CreateBr(ContBlock);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(rhsExpr);
eval.begin(CGF);
Value *RHS = Visit(rhsExpr);
eval.end(CGF);
@@ -4934,6 +4954,12 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
PN->addIncoming(LHS, LHSBlock);
PN->addIncoming(RHS, RHSBlock);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
+
return PN;
}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 0f79a2e861d220..1513c741c7754e 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -43,6 +43,10 @@ using namespace CodeGen;
// Statement Emission
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
SourceLocation Loc;
@@ -841,7 +845,10 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the 'then' code.
EmitBlock(ThenBlock);
- incrementProfileCounter(&S);
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getThen());
+ else
+ incrementProfileCounter(&S);
{
RunCleanupsScope ThenScope(*this);
EmitStmt(S.getThen());
@@ -855,6 +862,9 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
auto NL = ApplyDebugLocation::CreateEmpty(*this);
EmitBlock(ElseBlock);
}
+ // When single byte coverage mode is enabled, add a counter to else block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(Else);
{
RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
@@ -868,6 +878,11 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the continuation block for code after the if.
EmitBlock(ContBlock, true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
@@ -912,6 +927,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
SourceLocToDebugLoc(R.getEnd()),
checkIfLoopMustProgress(CondIsConstInt));
+ // When single byte coverage mode is enabled, add a counter to loop condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
+
// As long as the condition is true, go to the loop body.
llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
if (EmitBoolCondBranch) {
@@ -944,7 +963,11 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
{
RunCleanupsScope BodyScope(*this);
EmitBlock(LoopBody);
- incrementProfileCounter(&S);
+ // When single byte coverage mode is enabled, add a counter to the body.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
EmitStmt(S.getBody());
}
@@ -966,6 +989,11 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// a branch, try to erase it.
if (!EmitBoolCondBranch)
SimplifyForwardingBlocks(LoopHeader.getBlock());
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
@@ -981,13 +1009,19 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// Emit the body of the loop.
llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- EmitBlockWithFallThrough(LoopBody, &S);
+ if (llvm::EnableSingleByteCoverage)
+ EmitBlockWithFallThrough(LoopBody, S.getBody());
+ else
+ EmitBlockWithFallThrough(LoopBody, &S);
{
RunCleanupsScope BodyScope(*this);
EmitStmt(S.getBody());
}
EmitBlock(LoopCond.getBlock());
+ // When single byte coverage mode is enabled, add a counter to loop condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -1028,6 +1062,11 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// emitting a branch, try to erase it.
if (!EmitBoolCondBranch)
SimplifyForwardingBlocks(LoopCond.getBlock());
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
}
void CodeGenFunction::EmitForStmt(const ForStmt &S,
@@ -1086,6 +1125,11 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
BreakContinueStack.back().ContinueBlock = Continue;
}
+ // When single byte coverage mode is enabled, add a counter to loop
+ // condition.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getCond());
+
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -1116,8 +1160,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// Treat it as a non-zero constant. Don't even create a new block for the
// body, just fall into it.
}
- incrementProfileCounter(&S);
+ // When single byte coverage mode is enabled, add a counter to the body.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
@@ -1129,6 +1177,8 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
if (S.getInc()) {
EmitBlock(Continue.getBlock());
EmitStmt(S.getInc());
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getInc());
}
BreakContinueStack.pop_back();
@@ -1144,6 +1194,11 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
}
void
@@ -1196,7 +1251,10 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
}
EmitBlock(ForBody);
- incrementProfileCounter(&S);
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(S.getBody());
+ else
+ incrementProfileCounter(&S);
// Create a block for the increment. In case of a 'continue', we jump there.
JumpDest Continue = getJumpDestInCurrentScope("for.inc");
@@ -1226,6 +1284,11 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
+
+ // When single byte coverage mode is enabled, add a counter to continuation
+ // block.
+ if (llvm::EnableSingleByteCoverage)
+ incrementProfileCounter(&S);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 2199d7b58fb96e..81bed76078f161 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -52,6 +52,10 @@
using namespace clang;
using namespace CodeGen;
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
@@ -1269,7 +1273,10 @@ void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
const Stmt *S) {
llvm::BasicBlock *SkipCountBB = nullptr;
- if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
+ // Do not skip over the instrumentation when single byte coverage mode is
+ // enabled.
+ if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ !llvm::EnableSingleByteCoverage) {
// When instrumenting for profiling, the fallthrough to certain
// statements needs to skip over the instrumentation code so that we
// get an accurate count.
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 618e78809db408..846ff79332c5d2 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -1535,7 +1535,7 @@ class CodeGenFunction : public CodeGenTypeCache {
if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
!CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
!CurFn->hasFnAttribute(llvm::Attribute::SkipProfile))
- PGO.emitCounterIncrement(Builder, S, StepV);
+ PGO.emitCounterSetOrIncrement(Builder, S, StepV);
PGO.setCurrentStmt(S);
}
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 7ad26ace328ab2..059482bc1663d1 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -832,6 +832,7 @@ void CodeGenModule::Release() {
checkAliases();
EmitDeferredUnusedCoverageMappings();
CodeGenPGO(*this).setValueProfilingFlag(getModule());
+ CodeGenPGO(*this).setProfileVersion(getModule());
if (CoverageMapping)
CoverageMapping->emit();
if (CodeGenOpts.SanitizeCfiCrossDso) {
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index 81bf8ea696b164..31a61494c62b7c 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -23,6 +23,10 @@
#include "llvm/Support/MD5.h"
#include <optional>
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
static llvm::cl::opt<bool>
EnableValueProfiling("enable-value-profiling",
llvm::cl::desc("Enable value profiling"),
@@ -219,6 +223,14 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return Base::VisitBinaryOperator(S);
}
+ bool VisitConditionalOperator(ConditionalOperator *S) {
+ if (llvm::EnableSingleByteCoverage && S->getTrueExpr())
+ CounterMap[S->getTrueExpr()] = NextCounter++;
+ if (llvm::EnableSingleByteCoverage && S->getFalseExpr())
+ CounterMap[S->getFalseExpr()] = NextCounter++;
+ return Base::VisitConditionalOperator(S);
+ }
+
/// Include \p S in the function hash.
bool VisitStmt(Stmt *S) {
auto Type = updateCounterMappings(S);
@@ -234,8 +246,20 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
if (Hash.getHashVersion() == PGO_HASH_V1)
return Base::TraverseIfStmt(If);
+ // When single byte coverage mode is enabled, add a counter to then and
+ // else.
+ for (Stmt *CS : If->children()) {
+ if (!CS || !llvm::EnableSingleByteCoverage)
+ continue;
+ if (CS == If->getThen())
+ CounterMap[If->getThen()] = NextCounter++;
+ else if (CS == If->getElse())
+ CounterMap[If->getElse()] = NextCounter++;
+ }
+
// Otherwise, keep track of which branch we're in while traversing.
VisitStmt(If);
+
for (Stmt *CS : If->children()) {
if (!CS)
continue;
@@ -249,6 +273,77 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return true;
}
+ bool TraverseWhileStmt(WhileStmt *While) {
+ // When single byte coverage mode is enabled, add a counter to condition and
+ // body.
+ for (Stmt *CS : While->children()) {
+ if (!CS || !llvm::EnableSingleByteCoverage)
+ continue;
+ if (CS == While->getCond())
+ CounterMap[While->getCond()] = NextCounter++;
+ else if (CS == While->getBody())
+ CounterMap[While->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseWhileStmt(While);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseDoStmt(DoStmt *Do) {
+ // When single byte coverage mode is enabled, add a counter to condition and
+ // body.
+ for (Stmt *CS : Do->children()) {
+ if (!CS || !llvm::EnableSingleByteCoverage)
+ continue;
+ if (CS == Do->getCond())
+ CounterMap[Do->getCond()] = NextCounter++;
+ else if (CS == Do->getBody())
+ CounterMap[Do->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseDoStmt(Do);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseForStmt(ForStmt *For) {
+ // When single byte coverage mode is enabled, add a counter to condition,
+ // increment and body.
+ for (Stmt *CS : For->children()) {
+ if (!CS || !llvm::EnableSingleByteCoverage)
+ continue;
+ if (CS == For->getCond())
+ CounterMap[For->getCond()] = NextCounter++;
+ else if (CS == For->getInc())
+ CounterMap[For->getInc()] = NextCounter++;
+ else if (CS == For->getBody())
+ CounterMap[For->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseForStmt(For);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
+ bool TraverseCXXForRangeStmt(CXXForRangeStmt *ForRange) {
+ // When single byte coverage mode is enabled, add a counter to body.
+ for (Stmt *CS : ForRange->children()) {
+ if (!CS || !llvm::EnableSingleByteCoverage)
+ continue;
+ if (CS == ForRange->getBody())
+ CounterMap[ForRange->getBody()] = NextCounter++;
+ }
+
+ Base::TraverseCXXForRangeStmt(ForRange);
+ if (Hash.getHashVersion() != PGO_HASH_V1)
+ Hash.combine(PGOHash::EndOfScope);
+ return true;
+ }
+
// If the statement type \p N is nestable, and its nesting impacts profile
// stability, define a custom traversal which tracks the end of the statement
// in the hash (provided we're not using the V1 hash).
@@ -260,10 +355,6 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return true; \
}
- DEFINE_NESTABLE_TRAVERSAL(WhileStmt)
- DEFINE_NESTABLE_TRAVERSAL(DoStmt)
- DEFINE_NESTABLE_TRAVERSAL(ForStmt)
- DEFINE_NESTABLE_TRAVERSAL(CXXForRangeStmt)
DEFINE_NESTABLE_TRAVERSAL(ObjCForCollectionStmt)
DEFINE_NESTABLE_TRAVERSAL(CXXTryStmt)
DEFINE_NESTABLE_TRAVERSAL(CXXCatchStmt)
@@ -952,8 +1043,8 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
Fn->setEntryCount(FunctionCount);
}
-void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
- llvm::Value *StepV) {
+void CodeGenPGO::emitCounterSetOrIncrement(CGBuilderTy &Builder, const Stmt *S,
+ ...
[truncated]
✅ With the latest revision this PR passed the C/C++ code formatter.
What are the next steps to land this? We're interested in trying this out for Chrome!
We just need to land this :)
Entry coverage (`-pgo-function-entry-coverage`) and block coverage (`-pgo-block-coverage`) can be useful to reduce the size overhead of instrumented builds, and the profiles they generate can even be used in PGO builds. When merging raw profiles with coverage, we accumulate their values as if they were counts, and `llvm-profdata` considers a value to be "covered" if it is larger than zero. https://github.com/llvm/llvm-project/blob/acdb4cdc04ed4d9a130f0fa706ed1b0f42cc1aa0/llvm/lib/ProfileData/InstrProf.cpp#L841-L842 This is technically different from accumulating counts, but it can help discriminate cold functions from hot functions when the number of raw profiles is large. See llvm#75425 (comment) for discussion.
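The sketch below is a standalone illustration of that merge-then-threshold behavior. The helper names are made up for the example and are not the actual InstrProf.cpp code linked above.

```cpp
// Sketch only: merged coverage values accumulate like counts, and a region is
// treated as "covered" once its merged value is larger than zero.
#include <cassert>
#include <cstdint>
#include <vector>

// Accumulate one raw profile's counters into the merged profile.
void mergeCounters(std::vector<uint64_t> &Merged,
                   const std::vector<uint64_t> &Raw) {
  assert(Merged.size() == Raw.size() && "profiles must have matching shapes");
  for (size_t I = 0; I < Merged.size(); ++I)
    Merged[I] += Raw[I];
}

// A counter counts as covered if its merged value is non-zero.
bool isCovered(uint64_t MergedValue) { return MergedValue > 0; }

int main() {
  std::vector<uint64_t> Merged = {0, 0, 0};
  mergeCounters(Merged, {1, 0, 1}); // run 1 covered regions 0 and 2
  mergeCounters(Merged, {1, 0, 0}); // run 2 covered region 0 only
  assert(isCovered(Merged[0]) && !isCovered(Merged[1]) && isCovered(Merged[2]));
}
```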
LGTM, but I'm less familiar with the clang coverage code. So I'll give others some time to accept.
```cpp
                     subtractCounters(CondCount, BodyCount));
  if (!llvm::EnableSingleByteCoverage)
    createBranchRegion(S->getCond(), BodyCount,
                       subtractCounters(CondCount, BodyCount));
```
I may have missed it, but do you plan to handle branch regions at some point with explicit byte counters?
We are currently interested in enabling single-byte counters mode for line coverage. But if there is interest, we might consider supporting branch coverage in single-byte counters mode as well. Branch coverage is trickier, though: we already insert additional counters for branch coverage, and supporting it in single-byte counters mode would require inserting even more. We would also need to think about associating counters with the relevant AST nodes for branch coverage.
LGTM. I would like to see this broadened to include branch coverage as well!
This patch inserts 1-byte counters instead of 8-byte counters into llvm profiles for source-based code coverage. The original idea was proposed as block-cov for PGO, and this patch repurposes that idea for coverage. The current 8-byte counter mechanism adds counters to minimal regions and infers the counters in the remaining regions by adding or subtracting counters. For example, it infers the counter in the if.else region of an if statement by subtracting the if.then counter from the if.entry counter. Whenever there is a control-flow merge, it adds the counters from all the incoming regions. However, we are not going to be able to infer counters by subtracting two execution counts when using single-byte counters. Therefore, this patch conservatively inserts additional counters for the cases where we need to add or subtract counters. RFC: https://discourse.llvm.org/t/rfc-single-byte-counters-for-source-based-code-coverage/75685
```cpp
  if (!llvm::EnableSingleByteCoverage)
    createBranchRegion(S->getCond(), BodyCount,
                       subtractCounters(CondCount, BodyCount));
```
As far as I have checked, I think the `!EnableSingleByteCoverage` check can be sunk into `createBranchRegion()` rather than being repeated at every caller.
Sorry for my delayed comment. I didn't notice till merging.
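A minimal, self-contained sketch of what that suggestion might look like; the names and types below are simplified stand-ins for the real CoverageMappingGen code, not its actual API.

```cpp
// Sketch only: the EnableSingleByteCoverage guard lives inside
// createBranchRegion, so callers no longer wrap each call in an `if`.
#include <cstdint>
#include <iostream>

static bool EnableSingleByteCoverage = true; // stand-in for the llvm::cl::opt
struct Counter { uint64_t ID; };

void createBranchRegion(const char *Cond, Counter TrueCnt, Counter FalseCnt) {
  // Branch regions are not emitted in single-byte coverage mode; check once here.
  if (EnableSingleByteCoverage)
    return;
  std::cout << "branch region for " << Cond << ": true=" << TrueCnt.ID
            << ", false=" << FalseCnt.ID << '\n';
}

int main() {
  // Caller no longer needs `if (!EnableSingleByteCoverage)` before the call.
  createBranchRegion("while.cond", Counter{1}, Counter{2});
}
```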
```cpp
    Active->ExecutionCount += I->ExecutionCount;
    if (I->Kind == Active->Kind) {
      assert(I->HasSingleByteCoverage == Active->HasSingleByteCoverage &&
             "Regions are generated in different coverage modes");
```
This is just a minor nit that I noticed and could be addressed in a follow-up change, but in all other `assert` cases the message is not capitalized, so it shouldn't be capitalized here either, for consistency.
`S->isConsteval()` is evaluated at the top of this method. Likely a mis-merge in #75425.
This patch inserts 1-byte counters instead of 8-byte counters into llvm profiles for source-based code coverage. The original idea was proposed as block-cov for PGO, and this patch repurposes that idea for coverage: https://groups.google.com/g/llvm-dev/c/r03Z6JoN7d4 The current 8-byte counter mechanism adds counters to minimal regions and infers the counters in the remaining regions by adding or subtracting counters. For example, it infers the counter in the if.else region of an if statement by subtracting the if.then counter from the if.entry counter. Whenever there is a control-flow merge, it adds the counters from all the incoming regions. However, we are not going to be able to infer counters by subtracting two execution counts when using single-byte counters. Therefore, this patch conservatively inserts additional counters for the cases where we need to add or subtract counters. RFC: https://discourse.llvm.org/t/rfc-single-byte-counters-for-source-based-code-coverage/75685 (cherry picked from commit 23f895f) Change-Id: Ia949464dee61ad6d6a5364b53b7783be1437c7a6 Signed-off-by: Yi-Hsuan Deng <[email protected]>
This patch inserts 1-byte counters instead of 8-byte counters into llvm profiles for source-based code coverage. The original idea was proposed as block-cov for PGO, and this patch repurposes that idea for coverage: https://groups.google.com/g/llvm-dev/c/r03Z6JoN7d4
The current 8-byte counter mechanism adds counters to minimal regions and infers the counters in the remaining regions by adding or subtracting counters. For example, it infers the counter in the if.else region of an if statement by subtracting the if.then counter from the if.entry counter. Whenever there is a control-flow merge, it adds the counters from all the incoming regions. However, we are not going to be able to infer counters by subtracting two execution counts when using single-byte counters. Therefore, this patch conservatively inserts additional counters for the cases where we need to add or subtract counters.
RFC: https://discourse.llvm.org/t/rfc-single-byte-counters-for-source-based-code-coverage/75685
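To make the counter-placement difference concrete, the hand-written illustration below marks where counters conceptually land for a simple if/else under the two modes. It is not compiler output; the placement comments summarize the description above.

```cpp
// Illustrative only: conceptual counter placement, not compiler output.
int abs_val(int X) {
  // 8-byte mode: counters are placed at function entry and in the "then"
  // region; the "else" count is inferred as (entry - then), and the count
  // after the if is recovered by adding the two incoming counts back together.
  //
  // Single-byte mode: a byte only records "executed or not", so nothing can
  // be inferred by subtraction; explicit counters are therefore also placed
  // on the "else" region and on the continuation block after the if.
  if (X < 0)   // entry counter (both modes)
    X = -X;    // then counter (both modes)
  else
    X = +X;    // else counter (single-byte mode only)
  return X;    // continuation counter (single-byte mode only)
}
```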