@@ -40,24 +40,6 @@ const (
 	// size of bucket hash table
 	buckHashSize = 179999

-	// maxStack is the max depth of stack to record in bucket.
-	// Note that it's only used internally as a guard against
-	// wildly out-of-bounds slicing of the PCs that come after
-	// a bucket struct, and it could increase in the future.
-	// The term "1" accounts for the first stack entry being
-	// taken up by a "skip" sentinel value for profilers which
-	// defer inline frame expansion until the profile is reported.
-	// The term "maxSkip" is for frame pointer unwinding, where we
-	// want to end up with maxLogicalStack frames but will discard
-	// some "physical" frames to account for skipping.
-	maxStack = 1 + maxSkip + maxLogicalStack
-
-	// maxLogicalStack is the maximum stack size of a call stack
-	// to encode in a profile. This counts "logical" frames, which
-	// includes inlined frames. We may record more than this many
-	// "physical" frames when using frame pointer unwinding to account
-	// for deferred handling of skipping frames & inline expansion.
-	maxLogicalStack = 128

 	// maxSkip is to account for deferred inline expansion
 	// when using frame pointer unwinding. We record the stack
 	// with "physical" frame pointers but handle skipping "logical"
@@ -67,6 +49,11 @@ const (
 	// This should be at least as large as the largest skip value
 	// used for profiling; otherwise stacks may be truncated inconsistently
 	maxSkip = 5
+
+	// maxProfStackDepth is the highest valid value for debug.profstackdepth.
+	// It's used for the bucket.stk func.
+	// TODO(fg): can we get rid of this?
+	maxProfStackDepth = 1024
 )

 type bucketType int
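The parsing of the new GODEBUG knob isn't part of this hunk, but the constant above implies the runtime caps whatever depth the user requests. Here is a minimal standalone sketch of that clamping, under the assumption that the parsed value is an int32 and that 0 means "record no profiler stacks" (as the guards later in this diff suggest); clampProfStackDepth is an illustrative name, not the runtime's actual parsing code:

```go
package main

import "fmt"

// maxProfStackDepth mirrors the constant introduced above.
const maxProfStackDepth = 1024

// clampProfStackDepth is a hypothetical helper: clamp a user-supplied
// GODEBUG=profstackdepth value into [0, maxProfStackDepth]. A value of
// 0 means "record no profiler stacks", matching the guards later in
// this diff.
func clampProfStackDepth(n int32) int32 {
	if n < 0 {
		n = 0
	}
	if n > maxProfStackDepth {
		n = maxProfStackDepth
	}
	return n
}

func main() {
	for _, n := range []int32{-5, 0, 128, 4096} {
		fmt.Printf("profstackdepth=%d -> %d\n", n, clampProfStackDepth(n))
	}
}
```

With the knob in place, a deployment could presumably opt into deeper stacks (e.g. GODEBUG=profstackdepth=512) at the cost of larger per-M stack buffers, instead of being pinned to the old compile-time maxLogicalStack of 128.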
@@ -254,10 +241,11 @@ func newBucket(typ bucketType, nstk int) *bucket {
 	return b
 }

-// stk returns the slice in b holding the stack.
+// stk returns the slice in b holding the stack. The caller can assume that the
+// backing array is immutable.
 func (b *bucket) stk() []uintptr {
-	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
-	if b.nstk > maxStack {
+	stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+	if b.nstk > maxProfStackDepth {
 		// prove that slicing works; otherwise a failure requires a P
 		throw("bad profile stack count")
 	}
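For readers unfamiliar with the trick in bucket.stk: the recorded PCs live directly after the bucket header in a single allocation, and the method reinterprets that trailing memory as a fixed-size array before slicing it down to length. A self-contained sketch of the same header-plus-trailing-array pattern, with illustrative names (hdr, maxDepth) and a make([]byte, ...) allocation standing in for the runtime allocator; it assumes the allocation is word-aligned, which Go's heap provides in practice:

```go
package main

import (
	"fmt"
	"unsafe"
)

const maxDepth = 1024 // stand-in for maxProfStackDepth

// hdr is a stand-in for the runtime's bucket header; the recorded PCs
// live immediately after it in the same allocation.
type hdr struct {
	nstk uintptr
}

// add mirrors the runtime helper of the same name: pointer arithmetic
// done in a single expression, per the unsafe package rules.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

// stk views the memory after the header as a large fixed-size array and
// slices it to the recorded length, just like bucket.stk above. Only
// the first nstk slots actually exist in the allocation, so the slice
// must never be regrown past its capacity.
func (h *hdr) stk() []uintptr {
	a := (*[maxDepth]uintptr)(add(unsafe.Pointer(h), unsafe.Sizeof(*h)))
	return a[:h.nstk:h.nstk]
}

func main() {
	n := uintptr(3)
	// One contiguous block: header followed by n uintptr slots.
	buf := make([]byte, unsafe.Sizeof(hdr{})+n*unsafe.Sizeof(uintptr(0)))
	h := (*hdr)(unsafe.Pointer(&buf[0]))
	h.nstk = n
	copy(h.stk(), []uintptr{0x1, 0x2, 0x3}) // pretend these are PCs
	fmt.Println(h.stk())                    // [1 2 3]
}
```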
@@ -455,7 +443,7 @@ func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
 	}
 	// Only use the part of mp.profStack we need and ignore the extra space
 	// reserved for delayed inline expansion with frame pointer unwinding.
-	nstk := callers(4, mp.profStack[:maxLogicalStack])
+	nstk := callers(4, mp.profStack[:debug.profstackdepth])
 	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

 	b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
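The depth cap works because callers fills at most len(slice) entries, so slicing mp.profStack to debug.profstackdepth bounds the recorded stack. The same property holds for the public runtime.Callers, which this standalone sketch uses to demonstrate the effect:

```go
package main

import (
	"fmt"
	"runtime"
)

// deep recurses to build a call stack deeper than the buffer we pass
// to runtime.Callers.
func deep(n int, pcs []uintptr) int {
	if n == 0 {
		// runtime.Callers fills at most len(pcs) program counters,
		// so the slice length acts as the depth cap, analogous to
		// mp.profStack[:debug.profstackdepth] above.
		return runtime.Callers(0, pcs)
	}
	return deep(n-1, pcs)
}

func main() {
	depth := 8 // stand-in for a GODEBUG=profstackdepth setting
	pcs := make([]uintptr, depth)
	n := deep(32, pcs)
	fmt.Println("recorded", n, "frames of a much deeper stack") // n == 8
}
```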
@@ -542,12 +530,18 @@ func blocksampled(cycles, rate int64) bool {
 // skip should be positive if this event is recorded from the current stack
 // (e.g. when this is not called from a system stack)
 func saveblockevent(cycles, rate int64, skip int, which bucketType) {
+	if debug.profstackdepth == 0 {
+		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+		// can't record a stack trace.
+		return
+	}
 	if skip > maxSkip {
 		print("requested skip=", skip)
 		throw("invalid skip value")
 	}
 	gp := getg()
 	mp := acquirem() // we must not be preempted while accessing profstack
+
 	nstk := 1
 	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
 		mp.profStack[0] = logicalStackSentinel
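The early return matters because, with this change, a depth of 0 presumably means the per-M profStack buffer is never allocated. A tiny sketch of why the guard must run before any buffer access (all names here are illustrative, not the runtime's):

```go
package main

import "fmt"

// profStack stands in for the per-M stack buffer, which is left nil
// when the (assumed) profstackdepth setting is 0.
var (
	profstackdepth int32 // e.g. GODEBUG=profstackdepth=0
	profStack      []uintptr
)

func saveEvent() {
	if profstackdepth == 0 {
		// Without this guard the write below would panic: a nil
		// slice has length 0, so profStack[0] is out of range.
		return
	}
	profStack[0] = 0xCAFE
}

func main() {
	saveEvent() // safe no-op with profstackdepth == 0
	fmt.Println("no panic; buffer still nil:", profStack == nil)
}
```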
@@ -736,6 +730,12 @@ func (prof *mLockProfile) recordUnlock(l *mutex) {
 }

 func (prof *mLockProfile) captureStack() {
+	if debug.profstackdepth == 0 {
+		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+		// can't record a stack trace.
+		return
+	}
+
 	skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
 	if staticLockRanking {
 		// When static lock ranking is enabled, we'll always be on the system
@@ -780,7 +780,7 @@ func (prof *mLockProfile) store() {
 	mp := acquirem()
 	prof.disabled = true

-	nstk := maxStack
+	nstk := int(debug.profstackdepth)
 	for i := 0; i < nstk; i++ {
 		if pc := prof.stack[i]; pc == 0 {
 			nstk = i
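The loop above recovers the true stack length by scanning for the first zero PC, now bounded by the configured depth instead of the old fixed maxStack. The scan in isolation, as a runnable sketch:

```go
package main

import "fmt"

// stackLen mirrors the loop above: the stack buffer is zero-filled, so
// the first 0 PC marks the end of the recorded stack.
func stackLen(stack []uintptr) int {
	for i, pc := range stack {
		if pc == 0 {
			return i
		}
	}
	return len(stack)
}

func main() {
	buf := make([]uintptr, 8) // pretend depth=8 buffer
	copy(buf, []uintptr{0xA, 0xB, 0xC})
	fmt.Println(stackLen(buf)) // 3
}
```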