
Commit 9114c51

rhysh authored and gopherbot committed
Revert "runtime: prepare for extensions to waiting M list"
This reverts commit be0b569 (CL 585635).

Reason for revert: This is part of a patch series that changed the
handling of contended lock2/unlock2 calls, reducing the maximum
throughput of contended runtime.mutex values, and causing a performance
regression on applications where that is (or became) the bottleneck.

Updates #66999
Updates #67585

Change-Id: I7843ccaecbd273b7ceacfa0f420dd993b4b15a0a
Reviewed-on: https://go-review.googlesource.com/c/go/+/589117
Auto-Submit: Rhys Hiltner <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Than McIntosh <[email protected]>
Reviewed-by: Michael Pratt <[email protected]>
1 parent 3f4be12 commit 9114c51
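
The stated reason for the revert is lower maximum throughput of contended runtime.mutex values. runtime.mutex is internal to the runtime, so user code cannot benchmark lock2/unlock2 directly; as a rough, hedged illustration of the kind of workload in question, the sketch below hammers one sync.Mutex from many goroutines (contended sync.Mutex waiters park through the runtime's semaphore path). The benchmark name and parallelism factor are arbitrary choices of this sketch, not anything taken from the CL.

package contention_test

import (
	"sync"
	"testing"
)

// BenchmarkContendedMutex is a stand-in workload: many goroutines fighting
// over one lock, so throughput is dominated by contended lock handoff.
func BenchmarkContendedMutex(b *testing.B) {
	var (
		mu      sync.Mutex
		counter int
	)
	b.SetParallelism(8) // 8x GOMAXPROCS goroutines to keep the lock contended
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			counter++ // trivial critical section; handoff cost dominates
			mu.Unlock()
		}
	})
	_ = counter
}

Running something like go test -bench=ContendedMutex -count=10 on toolchains before and after a change of this kind, and comparing with benchstat, is one way to observe the sort of throughput difference the message describes.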

3 files changed (+6, -17 lines)

src/runtime/lock_sema.go

Lines changed: 5 additions & 5 deletions

@@ -77,11 +77,11 @@ Loop:
 			osyield()
 		} else {
 			// Someone else has it.
-			// l.key points to a linked list of M's waiting
-			// for this lock, chained through m.mWaitList.next.
+			// l->waitm points to a linked list of M's waiting
+			// for this lock, chained through m->nextwaitm.
 			// Queue this M.
 			for {
-				gp.m.mWaitList.next = muintptr(v &^ locked)
+				gp.m.nextwaitm = muintptr(v &^ locked)
 				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
 					break
 				}
@@ -119,7 +119,7 @@ func unlock2(l *mutex) {
 			// Other M's are waiting for the lock.
 			// Dequeue an M.
 			mp = muintptr(v &^ locked).ptr()
-			if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
+			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
 				// Dequeued an M. Wake it.
 				semawakeup(mp)
 				break
@@ -200,7 +200,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
 	// This reduces the nosplit footprint of notetsleep_internal.
 	gp = getg()
 
-	// Register for wakeup on n.key.
+	// Register for wakeup on n->waitm.
 	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
 		// Must be locked (got wakeup).
 		if n.key != locked {
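
For orientation, here is a minimal user-space sketch of the list discipline these hunks restore: the mutex key is a single word whose low bit is the lock flag and whose remaining bits identify the head waiter, with further waiters chained through a per-waiter next field (the role m.nextwaitm plays above). This is illustrative only, not runtime code: small integer IDs stand in for M pointers, a buffered channel stands in for semasleep/semawakeup, and spinning, lock ranking, and contention profiling are all omitted.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const lockedBit uintptr = 1

// waiter stands in for an M; next plays the role of m.nextwaitm.
type waiter struct {
	id   uintptr       // index into the waiters table, >= 1
	next uintptr       // encoded word of the next queued waiter, 0 if none
	wake chan struct{} // stands in for the M's wakeup semaphore
}

func (w *waiter) word() uintptr { return w.id << 1 } // low bit left free for lockedBit

var (
	key     atomic.Uintptr // plays the role of mutex.key
	waiters [8]*waiter     // id -> waiter, filled in before any locking
)

// lock mirrors lock2's slow path: take the lock if it is free, otherwise
// CAS ourselves in as the new list head and sleep until dequeued.
func lock(w *waiter) {
	for {
		v := key.Load()
		if v&lockedBit == 0 {
			// Unlocked (any queued waiters stay queued). Try to take it.
			if key.CompareAndSwap(v, v|lockedBit) {
				return
			}
			continue
		}
		// Someone else has it. Queue this waiter, chained through w.next.
		w.next = v &^ lockedBit
		if key.CompareAndSwap(v, w.word()|lockedBit) {
			<-w.wake // queued; wait for an unlock to dequeue and wake us
		}
	}
}

// unlock mirrors unlock2: with no waiters, clear the word; otherwise pop
// the head waiter, publish its next link as the new key, and wake it.
func unlock() {
	for {
		v := key.Load()
		if v == lockedBit {
			if key.CompareAndSwap(lockedBit, 0) {
				return
			}
			continue
		}
		head := waiters[(v&^lockedBit)>>1]
		if key.CompareAndSwap(v, head.next) {
			head.wake <- struct{}{} // plays the role of semawakeup(mp)
			return
		}
	}
}

func main() {
	const goroutines, iters = 4, 1000
	for i := 1; i <= goroutines; i++ {
		waiters[i] = &waiter{id: uintptr(i), wake: make(chan struct{}, 1)}
	}
	var wg sync.WaitGroup
	counter := 0
	for i := 1; i <= goroutines; i++ {
		w := waiters[i]
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := 0; n < iters; n++ {
				lock(w)
				counter++ // protected by the sketch lock
				unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // 4000 if mutual exclusion held
}

Note the handoff in unlock: the new key value is the dequeued waiter's next link with the locked bit clear, so the woken waiter still has to compete for the lock, just as unlock2 leaves the woken M to re-enter lock2's loop.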

src/runtime/mprof.go

Lines changed: 0 additions & 11 deletions

@@ -667,17 +667,6 @@ func (lt *lockTimer) end() {
 	}
 }
 
-// mWaitList is part of the M struct, and holds the list of Ms that are waiting
-// for a particular runtime.mutex.
-//
-// When an M is unable to immediately obtain a lock, it adds itself to the list
-// of Ms waiting for the lock. It does that via this struct's next field,
-// forming a singly-linked list with the mutex's key field pointing to the head
-// of the list.
-type mWaitList struct {
-	next muintptr // next m waiting for lock (set by us, cleared by another during unlock)
-}
-
 type mLockProfile struct {
 	waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
 	stack    []uintptr    // stack that experienced contention in runtime.lockWithRank

src/runtime/runtime2.go

Lines changed: 1 addition & 1 deletion

@@ -596,8 +596,8 @@ type m struct {
 	createstack   [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
 	lockedExt     uint32      // tracking for external LockOSThread
 	lockedInt     uint32      // tracking for internal lockOSThread
+	nextwaitm     muintptr    // next m waiting for lock
 
-	mWaitList     mWaitList   // list of runtime lock waiters
 	mLockProfile  mLockProfile // fields relating to runtime.lock contention
 	profStack     []uintptr    // used for memory/block/mutex stack traces
 
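
As a side note on the design being reverted: wrapping the link in a named mWaitList struct (the commit title's "prepare for extensions to waiting M list") leaves room to attach more per-waiter state later without adding fields directly to type m. A hedged sketch of the two shapes follows; mPlain and mWrapped are hypothetical stand-ins for type m, and the commented-out extra field is purely illustrative, not anything in the runtime.

package sketch

// muintptr mirrors the runtime's name for an m pointer stored as a bare word.
type muintptr uintptr

// Shape restored by this revert: a single field directly on the M.
type mPlain struct {
	nextwaitm muintptr // next m waiting for lock
}

// Shape being reverted (from CL 585635): a named wrapper, so future
// per-waiter state can live alongside next without touching type m.
type mWaitList struct {
	next muintptr // next m waiting for lock
	// acquireTicks int64 // hypothetical extension slot
}

type mWrapped struct {
	mWaitList mWaitList // list of runtime lock waiters
}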
