
Commit 8c9c148

prattmic authored and dmitshur committed
[release-branch.go1.16] runtime: consistently access pollDesc r/w Gs with atomics
Both netpollblock and netpollunblock read gpp using a non-atomic load. When consuming a ready event, netpollblock clears gpp using a non-atomic store, thus skipping a barrier. Thus on systems with weak memory ordering, a sequence like so is possible:

1. T1: netpollblock: read gpp -> pdReady
2. T1: netpollblock: store gpp -> 0
3. T2: netpollunblock: read gpp -> pdReady
4. T2: netpollunblock: return

i.e., without a happens-before edge between (2) and (3), netpollunblock may read the stale value of gpp.

Switch these accesses to atomic loads and stores in order to create these edges. For ease of future maintenance, I've simply changed rg and wg to always be accessed atomically, though I don't believe pollOpen or pollClose require atomics today.

For #48925
Fixes #49009

Change-Id: I903ea667eea320277610b4f969129935731520c3
Reviewed-on: https://go-review.googlesource.com/c/go/+/355952
Trust: Michael Pratt <[email protected]>
Run-TryBot: Michael Pratt <[email protected]>
TryBot-Result: Go Bot <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
Reviewed-by: David Chase <[email protected]>
(cherry picked from commit 1b072b3)
Reviewed-on: https://go-review.googlesource.com/c/go/+/356370
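To make the missing barrier concrete, here is a minimal sketch of the old and new consume paths using the public sync/atomic API. This is illustrative code, not the runtime's: gpp, racyConsume, and atomicConsume are invented names, and user code reaches for CompareAndSwapUintptr where the runtime uses its internal atomic.Casuintptr.

package main

import "sync/atomic"

const pdReady uintptr = 1 // illustrative sentinel, mirroring the runtime's

// racyConsume models the old netpollblock path: a plain load and a plain
// store. The store of 0 carries no release barrier, so on weakly ordered
// hardware a concurrent plain load may still observe the stale pdReady.
func racyConsume(gpp *uintptr) bool {
	if *gpp == pdReady {
		*gpp = 0
		return true
	}
	return false
}

// atomicConsume models the fixed path: one CAS both consumes the
// notification and orders the update against concurrent atomic loads.
func atomicConsume(gpp *uintptr) bool {
	return atomic.CompareAndSwapUintptr(gpp, pdReady, 0)
}

func main() {
	gpp := pdReady
	_ = atomicConsume(&gpp) // consumes the ready event exactly once
	_ = racyConsume(&gpp)   // unsafe if other goroutines touch gpp
}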
1 parent 1a6281d commit 8c9c148

File tree

1 file changed: +26 −17

src/runtime/netpoll.go

Lines changed: 26 additions & 17 deletions
@@ -74,6 +74,7 @@ type pollDesc struct {
 	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
 	// proceed w/o taking the lock. So closing, everr, rg, rd, wg and wd are manipulated
 	// in a lock-free way by all operations.
+	// TODO(golang.org/issue/49008): audit these lock-free fields for continued correctness.
 	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
 	// that will blow up when GC starts moving objects.
 	lock    mutex // protects the following fields
@@ -82,11 +83,11 @@ type pollDesc struct {
 	everr   bool    // marks event scanning error happened
 	user    uint32  // user settable cookie
 	rseq    uintptr // protects from stale read timers
-	rg      uintptr // pdReady, pdWait, G waiting for read or nil
+	rg      uintptr // pdReady, pdWait, G waiting for read or nil. Accessed atomically.
 	rt      timer   // read deadline timer (set if rt.f != nil)
 	rd      int64   // read deadline
 	wseq    uintptr // protects from stale write timers
-	wg      uintptr // pdReady, pdWait, G waiting for write or nil
+	wg      uintptr // pdReady, pdWait, G waiting for write or nil. Accessed atomically.
 	wt      timer   // write deadline timer
 	wd      int64   // write deadline
 	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
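For readers skimming the diff: each of rg and wg is a single word encoding four states, which is why every lock-free access must now be atomic. A hedged sketch of that encoding (the sentinel values match the pdReady and pdWait constants declared near the top of netpoll.go; the package name is invented):

package netpollmodel // illustrative only, not runtime code

// Each of pd.rg and pd.wg is one uintptr holding one of four states.
const (
	pdReady uintptr = 1 // an IO event arrived and has not been consumed
	pdWait  uintptr = 2 // a goroutine is committing to park on this word
)

// A value of 0 means idle: no pending event, no waiter. Any other value
// is the parked waiter's *g stored as a uintptr, which is why the
// NOTE(dvyukov) above warns this breaks if the GC ever moves goroutines.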
@@ -143,20 +144,22 @@ func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
 func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
 	pd := pollcache.alloc()
 	lock(&pd.lock)
-	if pd.wg != 0 && pd.wg != pdReady {
+	wg := atomic.Loaduintptr(&pd.wg)
+	if wg != 0 && wg != pdReady {
 		throw("runtime: blocked write on free polldesc")
 	}
-	if pd.rg != 0 && pd.rg != pdReady {
+	rg := atomic.Loaduintptr(&pd.rg)
+	if rg != 0 && rg != pdReady {
 		throw("runtime: blocked read on free polldesc")
 	}
 	pd.fd = fd
 	pd.closing = false
 	pd.everr = false
 	pd.rseq++
-	pd.rg = 0
+	atomic.Storeuintptr(&pd.rg, 0)
 	pd.rd = 0
 	pd.wseq++
-	pd.wg = 0
+	atomic.Storeuintptr(&pd.wg, 0)
 	pd.wd = 0
 	pd.self = pd
 	unlock(&pd.lock)
@@ -171,10 +174,12 @@ func poll_runtime_pollClose(pd *pollDesc) {
 	if !pd.closing {
 		throw("runtime: close polldesc w/o unblock")
 	}
-	if pd.wg != 0 && pd.wg != pdReady {
+	wg := atomic.Loaduintptr(&pd.wg)
+	if wg != 0 && wg != pdReady {
 		throw("runtime: blocked write on closing polldesc")
 	}
-	if pd.rg != 0 && pd.rg != pdReady {
+	rg := atomic.Loaduintptr(&pd.rg)
+	if rg != 0 && rg != pdReady {
 		throw("runtime: blocked read on closing polldesc")
 	}
 	netpollclose(pd.fd)
@@ -198,9 +203,9 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int {
 		return errcode
 	}
 	if mode == 'r' {
-		pd.rg = 0
+		atomic.Storeuintptr(&pd.rg, 0)
 	} else if mode == 'w' {
-		pd.wg = 0
+		atomic.Storeuintptr(&pd.wg, 0)
 	}
 	return pollNoError
 }
@@ -410,6 +415,8 @@ func netpollgoready(gp *g, traceskip int) {
 
 // returns true if IO is ready, or false if timedout or closed
 // waitio - wait only for completed IO, ignore errors
+// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
+// can hold only a single waiting goroutine for each mode.
 func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 	gpp := &pd.rg
 	if mode == 'w' {
@@ -418,17 +425,19 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 
 	// set the gpp semaphore to pdWait
 	for {
-		old := *gpp
-		if old == pdReady {
-			*gpp = 0
+		// Consume notification if already ready.
+		if atomic.Casuintptr(gpp, pdReady, 0) {
 			return true
 		}
-		if old != 0 {
-			throw("runtime: double wait")
-		}
 		if atomic.Casuintptr(gpp, 0, pdWait) {
 			break
 		}
+
+		// Double check that this isn't corrupt; otherwise we'd loop
+		// forever.
+		if v := atomic.Loaduintptr(gpp); v != pdReady && v != 0 {
+			throw("runtime: double wait")
+		}
 	}
 
 	// need to recheck error states after setting gpp to pdWait
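Outside the runtime, atomic.Casuintptr corresponds to sync/atomic's CompareAndSwapUintptr. Continuing the illustrative netpollmodel sketch above, a simplified model of the rewritten loop (a sketch only: the real netpollblock parks the goroutine with gopark rather than returning, and still rechecks error states afterward):

package netpollmodel

import "sync/atomic"

// block models the rewritten netpollblock loop: consume a pending
// notification if there is one, otherwise atomically claim the idle
// slot as the single waiter. Any other observed value means two
// goroutines are waiting in the same mode, which the API forbids.
func block(gpp *uintptr) (consumed bool) {
	for {
		// Consume notification if already ready.
		if atomic.CompareAndSwapUintptr(gpp, pdReady, 0) {
			return true
		}
		// Claim the slot; the runtime would gopark here.
		if atomic.CompareAndSwapUintptr(gpp, 0, pdWait) {
			return false
		}
		// Double check that the word isn't corrupt; otherwise this
		// loop would spin forever.
		if v := atomic.LoadUintptr(gpp); v != pdReady && v != 0 {
			panic("netpollmodel: double wait")
		}
	}
}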
@@ -452,7 +461,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
 	}
 
 	for {
-		old := *gpp
+		old := atomic.Loaduintptr(gpp)
 		if old == pdReady {
 			return nil
 		}
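And the unblock side, completing the model (simplified: the real netpollunblock also handles the !ioready case and returns the parked *g for wakeup; here we only report whether a parked waiter existed):

package netpollmodel

import "sync/atomic"

// unblock models netpollunblock with the fix applied: the initial read
// is an atomic load, so it can no longer observe a stale pdReady that a
// concurrent block already consumed and cleared.
func unblock(gpp *uintptr) (hadWaiter bool) {
	for {
		old := atomic.LoadUintptr(gpp) // the fix: was `old := *gpp`
		if old == pdReady {
			return false // readiness already published
		}
		if atomic.CompareAndSwapUintptr(gpp, old, pdReady) {
			// In the runtime, any old value other than 0 or pdWait
			// is the parked waiter's *g, returned for wakeup.
			return old != 0 && old != pdWait
		}
	}
}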
