Commit 8bdb0b2

prattmic authored and dmitshur committed
[release-branch.go1.17] runtime: consistently access pollDesc r/w Gs with atomics
Both netpollblock and netpollunblock read gpp using a non-atomic load.
When consuming a ready event, netpollblock clears gpp using a
non-atomic store, thus skipping a barrier. Thus, on systems with weak
memory ordering, a sequence like the following is possible:

             T1                                T2

    1. netpollblock: read gpp -> pdReady
    2. netpollblock: store gpp -> 0

                                  3. netpollunblock: read gpp -> pdReady
                                  4. netpollunblock: return

i.e., without a happens-before edge between (2) and (3),
netpollunblock may read the stale value of gpp.

Switch these accesses to use atomic loads and stores in order to
create these edges.

For ease of future maintenance, I've simply changed rg and wg to
always be accessed atomically, though I don't believe pollOpen or
pollClose require atomics today.

For #48925
Fixes #49010

Change-Id: I903ea667eea320277610b4f969129935731520c3
Reviewed-on: https://go-review.googlesource.com/c/go/+/355952
Trust: Michael Pratt <[email protected]>
Run-TryBot: Michael Pratt <[email protected]>
TryBot-Result: Go Bot <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
Reviewed-by: David Chase <[email protected]>
(cherry picked from commit 1b072b3)
Reviewed-on: https://go-review.googlesource.com/c/go/+/356369
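Editor's note: to make the happens-before argument concrete, here is a minimal, self-contained sketch of the handoff pattern the fix establishes. It uses the standard sync/atomic package as a stand-in for the runtime-internal atomic package, and illustrative pdReady/pdWait values; the names notify and consume are hypothetical, not part of any Go API.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// Illustrative stand-ins for the runtime's unexported pollDesc states.
	const (
		pdReady uintptr = 1
		pdWait  uintptr = 2
	)

	// notify plays the netpollready/netpollunblock role: an atomic store
	// publishes the readiness notification, so a later atomic load on
	// another thread cannot observe a stale value.
	func notify(gpp *uintptr) {
		atomic.StoreUintptr(gpp, pdReady)
	}

	// consume plays netpollblock's fast path: atomically swap pdReady back
	// to 0. The CAS creates the happens-before edge between this store and
	// the next atomic read, an edge a plain load/store pair does not
	// guarantee on weakly ordered machines.
	func consume(gpp *uintptr) bool {
		return atomic.CompareAndSwapUintptr(gpp, pdReady, 0)
	}

	func main() {
		var gpp uintptr
		notify(&gpp)
		fmt.Println(consume(&gpp)) // true: notification consumed exactly once
		fmt.Println(consume(&gpp)) // false: gpp is 0 again, ready for a waiter
	}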
1 parent 3a03ddf commit 8bdb0b2

File tree: 1 file changed (+26 −17)


src/runtime/netpoll.go

Lines changed: 26 additions & 17 deletions
@@ -78,6 +78,7 @@ type pollDesc struct {
 	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
 	// proceed w/o taking the lock. So closing, everr, rg, rd, wg and wd are manipulated
 	// in a lock-free way by all operations.
+	// TODO(golang.org/issue/49008): audit these lock-free fields for continued correctness.
 	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
 	// that will blow up when GC starts moving objects.
 	lock mutex // protects the following fields
@@ -86,11 +87,11 @@ type pollDesc struct {
 	everr   bool      // marks event scanning error happened
 	user    uint32    // user settable cookie
 	rseq    uintptr   // protects from stale read timers
-	rg      uintptr   // pdReady, pdWait, G waiting for read or nil
+	rg      uintptr   // pdReady, pdWait, G waiting for read or nil. Accessed atomically.
 	rt      timer     // read deadline timer (set if rt.f != nil)
 	rd      int64     // read deadline
 	wseq    uintptr   // protects from stale write timers
-	wg      uintptr   // pdReady, pdWait, G waiting for write or nil
+	wg      uintptr   // pdReady, pdWait, G waiting for write or nil. Accessed atomically.
 	wt      timer     // write deadline timer
 	wd      int64     // write deadline
 	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
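Editor's note: for readers of the hunk above, rg and wg encode a small lock-free state machine in a single word. A sketch of the possible states follows (the pdNil name is invented here for clarity; the runtime simply uses 0, and the concrete values are the ones declared near the top of netpoll.go):

	// Possible values of pollDesc.rg / pollDesc.wg:
	const (
		pdNil   uintptr = 0 // no goroutine waiting and no notification pending
		pdReady uintptr = 1 // an IO readiness notification is pending
		pdWait  uintptr = 2 // a goroutine is preparing to park on the word
	)
	// Any other value is the *g of the parked goroutine, stored as a uintptr,
	// which is why the NOTE above warns about a future moving GC.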
@@ -147,20 +148,22 @@ func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
 func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
 	pd := pollcache.alloc()
 	lock(&pd.lock)
-	if pd.wg != 0 && pd.wg != pdReady {
+	wg := atomic.Loaduintptr(&pd.wg)
+	if wg != 0 && wg != pdReady {
 		throw("runtime: blocked write on free polldesc")
 	}
-	if pd.rg != 0 && pd.rg != pdReady {
+	rg := atomic.Loaduintptr(&pd.rg)
+	if rg != 0 && rg != pdReady {
 		throw("runtime: blocked read on free polldesc")
 	}
 	pd.fd = fd
 	pd.closing = false
 	pd.everr = false
 	pd.rseq++
-	pd.rg = 0
+	atomic.Storeuintptr(&pd.rg, 0)
 	pd.rd = 0
 	pd.wseq++
-	pd.wg = 0
+	atomic.Storeuintptr(&pd.wg, 0)
 	pd.wd = 0
 	pd.self = pd
 	unlock(&pd.lock)
@@ -178,10 +181,12 @@ func poll_runtime_pollClose(pd *pollDesc) {
 	if !pd.closing {
 		throw("runtime: close polldesc w/o unblock")
 	}
-	if pd.wg != 0 && pd.wg != pdReady {
+	wg := atomic.Loaduintptr(&pd.wg)
+	if wg != 0 && wg != pdReady {
 		throw("runtime: blocked write on closing polldesc")
 	}
-	if pd.rg != 0 && pd.rg != pdReady {
+	rg := atomic.Loaduintptr(&pd.rg)
+	if rg != 0 && rg != pdReady {
 		throw("runtime: blocked read on closing polldesc")
 	}
 	netpollclose(pd.fd)
@@ -205,9 +210,9 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int {
 		return errcode
 	}
 	if mode == 'r' {
-		pd.rg = 0
+		atomic.Storeuintptr(&pd.rg, 0)
 	} else if mode == 'w' {
-		pd.wg = 0
+		atomic.Storeuintptr(&pd.wg, 0)
 	}
 	return pollNoError
 }
@@ -417,6 +422,8 @@ func netpollgoready(gp *g, traceskip int) {
 
 // returns true if IO is ready, or false if timedout or closed
 // waitio - wait only for completed IO, ignore errors
+// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
+// can hold only a single waiting goroutine for each mode.
 func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 	gpp := &pd.rg
 	if mode == 'w' {
@@ -425,17 +432,19 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 
 	// set the gpp semaphore to pdWait
 	for {
-		old := *gpp
-		if old == pdReady {
-			*gpp = 0
+		// Consume notification if already ready.
+		if atomic.Casuintptr(gpp, pdReady, 0) {
 			return true
 		}
-		if old != 0 {
-			throw("runtime: double wait")
-		}
 		if atomic.Casuintptr(gpp, 0, pdWait) {
 			break
 		}
+
+		// Double check that this isn't corrupt; otherwise we'd loop
+		// forever.
+		if v := atomic.Loaduintptr(gpp); v != pdReady && v != 0 {
+			throw("runtime: double wait")
+		}
 	}
 
 	// need to recheck error states after setting gpp to pdWait
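Editor's note: taken together, the rewritten netpollblock loop reads roughly like the sketch below, using the same sync/atomic stand-ins and illustrative constants as in the earlier sketches; parkState and the panic are hypothetical placeholders for the unexported runtime code path and throw.

	// parkState mirrors the patched CAS loop: consume a pending pdReady
	// notification if one exists, otherwise move the word from 0 to pdWait
	// so the caller may park. Any other observed value indicates a double
	// wait or corruption; without the explicit load-and-check the loop
	// would spin forever instead of failing loudly.
	func parkState(gpp *uintptr) (consumed bool) {
		for {
			// Consume notification if already ready.
			if atomic.CompareAndSwapUintptr(gpp, pdReady, 0) {
				return true
			}
			if atomic.CompareAndSwapUintptr(gpp, 0, pdWait) {
				return false // caller would now park the goroutine
			}
			// Double check that the word holds an expected value;
			// otherwise we'd loop forever.
			if v := atomic.LoadUintptr(gpp); v != pdReady && v != 0 {
				panic("runtime: double wait")
			}
		}
	}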
@@ -459,7 +468,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
 	}
 
 	for {
-		old := *gpp
+		old := atomic.Loaduintptr(gpp)
 		if old == pdReady {
 			return nil
 		}
