@@ -72,31 +72,99 @@ const pollBlockSize = 4 * 1024
 //go:notinheap
 type pollDesc struct {
 	link *pollDesc // in pollcache, protected by pollcache.lock
+	fd   uintptr   // constant for pollDesc usage lifetime
+
+	// atomicInfo holds bits from closing, rd, and wd,
+	// which are only ever written while holding the lock,
+	// summarized for use by netpollcheckerr,
+	// which cannot acquire the lock.
+	// After writing these fields under lock in a way that
+	// might change the summary, code must call publishInfo
+	// before releasing the lock.
+	// Code that changes fields and then calls netpollunblock
+	// (while still holding the lock) must call publishInfo
+	// before calling netpollunblock, because publishInfo is what
+	// stops netpollblock from blocking anew
+	// (by changing the result of netpollcheckerr).
+	// atomicInfo also holds the eventErr bit,
+	// recording whether a poll event on the fd got an error;
+	// atomicInfo is the only source of truth for that bit.
+	atomicInfo uint32 // atomic pollInfo
+
+	// rg, wg are accessed atomically and hold g pointers.
+	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
+	rg uintptr // pdReady, pdWait, G waiting for read or nil
+	wg uintptr // pdReady, pdWait, G waiting for write or nil
 
-	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
-	// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
-	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
-	// proceed w/o taking the lock. So closing, everr, rg, rd, wg and wd are manipulated
-	// in a lock-free way by all operations.
-	// TODO(golang.org/issue/49008): audit these lock-free fields for continued correctness.
-	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
-	// that will blow up when GC starts moving objects.
 	lock    mutex // protects the following fields
-	fd      uintptr
 	closing bool
-	everr   bool      // marks event scanning error happened
 	user    uint32    // user settable cookie
 	rseq    uintptr   // protects from stale read timers
-	rg      uintptr   // pdReady, pdWait, G waiting for read or nil. Accessed atomically.
 	rt      timer     // read deadline timer (set if rt.f != nil)
-	rd      int64     // read deadline
+	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
 	wseq    uintptr   // protects from stale write timers
-	wg      uintptr   // pdReady, pdWait, G waiting for write or nil. Accessed atomically.
 	wt      timer     // write deadline timer
-	wd      int64     // write deadline
+	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
 	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
 }
 
+// pollInfo is the bits needed by netpollcheckerr, stored atomically,
+// mostly duplicating state that is manipulated under lock in pollDesc.
+// The one exception is the pollEventErr bit, which is maintained only
+// in the pollInfo.
+type pollInfo uint32
+
+const (
+	pollClosing = 1 << iota
+	pollEventErr
+	pollExpiredReadDeadline
+	pollExpiredWriteDeadline
+)
+
+func (i pollInfo) closing() bool              { return i&pollClosing != 0 }
+func (i pollInfo) eventErr() bool             { return i&pollEventErr != 0 }
+func (i pollInfo) expiredReadDeadline() bool  { return i&pollExpiredReadDeadline != 0 }
+func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }
+
+// info returns the pollInfo corresponding to pd.
+func (pd *pollDesc) info() pollInfo {
+	return pollInfo(atomic.Load(&pd.atomicInfo))
+}
+
+// publishInfo updates pd.atomicInfo (returned by pd.info)
+// using the other values in pd.
+// It must be called while holding pd.lock,
+// and it must be called after changing anything
+// that might affect the info bits.
+// In practice this means after changing closing
+// or changing rd or wd from < 0 to >= 0.
+func (pd *pollDesc) publishInfo() {
+	var info uint32
+	if pd.closing {
+		info |= pollClosing
+	}
+	if pd.rd < 0 {
+		info |= pollExpiredReadDeadline
+	}
+	if pd.wd < 0 {
+		info |= pollExpiredWriteDeadline
+	}
+
+	// Set all of x except the pollEventErr bit.
+	x := atomic.Load(&pd.atomicInfo)
+	for !atomic.Cas(&pd.atomicInfo, x, (x&pollEventErr)|info) {
+		x = atomic.Load(&pd.atomicInfo)
+	}
+}
+
+// setEventErr sets the result of pd.info().eventErr() to b.
+func (pd *pollDesc) setEventErr(b bool) {
+	x := atomic.Load(&pd.atomicInfo)
+	for (x&pollEventErr != 0) != b && !atomic.Cas(&pd.atomicInfo, x, x^pollEventErr) {
+		x = atomic.Load(&pd.atomicInfo)
+	}
+}
+
 type pollCache struct {
 	lock  mutex
 	first *pollDesc
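The publish/summary pattern above is easier to see outside the runtime. Below is a minimal, self-contained sketch of it using sync/atomic in place of runtime/internal/atomic; every name in it (desc, publish, flag*) is invented for illustration and is not part of the patch. The property it reproduces is the split ownership of the word: publish recomputes every bit except the eventErr bit from lock-protected fields, while setEventErr flips only that one bit, lock-free, so the two writers never clobber each other. The CAS loops, rather than plain stores, are what make that coexistence safe.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	flagClosing = 1 << iota
	flagEventErr // owned by setEventErr; publish preserves it untouched
	flagExpiredRead
)

type desc struct {
	mu      sync.Mutex
	closing bool  // written only under mu
	rd      int64 // written only under mu
	info    uint32
}

// publish recomputes all bits except flagEventErr from the
// lock-protected fields. Must be called with mu held.
func (d *desc) publish() {
	var info uint32
	if d.closing {
		info |= flagClosing
	}
	if d.rd < 0 {
		info |= flagExpiredRead
	}
	for {
		x := atomic.LoadUint32(&d.info)
		// Keep the eventErr bit exactly as it is; replace the rest.
		if atomic.CompareAndSwapUint32(&d.info, x, x&flagEventErr|info) {
			return
		}
	}
}

// setEventErr toggles only the eventErr bit, without the lock.
func (d *desc) setEventErr(b bool) {
	for {
		x := atomic.LoadUint32(&d.info)
		if (x&flagEventErr != 0) == b {
			return
		}
		if atomic.CompareAndSwapUint32(&d.info, x, x^flagEventErr) {
			return
		}
	}
}

func main() {
	d := new(desc)
	d.setEventErr(true) // poller side, lock-free

	d.mu.Lock()
	d.closing = true
	d.rd = -1
	d.publish() // summary must be visible before releasing the lock
	d.mu.Unlock()

	info := atomic.LoadUint32(&d.info)
	fmt.Println(info&flagClosing != 0, info&flagEventErr != 0, info&flagExpiredRead != 0)
	// Output: true true true
}
```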
@@ -158,14 +226,15 @@ func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
 	}
 	pd.fd = fd
 	pd.closing = false
-	pd.everr = false
+	pd.setEventErr(false)
 	pd.rseq++
 	atomic.Storeuintptr(&pd.rg, 0)
 	pd.rd = 0
 	pd.wseq++
 	atomic.Storeuintptr(&pd.wg, 0)
 	pd.wd = 0
 	pd.self = pd
+	pd.publishInfo()
 	unlock(&pd.lock)
 
 	errno := netpollopen(fd, pd)
@@ -274,6 +343,7 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
 	if mode == 'w' || mode == 'r'+'w' {
 		pd.wd = d
 	}
+	pd.publishInfo()
 	combo := pd.rd > 0 && pd.rd == pd.wd
 	rtf := netpollReadDeadline
 	if combo {
@@ -315,15 +385,13 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
 		}
 	}
 	// If we set the new deadline in the past, unblock currently pending IO if any.
+	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
 	var rg, wg *g
-	if pd.rd < 0 || pd.wd < 0 {
-		atomic.StorepNoWB(noescape(unsafe.Pointer(&wg)), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
-		if pd.rd < 0 {
-			rg = netpollunblock(pd, 'r', false)
-		}
-		if pd.wd < 0 {
-			wg = netpollunblock(pd, 'w', false)
-		}
+	if pd.rd < 0 {
+		rg = netpollunblock(pd, 'r', false)
+	}
+	if pd.wd < 0 {
+		wg = netpollunblock(pd, 'w', false)
 	}
 	unlock(&pd.lock)
 	if rg != nil {
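This hunk also shows why the explicit StorepNoWB barrier could be deleted: publishInfo was already called right after rd and wd were modified, and its CAS is a full barrier, so netpollunblock's loads of rg/wg are ordered after the deadline stores. The behavior being protected is observable from ordinary net code: a deadline set in the past must wake a Read that is already parked. A small standard-library demonstration follows; nothing in it is patch-internal, and the timings are illustrative only.

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// Client that connects and then stays silent, so the server's
	// Read below parks in the netpoller.
	go func() {
		c, err := net.Dial("tcp", ln.Addr().String())
		if err != nil {
			panic(err)
		}
		defer c.Close()
		time.Sleep(2 * time.Second)
	}()

	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// After 100ms, set a deadline that is already in the past.
	// pollSetDeadline stores rd < 0, publishes the expired bit,
	// and unblocks the parked reader.
	go func() {
		time.Sleep(100 * time.Millisecond)
		conn.SetReadDeadline(time.Now().Add(-time.Second))
	}()

	var buf [1]byte
	_, err = conn.Read(buf[:])
	var ne net.Error
	if errors.As(err, &ne) && ne.Timeout() {
		fmt.Println("read timed out as expected")
		return
	}
	fmt.Println("unexpected:", err)
	os.Exit(1)
}
```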
@@ -344,7 +412,7 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
 	pd.rseq++
 	pd.wseq++
 	var rg, wg *g
-	atomic.StorepNoWB(noescape(unsafe.Pointer(&rg)), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
+	pd.publishInfo()
 	rg = netpollunblock(pd, 'r', false)
 	wg = netpollunblock(pd, 'w', false)
 	if pd.rt.f != nil {
@@ -389,16 +457,17 @@ func netpollready(toRun *gList, pd *pollDesc, mode int32) {
 }
 
 func netpollcheckerr(pd *pollDesc, mode int32) int {
-	if pd.closing {
+	info := pd.info()
+	if info.closing() {
 		return pollErrClosing
 	}
-	if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
+	if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
 		return pollErrTimeout
 	}
 	// Report an event scanning error only on a read event.
 	// An error on a write event will be captured in a subsequent
 	// write call that is able to report a more specific error.
-	if mode == 'r' && pd.everr {
+	if mode == 'r' && info.eventErr() {
 		return pollErrNotPollable
 	}
 	return pollNoError
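After this change netpollcheckerr decodes a single atomic snapshot instead of reading closing, rd, wd, and everr as separate fields, so it can never observe a torn mixture of pre-update and post-update state. A self-contained model of that shape is sketched below; checkErr and the err*/info* names are stand-ins invented for the sketch, not runtime API. Every branch decodes the same word captured by one load.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type pollInfo uint32

const (
	infoClosing pollInfo = 1 << iota
	infoEventErr
	infoExpiredRead
	infoExpiredWrite
)

const (
	errNone = iota
	errClosing
	errTimeout
	errNotPollable
)

// checkErr mirrors the shape of netpollcheckerr: one atomic load,
// then every check works on that single consistent snapshot.
func checkErr(p *uint32, mode int32) int {
	info := pollInfo(atomic.LoadUint32(p))
	if info&infoClosing != 0 {
		return errClosing
	}
	if (mode == 'r' && info&infoExpiredRead != 0) || (mode == 'w' && info&infoExpiredWrite != 0) {
		return errTimeout
	}
	if mode == 'r' && info&infoEventErr != 0 {
		return errNotPollable
	}
	return errNone
}

func main() {
	var state uint32
	atomic.StoreUint32(&state, uint32(infoExpiredRead|infoEventErr))
	fmt.Println(checkErr(&state, 'r')) // 2: timeout takes priority over the event error
	fmt.Println(checkErr(&state, 'w')) // 0: the write side is unaffected
}
```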
@@ -449,7 +518,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 
 	// need to recheck error states after setting gpp to pdWait
 	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
-	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
+	// do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg
 	if waitio || netpollcheckerr(pd, mode) == 0 {
 		gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceEvGoBlockNet, 5)
 	}
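The rewritten comment names the ordering protocol this whole patch preserves: the blocker stores its wait marker and then rechecks state, while the unblocker stores state (now via publishInfo, whose CAS supplies the full barrier that StorepNoWB used to) and then checks for a waiter, so at least one side always notices the other. Here is a runnable sketch of that handshake with invented names; output varies with scheduling, but at least one of the two lines always prints, which is exactly the property that keeps a goroutine from sleeping through a state change.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		info   uint32 // bit 0 = "closing", published like atomicInfo
		waiter uint32 // 0 = empty, 1 = "a goroutine is parked here"
		wg     sync.WaitGroup
	)

	wg.Add(2)

	// Blocker (netpollblock's side): announce intent to sleep,
	// then recheck the published state before actually parking.
	go func() {
		defer wg.Done()
		atomic.CompareAndSwapUint32(&waiter, 0, 1) // "gpp = pdWait"
		if atomic.LoadUint32(&info)&1 != 0 {
			atomic.StoreUint32(&waiter, 0) // state changed: don't sleep
			fmt.Println("blocker: saw closing, not parking")
		}
	}()

	// Unblocker (pollUnblock's side): publish the state change first,
	// then look for a parked waiter to wake.
	go func() {
		defer wg.Done()
		for { // publishInfo-style CAS loop doubles as the barrier
			x := atomic.LoadUint32(&info)
			if atomic.CompareAndSwapUint32(&info, x, x|1) {
				break
			}
		}
		if atomic.SwapUint32(&waiter, 0) == 1 {
			fmt.Println("unblocker: woke the parked waiter")
		}
	}()

	wg.Wait()
}
```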
@@ -509,7 +578,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
 			throw("runtime: inconsistent read deadline")
 		}
 		pd.rd = -1
-		atomic.StorepNoWB(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
+		pd.publishInfo()
 		rg = netpollunblock(pd, 'r', false)
 	}
 	var wg *g
@@ -518,7 +587,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
 			throw("runtime: inconsistent write deadline")
 		}
 		pd.wd = -1
-		atomic.StorepNoWB(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
+		pd.publishInfo()
 		wg = netpollunblock(pd, 'w', false)
 	}
 	unlock(&pd.lock)