 // runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
 // gcc-compiled function written by cgo.
 //
-// runtime.cgocall (below) locks g to m, calls entersyscall
-// so as not to block other goroutines or the garbage collector,
-// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame).
+// runtime.cgocall (below) calls entersyscall so as not to block
+// other goroutines or the garbage collector, and then calls
+// runtime.asmcgocall(_cgo_Cfunc_f, frame).
 //
 // runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
 // (assumed to be an operating system-allocated stack, so safe to run
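For illustration, a minimal cgo program (a sketch, not part of this change) that exercises the path described above; under the naming scheme in the comment, the call to C.sleep becomes roughly runtime.cgocall(_cgo_Cfunc_sleep, frame):

package main

/*
#include <unistd.h>
*/
import "C"
import "fmt"

func main() {
	// Go -> C: enters the runtime via runtime.cgocall, which in turn
	// calls asmcgocall to run the gcc-compiled code on the g0 stack.
	C.sleep(1)
	fmt.Println("back from C")
}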
@@ -104,13 +104,9 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 		racereleasemerge(unsafe.Pointer(&racecgosync))
 	}
 
-	// Lock g to m to ensure we stay on the same stack if we do a
-	// cgo callback. In case of panic, unwindm calls endcgo.
-	lockOSThread()
 	mp := getg().m
 	mp.ncgocall++
 	mp.ncgo++
-	mp.incgo = true
 
 	// Reset traceback.
 	mp.cgoCallers[0] = 0
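The lockOSThread dropped here is the runtime-internal counterpart of the public runtime.LockOSThread; this change stops paying its cost on every outgoing C call. For reference, a minimal sketch of the public API:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Pin this goroutine to its current OS thread until the deferred
	// unlock; useful for thread-local C state, GUI main threads, etc.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	fmt.Println("running pinned to one OS thread")
}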
@@ -130,7 +126,14 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 	// and then re-enter the "system call" reusing the PC and SP
 	// saved by entersyscall here.
 	entersyscall(0)
+
+	mp.incgo = true
 	errno := asmcgocall(fn, arg)
+
+	// Call endcgo before exitsyscall because exitsyscall may
+	// reschedule us onto a different M.
+	endcgo(mp)
+
 	exitsyscall(0)
 
 	// From the garbage collector's perspective, time can move
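The ordering matters because nothing pins a goroutine to one OS thread across a blocking operation, so mp must not be touched after exitsyscall. A runnable, Linux-only sketch of that mobility (not runtime code; the printed thread ids may or may not differ from run to run, the point is only that the scheduler is free to move us):

package main

import (
	"fmt"
	"syscall"
	"time"
)

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println("iteration", i, "on OS thread", syscall.Gettid())
		// A reschedule point: the next iteration may run on a
		// different thread (M) or the same one.
		time.Sleep(10 * time.Millisecond)
	}
}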
@@ -145,8 +148,8 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 	// GC by forcing them to stay live across this time warp.
 	KeepAlive(fn)
 	KeepAlive(arg)
+	KeepAlive(mp)
 
-	endcgo(mp)
 	return errno
 }
 
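KeepAlive here is the same runtime.KeepAlive exposed to user code: it keeps its argument reachable until that program point. A minimal sketch of the idiom (the file type, fd value, and finalizer are hypothetical):

package main

import (
	"fmt"
	"runtime"
)

type file struct{ fd int }

func use(fd int) {
	runtime.GC() // force a collection to make the hazard visible
	fmt.Println("using fd", fd)
}

func main() {
	f := &file{fd: 3}
	runtime.SetFinalizer(f, func(f *file) { fmt.Println("finalized fd", f.fd) })

	fd := f.fd
	use(fd)
	// Without this, f is unreachable as soon as fd is loaded, and the
	// finalizer could run while use is still working with the fd.
	runtime.KeepAlive(f)
}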
@@ -158,8 +161,6 @@ func endcgo(mp *m) {
 	if raceenabled {
 		raceacquire(unsafe.Pointer(&racecgosync))
 	}
-
-	unlockOSThread() // invalidates mp
 }
 
 // Call from C back to Go.
@@ -171,6 +172,12 @@ func cgocallbackg(ctxt uintptr) {
 		exit(2)
 	}
 
+	// The call from C is on gp.m's g0 stack, so we must ensure
+	// that we stay on that M. We have to do this before calling
+	// exitsyscall, since it would otherwise be free to move us to
+	// a different M. The call to unlockOSThread is in unwindm.
+	lockOSThread()
+
 	// Save current syscall parameters, so m.syscall can be
 	// used again if the callback decides to make a syscall.
 	syscall := gp.m.syscall
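For illustration, the path this hunk serves is C code calling an exported Go function. A minimal two-file sketch (file names and identifiers are hypothetical; the //export rule forbids function definitions in this file's preamble, hence the separate .c file):

// callback.go
package main

/*
void callFromC(void); // defined in callback.c
*/
import "C"
import "fmt"

//export goCallback
func goCallback() {
	// While this runs, the goroutine is locked (by cgocallbackg) to
	// the OS thread that entered from C.
	fmt.Println("called back from C")
}

func main() {
	C.callFromC() // Go -> C -> Go
}

// callback.c
#include "_cgo_export.h"

void callFromC(void) {
	goCallback(); // enters Go via runtime.cgocallback -> cgocallbackg
}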
@@ -186,6 +193,10 @@ func cgocallbackg(ctxt uintptr) {
 
 	cgocallbackg1(ctxt)
 
+	// At this point unlockOSThread has been called.
+	// The following code must not change to a different m.
+	// This is enforced by checking incgo in the schedule function.
+
 	gp.m.incgo = true
 	// going back to cgo call
 	reentersyscall(savedpc, uintptr(savedsp))
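The check the new comment refers to lives in the scheduler; paraphrased (this is a sketch of the guard, not verbatim runtime source):

func schedule() {
	gp := getg()
	// An M still marked as running cgo must not be handed another
	// goroutine; reaching here with incgo set is a runtime bug.
	if gp.m.incgo {
		throw("schedule: in cgo")
	}
	// ... pick and run the next goroutine ...
}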
@@ -321,32 +332,35 @@ func cgocallbackg1(ctxt uintptr) {
 }
 
 func unwindm(restore *bool) {
-	if !*restore {
-		return
-	}
-	// Restore sp saved by cgocallback during
-	// unwind of g's stack (see comment at top of file).
-	mp := acquirem()
-	sched := &mp.g0.sched
-	switch GOARCH {
-	default:
-		throw("unwindm not implemented")
-	case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle":
-		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
-	case "arm64":
-		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
-	}
+	if *restore {
+		// Restore sp saved by cgocallback during
+		// unwind of g's stack (see comment at top of file).
+		mp := acquirem()
+		sched := &mp.g0.sched
+		switch GOARCH {
+		default:
+			throw("unwindm not implemented")
+		case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle":
+			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
+		case "arm64":
+			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
+		}
 
-	// Call endcgo to do the accounting that cgocall will not have a
-	// chance to do during an unwind.
-	//
-	// In the case where a Go call originates from C, ncgo is 0
-	// and there is no matching cgocall to end.
-	if mp.ncgo > 0 {
-		endcgo(mp)
+		// Call endcgo to do the accounting that cgocall will not have a
+		// chance to do during an unwind.
+		//
+		// In the case where a Go call originates from C, ncgo is 0
+		// and there is no matching cgocall to end.
+		if mp.ncgo > 0 {
+			endcgo(mp)
+		}
+
+		releasem(mp)
 	}
 
-	releasem(mp)
+	// Undo the call to lockOSThread in cgocallbackg.
+	// We must still stay on the same m.
+	unlockOSThread()
 }
 
 // called from assembly
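unwindm exists for panics that start in a Go callback and unwind back through the intervening C frames to Go code that recovers them; the C frames are simply abandoned (their cleanup does not run). A two-file sketch of that scenario (all names hypothetical):

// panic.go
package main

/*
void callGoThatPanics(void); // defined in panic_helper.c
*/
import "C"
import "fmt"

//export goPanics
func goPanics() {
	panic("panic raised in a Go callback")
}

func main() {
	defer func() {
		// The panic began in goPanics, was carried past the C frame of
		// callGoThatPanics (unwindm restores the g0 SP it abandons),
		// and is recovered here.
		fmt.Println("recovered:", recover())
	}()
	C.callGoThatPanics()
}

// panic_helper.c
#include "_cgo_export.h"

void callGoThatPanics(void) {
	goPanics(); // never returns normally
}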