Commit 7a132d6

runtime: move doAllThreadsSyscall to os_linux.go
syscall_runtime_doAllThreadsSyscall is only used on Linux. In preparation
for a follow-up CL that will modify the function to use other Linux-only
functions, move it to os_linux.go with no changes.

For #50113.

Change-Id: I348b6130038603aa0a917be1f1debbca5a5a073f
Reviewed-on: https://go-review.googlesource.com/c/go/+/383996
Trust: Michael Pratt <[email protected]>
Reviewed-by: Andrew G. Morgan <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
Run-TryBot: Austin Clements <[email protected]>
TryBot-Result: Gopher Robot <[email protected]>
1 parent 76bd8ea commit 7a132d6
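
The //go:linkname directive on the moved function binds it to syscall.runtime_doAllThreadsSyscall, which the syscall package uses to implement syscall.AllThreadsSyscall on Linux. As a minimal caller-side sketch (the PR_SET_KEEPCAPS constant is defined locally here for illustration rather than taken from the syscall package), user code that relies on this machinery looks roughly like:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

// PR_SET_KEEPCAPS from <linux/prctl.h>, defined locally for this sketch.
const prSetKeepCaps = 8

func main() {
	// AllThreadsSyscall issues the syscall on every runtime thread, so a
	// per-thread attribute such as PR_SET_KEEPCAPS ends up consistent
	// process-wide. It is unsupported (returns ENOTSUP) in binaries that
	// link in cgo code.
	_, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL, prSetKeepCaps, 1, 0)
	if errno != 0 {
		fmt.Println("prctl(PR_SET_KEEPCAPS) failed:", errno)
		return
	}
	fmt.Println("PR_SET_KEEPCAPS set on all threads")
}
```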

2 files changed, +139 -139 lines

src/runtime/os_linux.go

Lines changed: 139 additions & 0 deletions
@@ -664,3 +664,142 @@ func setThreadCPUProfiler(hz int32) {
```go
	mp.profileTimer = timerid
	atomic.Store(&mp.profileTimerValid, 1)
}

// syscall_runtime_doAllThreadsSyscall serializes Go execution and
// executes a specified fn() call on all m's.
//
// The boolean argument to fn() indicates whether the function's
// return value will be consulted or not. That is, fn(true) should
// return true if fn() succeeds, and fn(true) should return false if
// it failed. When fn(false) is called, its return status will be
// ignored.
//
// syscall_runtime_doAllThreadsSyscall first invokes fn(true) on a
// single, coordinating, m, and only if it returns true does it go on
// to invoke fn(false) on all of the other m's known to the process.
//
//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
	if iscgo {
		panic("doAllThreadsSyscall not supported with cgo enabled")
	}
	if fn == nil {
		return
	}
	for atomic.Load(&sched.sysmonStarting) != 0 {
		osyield()
	}

	// We don't want this thread to handle signals for the
	// duration of this critical section. The underlying issue
	// being that this locked coordinating m is the one monitoring
	// for fn() execution by all the other m's of the runtime,
	// while no regular go code execution is permitted (the world
	// is stopped). If this present m were to get distracted to
	// run signal handling code, and find itself waiting for a
	// second thread to execute go code before being able to
	// return from that signal handling, a deadlock will result.
	// (See golang.org/issue/44193.)
	lockOSThread()
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	stopTheWorldGC("doAllThreadsSyscall")
	if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
		// Ensure that there are no in-flight thread
		// creations: don't want to race with allm.
		lock(&newmHandoff.lock)
		for !newmHandoff.waiting {
			unlock(&newmHandoff.lock)
			osyield()
			lock(&newmHandoff.lock)
		}
		unlock(&newmHandoff.lock)
	}
	if netpollinited() {
		netpollBreak()
	}
	sigRecvPrepareForFixup()
	_g_ := getg()
	if raceenabled {
		// For m's running without racectx, we loan out the
		// racectx of this call.
		lock(&mFixupRace.lock)
		mFixupRace.ctx = _g_.racectx
		unlock(&mFixupRace.lock)
	}
	if ok := fn(true); ok {
		tid := _g_.m.procid
		for mp := allm; mp != nil; mp = mp.alllink {
			if mp.procid == tid {
				// This m has already completed fn()
				// call.
				continue
			}
			// Be wary of mp's without procid values if
			// they are known not to park. If they are
			// marked as parking with a zero procid, then
			// they will be racing with this code to be
			// allocated a procid and we will annotate
			// them with the need to execute the fn when
			// they acquire a procid to run it.
			if mp.procid == 0 && !mp.doesPark {
				// Reaching here, we are either
				// running Windows, or cgo linked
				// code. Neither of which are
				// currently supported by this API.
				throw("unsupported runtime environment")
			}
			// stopTheWorldGC() doesn't guarantee stopping
			// all the threads, so we lock here to avoid
			// the possibility of racing with mp.
			lock(&mp.mFixup.lock)
			mp.mFixup.fn = fn
			atomic.Store(&mp.mFixup.used, 1)
			if mp.doesPark {
				// For non-service threads this will
				// cause the wakeup to be short lived
				// (once the mutex is unlocked). The
				// next real wakeup will occur after
				// startTheWorldGC() is called.
				notewakeup(&mp.park)
			}
			unlock(&mp.mFixup.lock)
		}
		for {
			done := true
			for mp := allm; done && mp != nil; mp = mp.alllink {
				if mp.procid == tid {
					continue
				}
				done = atomic.Load(&mp.mFixup.used) == 0
			}
			if done {
				break
			}
			// if needed force sysmon and/or newmHandoff to wakeup.
			lock(&sched.lock)
			if atomic.Load(&sched.sysmonwait) != 0 {
				atomic.Store(&sched.sysmonwait, 0)
				notewakeup(&sched.sysmonnote)
			}
			unlock(&sched.lock)
			lock(&newmHandoff.lock)
			if newmHandoff.waiting {
				newmHandoff.waiting = false
				notewakeup(&newmHandoff.wake)
			}
			unlock(&newmHandoff.lock)
			osyield()
		}
	}
	if raceenabled {
		lock(&mFixupRace.lock)
		mFixupRace.ctx = 0
		unlock(&mFixupRace.lock)
	}
	startTheWorldGC()
	msigrestore(sigmask)
	unlockOSThread()
}
```
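
To make the fn(true)/fn(false) contract above concrete, here is a minimal sketch of the kind of closure a caller on the syscall side of the go:linkname could pass in. The helper name makeAllThreadsFn and its structure are illustrative assumptions, not the actual syscall package implementation:

```go
//go:build linux

package allthreads

import "syscall"

// makeAllThreadsFn is a hypothetical helper (not from the Go tree) that
// builds a closure matching the func(bool) bool contract described above:
// the runtime invokes fn(true) once on the coordinating m and consults its
// result; only if that returns true is fn(false) run on every other m,
// whose return value is ignored.
func makeAllThreadsFn(trap, a1, a2, a3 uintptr) func(bool) bool {
	return func(coordinating bool) bool {
		// A raw syscall only affects the calling thread, which is why
		// the runtime has to repeat it on every m.
		_, _, errno := syscall.RawSyscall(trap, a1, a2, a3)
		if coordinating {
			// The coordinating call decides whether the runtime
			// fans the syscall out to the remaining threads.
			return errno == 0
		}
		// Non-coordinating threads: the result is ignored by the runtime.
		return errno == 0
	}
}
```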

src/runtime/proc.go

Lines changed: 0 additions & 139 deletions
@@ -1669,145 +1669,6 @@ func forEachP(fn func(*p)) {
(The 139 removed lines are the syscall_runtime_doAllThreadsSyscall function shown above, deleted verbatim from proc.go, where it previously sat between forEachP and runSafePointFn.)
