diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs
index 6439fa0e0c8b8..ee4be6c915119 100644
--- a/src/libcore/hint.rs
+++ b/src/libcore/hint.rs
@@ -49,28 +49,16 @@ pub unsafe fn unreachable_unchecked() -> ! {
     intrinsics::unreachable()
 }
 
-/// Signals the processor that it is entering a busy-wait spin-loop.
+/// Emits a machine instruction hinting to the processor that it is running in a busy-wait
+/// spin-loop ("spin lock").
 ///
-/// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
-/// power or switching hyper-threads.
-///
-/// This function is different than [`std::thread::yield_now`] which directly yields to the
-/// system's scheduler, whereas `spin_loop` only signals the processor that it is entering a
-/// busy-wait spin-loop without yielding control to the system's scheduler.
-///
-/// Using a busy-wait spin-loop with `spin_loop` is ideally used in situations where a
-/// contended lock is held by another thread executed on a different CPU and where the waiting
-/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
-/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
-/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
-/// before switching to the thread that holds the lock. If the contending lock is held by a thread
-/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
-/// use [`std::thread::yield_now`].
+/// For a discussion of different locking strategies and their trade-offs, see
+/// [`core::sync::atomic::spin_loop_hint`].
 ///
 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
 /// do anything at all.
 ///
-/// [`std::thread::yield_now`]: ../../std/thread/fn.yield_now.html
+/// [`core::sync::atomic::spin_loop_hint`]: ../sync/atomic/fn.spin_loop_hint.html
 #[inline]
 #[unstable(feature = "renamed_spin_loop", issue = "55002")]
 pub fn spin_loop() {
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 8dfb19fa03296..ab5af5682c180 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -124,28 +124,31 @@ use crate::fmt;
 
 use crate::hint::spin_loop;
 
-/// Signals the processor that it is entering a busy-wait spin-loop.
+/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
 ///
 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
 /// power or switching hyper-threads.
 ///
-/// This function is different than [`std::thread::yield_now`] which directly yields to the
-/// system's scheduler, whereas `spin_loop_hint` only signals the processor that it is entering a
-/// busy-wait spin-loop without yielding control to the system's scheduler.
+/// This function is different from [`std::thread::yield_now`] which directly yields to the
+/// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
 ///
-/// Using a busy-wait spin-loop with `spin_loop_hint` is ideally used in situations where a
-/// contended lock is held by another thread executed on a different CPU and where the waiting
-/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
-/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
-/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
-/// before switching to the thread that holds the lock. If the contending lock is held by a thread
-/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
-/// use [`std::thread::yield_now`].
+/// Spin locks can be very efficient for short lock durations because they do not involve context
+/// switches or interaction with the operating system. For long lock durations, however, they
+/// become wasteful because they use CPU cycles for the entire lock duration, and using a
+/// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
+/// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
+/// or [`std::thread::sleep`] may be the best option.
+///
+/// **Note**: Spin locks are based on the underlying assumption that another thread will release
+/// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
+/// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
 ///
 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
 /// do anything at all.
 ///
 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
+/// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
+/// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
 #[inline]
 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
 pub fn spin_loop_hint() {
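As a rough illustration of the usage pattern the reworded documentation describes, here is a minimal sketch that busy-waits on an `AtomicBool` until another thread releases it. The flag name, thread structure, and timing assumptions are illustrative only and are not part of the patch.

```rust
use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Shared flag standing in for a very short-lived "lock".
    let ready = Arc::new(AtomicBool::new(false));
    let ready_for_worker = Arc::clone(&ready);

    // The other thread is expected to release the flag "soon", ideally while
    // running on a different CPU or core.
    let worker = thread::spawn(move || {
        // ... a short piece of work ...
        ready_for_worker.store(true, Ordering::Release);
    });

    // Busy-wait: only sensible when the expected wait is very short; for longer
    // waits, std::sync::Mutex or thread::yield_now / thread::sleep are preferable.
    while !ready.load(Ordering::Acquire) {
        // Hint to the processor that we are spinning; a no-op on platforms
        // without spin-loop hints.
        spin_loop_hint();
    }

    worker.join().unwrap();
}
```

The spin is only appropriate here because the worker is expected to set the flag almost immediately; for longer waits the doc comments above point to `std::sync::Mutex`, `std::thread::yield_now`, or `std::thread::sleep` instead.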