 use crate::{
     cell::{Cell, UnsafeCell},
     fmt,
+    marker::PhantomData,
     mem::{self, MaybeUninit},
     ops::{Deref, Drop},
     panic::{RefUnwindSafe, UnwindSafe},
-    sync::Once,
+    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+    thread::{self, Thread},
 };
 
 #[doc(inline)]
@@ -40,7 +42,10 @@ pub use core::lazy::*;
 /// ```
 #[unstable(feature = "once_cell", issue = "68198")]
 pub struct SyncOnceCell<T> {
-    once: Once,
+    // This `state` word is actually an encoded version of just a pointer to a
+    // `Waiter`, so we add the `PhantomData` appropriately.
+    state_and_queue: AtomicUsize,
+    _marker: PhantomData<*mut Waiter>,
     // Whether or not the value is initialized is tracked by `state_and_queue`.
     value: UnsafeCell<MaybeUninit<T>>,
 }
@@ -117,7 +122,8 @@ impl<T> SyncOnceCell<T> {
     #[unstable(feature = "once_cell", issue = "68198")]
     pub const fn new() -> SyncOnceCell<T> {
         SyncOnceCell {
-            once: Once::new(),
+            state_and_queue: AtomicUsize::new(INCOMPLETE),
+            _marker: PhantomData,
             value: UnsafeCell::new(MaybeUninit::uninit()),
         }
     }
@@ -129,7 +135,7 @@ impl<T> SyncOnceCell<T> {
     #[unstable(feature = "once_cell", issue = "68198")]
     pub fn get(&self) -> Option<&T> {
         if self.is_initialized() {
-            // Safe b/c checked is_initialized
+            // Safe b/c we checked is_initialized
             Some(unsafe { self.get_unchecked() })
         } else {
             None
@@ -142,7 +148,7 @@ impl<T> SyncOnceCell<T> {
     #[unstable(feature = "once_cell", issue = "68198")]
     pub fn get_mut(&mut self) -> Option<&mut T> {
         if self.is_initialized() {
-            // Safe b/c checked is_initialized and we have a unique access
+            // Safe b/c we checked is_initialized and have unique access
             Some(unsafe { self.get_unchecked_mut() })
         } else {
             None
@@ -344,32 +350,37 @@ impl<T> SyncOnceCell<T> {
         }
     }
 
+    /// Safety: synchronizes with store to value via Release/(Acquire|SeqCst).
     #[inline]
     fn is_initialized(&self) -> bool {
-        self.once.is_completed()
+        // An `Acquire` load is enough because that makes all the initialization
+        // operations visible to us, and, this being a fast path, the weaker
+        // ordering helps with performance. This `Acquire` synchronizes with the
+        // `Release`/`AcqRel` operations on the slow path.
+        self.state_and_queue.load(Ordering::Acquire) == COMPLETE
     }
 
+    /// Safety: synchronizes with the store to value via an `Acquire` read of
+    /// the state; writes value only once because we never return to the
+    /// INCOMPLETE state after a successful write.
     #[cold]
     fn initialize<F, E>(&self, f: F) -> Result<(), E>
     where
         F: FnOnce() -> Result<T, E>,
     {
+        let mut f = Some(f);
         let mut res: Result<(), E> = Ok(());
         let slot = &self.value;
-
-        // Ignore poisoning from other threads
-        // If another thread panics, then we'll be able to run our closure
-        self.once.call_once_force(|p| {
+        initialize_inner(&self.state_and_queue, &mut || {
+            let f = f.take().unwrap();
             match f() {
                 Ok(value) => {
                     unsafe { (&mut *slot.get()).write(value) };
+                    true
                 }
                 Err(e) => {
                     res = Err(e);
-
-                    // Treat the underlying `Once` as poisoned since we
-                    // failed to initialize our value. Calls
-                    p.poison();
+                    false
                 }
             }
         });
@@ -396,6 +407,106 @@ impl<T> Drop for SyncOnceCell<T> {
     }
 }
 
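+// The state of the cell is encoded in the two low bits of `state_and_queue`;
+// while the state is RUNNING, the remaining bits hold a pointer to the head of
+// a linked list of `Waiter` nodes (hence the 4-byte alignment of `Waiter`).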
+const INCOMPLETE: usize = 0x0;
+const RUNNING: usize = 0x1;
+const COMPLETE: usize = 0x2;
+
+const STATE_MASK: usize = 0x3;
+
+// The alignment here is so that we can stash the state in the lower
+// bits of the `next` pointer.
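+// A node is created on the stack of each waiting thread in `wait()` and may be
+// freed as soon as its `signaled` flag is set to true, so the waking thread
+// must not touch the node after that store.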
+#[repr(align(4))]
+struct Waiter {
+    thread: Cell<Option<Thread>>,
+    signaled: AtomicBool,
+    next: *const Waiter,
+}
+
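+// Drop guard held by the initializing thread while it runs `init`. Dropping it
+// (on success, on error, or while unwinding from a panic in `init`) writes
+// `set_state_on_drop_to` back into the state word and wakes every queued waiter.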
+struct WaiterQueue<'a> {
+    state_and_queue: &'a AtomicUsize,
+    set_state_on_drop_to: usize,
+}
+
+impl Drop for WaiterQueue<'_> {
+    fn drop(&mut self) {
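+        // Swap in the final state, atomically taking ownership of the whole
+        // waiter queue in the process: `Release` publishes the (possibly)
+        // initialized value to later `Acquire` loads of the state, while
+        // `Acquire` makes the nodes pushed by `wait()` visible to us.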
+        let state_and_queue =
+            self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
+
+        assert_eq!(state_and_queue & STATE_MASK, RUNNING);
+
+        unsafe {
+            let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
+            while !queue.is_null() {
+                let next = (*queue).next;
+                let thread = (*queue).thread.replace(None).unwrap();
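+                // Setting `signaled` must be the last access to this node:
+                // once it is true the waiting thread may return from `wait()`
+                // and free the node, which lives on that thread's stack.
+                // `next` and `thread` have already been read out above.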
+                (*queue).signaled.store(true, Ordering::Release);
+                queue = next;
+                thread.unpark();
+            }
+        }
+    }
+}
+
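+// Drives the state machine. Returns `true` once the cell is COMPLETE. `init`
+// is only run by the thread that wins the race from INCOMPLETE to RUNNING;
+// every other caller either returns immediately or blocks in `wait()` until
+// that thread finishes, then re-examines the state.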
+fn initialize_inner(my_state_and_queue: &AtomicUsize, init: &mut dyn FnMut() -> bool) -> bool {
+    let mut state_and_queue = my_state_and_queue.load(Ordering::Acquire);
+
+    loop {
+        match state_and_queue {
+            COMPLETE => return true,
+            INCOMPLETE => {
+                let old = my_state_and_queue.compare_and_swap(
+                    state_and_queue,
+                    RUNNING,
+                    Ordering::Acquire,
+                );
+                if old != state_and_queue {
+                    state_and_queue = old;
+                    continue;
+                }
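+                // We claimed RUNNING. `set_state_on_drop_to` starts out as
+                // INCOMPLETE so that a panic in `init` resets the cell and
+                // wakes any waiters when `waiter_queue` is dropped during
+                // unwinding, letting another thread retry.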
+                let mut waiter_queue = WaiterQueue {
+                    state_and_queue: my_state_and_queue,
+                    set_state_on_drop_to: INCOMPLETE,
+                };
+                let success = init();
+
+                waiter_queue.set_state_on_drop_to = if success { COMPLETE } else { INCOMPLETE };
+                return success;
+            }
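+            // Any other value is a `Waiter` queue pointer with the RUNNING bit
+            // set: another thread is initializing, so block until it is done
+            // and then re-check the state.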
+            _ => {
+                assert!(state_and_queue & STATE_MASK == RUNNING);
+                wait(&my_state_and_queue, state_and_queue);
+                state_and_queue = my_state_and_queue.load(Ordering::Acquire);
+            }
+        }
+    }
+}
+
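+// Blocks the current thread until the state leaves RUNNING. The thread pushes
+// a stack-allocated `Waiter` node onto the front of the queue encoded in
+// `state_and_queue`, then parks until the initializing thread signals it.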
+fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
+    loop {
+        if current_state & STATE_MASK != RUNNING {
+            return;
+        }
+
+        let node = Waiter {
+            thread: Cell::new(Some(thread::current())),
+            signaled: AtomicBool::new(false),
+            next: (current_state & !STATE_MASK) as *const Waiter,
+        };
+        let me = &node as *const Waiter as usize;
+
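+        // Try to install our node as the new head of the queue while keeping
+        // the RUNNING bit set. `Release` makes the node's fields visible to
+        // the thread that later takes the whole queue with an `AcqRel` swap.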
+        let old = state_and_queue.compare_and_swap(current_state, me | RUNNING, Ordering::Release);
+        if old != current_state {
+            current_state = old;
+            continue;
+        }
+
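+        // We are in the queue; park until signaled. `park()` can return
+        // spuriously, so re-check `signaled` each time. If the initializing
+        // thread calls `unpark()` before we park, the park token ensures we
+        // do not sleep forever.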
+        while !node.signaled.load(Ordering::Acquire) {
+            thread::park();
+        }
+        break;
+    }
+}
+
 /// A value which is initialized on the first access.
 ///
 /// This type is a thread-safe `Lazy`, and can be used in statics.
@@ -652,7 +763,6 @@ mod tests {
 
         let res = panic::catch_unwind(|| cell.get_or_try_init(|| -> Result<_, ()> { panic!() }));
         assert!(res.is_err());
-        assert!(!cell.is_initialized());
         assert!(cell.get().is_none());
 
         assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));