3
3
use crate :: {
4
4
cell:: { Cell , UnsafeCell } ,
5
5
fmt,
6
+ marker:: PhantomData ,
6
7
mem:: { self , MaybeUninit } ,
7
8
ops:: { Deref , Drop } ,
8
9
panic:: { RefUnwindSafe , UnwindSafe } ,
9
- sync:: Once ,
10
+ sync:: atomic:: { AtomicBool , AtomicUsize , Ordering } ,
11
+ thread:: { self , Thread } ,
10
12
} ;
11
13
12
14
#[ doc( inline) ]
@@ -40,7 +42,10 @@ pub use core::lazy::*;
40
42
/// ```
41
43
#[ unstable( feature = "once_cell" , issue = "68198" ) ]
42
44
pub struct SyncOnceCell < T > {
43
- once : Once ,
45
+ // This `state` word is actually an encoded version of just a pointer to a
46
+ // `Waiter`, so we add the `PhantomData` appropriately.
47
+ state_and_queue : AtomicUsize ,
48
+ _marker : PhantomData < * mut Waiter > ,
44
49
// Whether or not the value is initialized is tracked by `state_and_queue`.
45
50
value : UnsafeCell < MaybeUninit < T > > ,
46
51
}
@@ -117,7 +122,8 @@ impl<T> SyncOnceCell<T> {
117
122
#[ unstable( feature = "once_cell" , issue = "68198" ) ]
118
123
pub const fn new ( ) -> SyncOnceCell < T > {
119
124
SyncOnceCell {
120
- once : Once :: new ( ) ,
125
+ state_and_queue : AtomicUsize :: new ( INCOMPLETE ) ,
126
+ _marker : PhantomData ,
121
127
value : UnsafeCell :: new ( MaybeUninit :: uninit ( ) ) ,
122
128
}
123
129
}
@@ -129,7 +135,7 @@ impl<T> SyncOnceCell<T> {
129
135
#[ unstable( feature = "once_cell" , issue = "68198" ) ]
130
136
pub fn get ( & self ) -> Option < & T > {
131
137
if self . is_initialized ( ) {
132
- // Safe b/c checked is_initialized
138
+ // Safe b/c checked is_initialize
133
139
Some ( unsafe { self . get_unchecked ( ) } )
134
140
} else {
135
141
None
@@ -142,7 +148,7 @@ impl<T> SyncOnceCell<T> {
142
148
#[ unstable( feature = "once_cell" , issue = "68198" ) ]
143
149
pub fn get_mut ( & mut self ) -> Option < & mut T > {
144
150
if self . is_initialized ( ) {
145
- // Safe b/c checked is_initialized and we have a unique access
151
+ // Safe b/c checked is_initialize and we have a unique access
146
152
Some ( unsafe { self . get_unchecked_mut ( ) } )
147
153
} else {
148
154
None
@@ -344,32 +350,37 @@ impl<T> SyncOnceCell<T> {
344
350
}
345
351
}
346
352
353
+ /// Safety: synchronizes with store to value via Release/(Acquire|SeqCst).
347
354
#[ inline]
348
355
fn is_initialized ( & self ) -> bool {
349
- self . once . is_completed ( )
356
+ // An `Acquire` load is enough because that makes all the initialization
357
+ // operations visible to us, and, this being a fast path, weaker
358
+ // ordering helps with performance. This `Acquire` synchronizes with
359
+ // `SeqCst` operations on the slow path.
360
+ self . state_and_queue . load ( Ordering :: Acquire ) == COMPLETE
350
361
}
351
362
363
+ /// Safety: synchronizes with store to value via SeqCst read from state,
364
+ /// writes value only once because we never get to INCOMPLETE state after a
365
+ /// successful write.
352
366
#[ cold]
353
367
fn initialize < F , E > ( & self , f : F ) -> Result < ( ) , E >
354
368
where
355
369
F : FnOnce ( ) -> Result < T , E > ,
356
370
{
371
+ let mut f = Some ( f) ;
357
372
let mut res: Result < ( ) , E > = Ok ( ( ) ) ;
358
373
let slot = & self . value ;
359
-
360
- // Ignore poisoning from other threads
361
- // If another thread panics, then we'll be able to run our closure
362
- self . once . call_once_force ( |p| {
374
+ initialize_inner ( & self . state_and_queue , & mut || {
375
+ let f = f. take ( ) . unwrap ( ) ;
363
376
match f ( ) {
364
377
Ok ( value) => {
365
378
unsafe { ( & mut * slot. get ( ) ) . write ( value) } ;
379
+ true
366
380
}
367
381
Err ( e) => {
368
382
res = Err ( e) ;
369
-
370
- // Treat the underlying `Once` as poisoned since we
371
- // failed to initialize our value. Calls
372
- p. poison ( ) ;
383
+ false
373
384
}
374
385
}
375
386
} ) ;
@@ -396,6 +407,106 @@ impl<T> Drop for SyncOnceCell<T> {
396
407
}
397
408
}
398
409
410
// The three initialization states, stored in the two low bits of
// `state_and_queue`; the remaining bits hold a `*const Waiter`
// (made possible by `#[repr(align(4))]` on `Waiter`).
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;

// Mask selecting the state bits out of `state_and_queue`.
const STATE_MASK: usize = 0x3;
415
+
416
// One node in the intrusive linked list of parked threads. The
// `#[repr(align(4))]` guarantees that the two low bits of a
// `*const Waiter` are zero, which is where the state gets stashed
// inside `state_and_queue`.
#[repr(align(4))]
struct Waiter {
    thread: Cell<Option<Thread>>,
    signaled: AtomicBool,
    next: *const Waiter,
}
424
+
425
// Drop guard owned by the running initializer: on drop (normal return or
// unwind) it installs `set_state_on_drop_to` and wakes every queued waiter.
struct WaiterQueue<'a> {
    state_and_queue: &'a AtomicUsize,
    set_state_on_drop_to: usize,
}
429
+
430
+ impl Drop for WaiterQueue < ' _ > {
431
+ fn drop ( & mut self ) {
432
+ let state_and_queue =
433
+ self . state_and_queue . swap ( self . set_state_on_drop_to , Ordering :: AcqRel ) ;
434
+
435
+ assert_eq ! ( state_and_queue & STATE_MASK , RUNNING ) ;
436
+
437
+ unsafe {
438
+ let mut queue = ( state_and_queue & !STATE_MASK ) as * const Waiter ;
439
+ while !queue. is_null ( ) {
440
+ let next = ( * queue) . next ;
441
+ let thread = ( * queue) . thread . replace ( None ) . unwrap ( ) ;
442
+ ( * queue) . signaled . store ( true , Ordering :: Release ) ;
443
+ queue = next;
444
+ thread. unpark ( ) ;
445
+ }
446
+ }
447
+ }
448
+ }
449
+
450
+ fn initialize_inner ( my_state_and_queue : & AtomicUsize , init : & mut dyn FnMut ( ) -> bool ) -> bool {
451
+ let mut state_and_queue = my_state_and_queue. load ( Ordering :: Acquire ) ;
452
+
453
+ loop {
454
+ match state_and_queue {
455
+ COMPLETE => return true ,
456
+ INCOMPLETE => {
457
+ let old = my_state_and_queue. compare_and_swap (
458
+ state_and_queue,
459
+ RUNNING ,
460
+ Ordering :: Acquire ,
461
+ ) ;
462
+ if old != state_and_queue {
463
+ state_and_queue = old;
464
+ continue ;
465
+ }
466
+ let mut waiter_queue = WaiterQueue {
467
+ state_and_queue : my_state_and_queue,
468
+ set_state_on_drop_to : INCOMPLETE ,
469
+ } ;
470
+ let success = init ( ) ;
471
+
472
+ waiter_queue. set_state_on_drop_to = if success { COMPLETE } else { INCOMPLETE } ;
473
+ return success;
474
+ }
475
+ _ => {
476
+ assert ! ( state_and_queue & STATE_MASK == RUNNING ) ;
477
+ wait ( & my_state_and_queue, state_and_queue) ;
478
+ state_and_queue = my_state_and_queue. load ( Ordering :: Acquire ) ;
479
+ }
480
+ }
481
+ }
482
+ }
483
+
484
+ fn wait ( state_and_queue : & AtomicUsize , mut current_state : usize ) {
485
+ loop {
486
+ if current_state & STATE_MASK != RUNNING {
487
+ return ;
488
+ }
489
+
490
+ let node = Waiter {
491
+ thread : Cell :: new ( Some ( thread:: current ( ) ) ) ,
492
+ signaled : AtomicBool :: new ( false ) ,
493
+ next : ( current_state & !STATE_MASK ) as * const Waiter ,
494
+ } ;
495
+ let me = & node as * const Waiter as usize ;
496
+
497
+ let old = state_and_queue. compare_and_swap ( current_state, me | RUNNING , Ordering :: Release ) ;
498
+ if old != current_state {
499
+ current_state = old;
500
+ continue ;
501
+ }
502
+
503
+ while !node. signaled . load ( Ordering :: Acquire ) {
504
+ thread:: park ( ) ;
505
+ }
506
+ break ;
507
+ }
508
+ }
509
+
399
510
/// A value which is initialized on the first access.
400
511
///
401
512
/// This type is a thread-safe `Lazy`, and can be used in statics.
@@ -652,7 +763,6 @@ mod tests {
652
763
653
764
let res = panic:: catch_unwind ( || cell. get_or_try_init ( || -> Result < _ , ( ) > { panic ! ( ) } ) ) ;
654
765
assert ! ( res. is_err( ) ) ;
655
- assert ! ( !cell. is_initialized( ) ) ;
656
766
assert ! ( cell. get( ) . is_none( ) ) ;
657
767
658
768
assert_eq ! ( cell. get_or_try_init( || Err ( ( ) ) ) , Err ( ( ) ) ) ;
0 commit comments