use crate::{
    cell::{Cell, UnsafeCell},
    fmt,
-    marker::PhantomData,
    mem::{self, MaybeUninit},
    ops::{Deref, Drop},
    panic::{RefUnwindSafe, UnwindSafe},
-    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
-    thread::{self, Thread},
+    sync::Once,
};

#[doc(inline)]
@@ -42,10 +40,7 @@ pub use core::lazy::*;
/// ```
#[unstable(feature = "once_cell", issue = "68198")]
pub struct SyncOnceCell<T> {
-    // This `state` word is actually an encoded version of just a pointer to a
-    // `Waiter`, so we add the `PhantomData` appropriately.
-    state_and_queue: AtomicUsize,
-    _marker: PhantomData<*mut Waiter>,
+    once: Once,
    // Whether or not the value is initialized is tracked by `state_and_queue`.
    value: UnsafeCell<MaybeUninit<T>>,
}
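For reference, the observable behaviour this field swap must preserve is the usual once-cell contract: at most one successful initialization, visible to all threads. A minimal usage sketch follows; it is not part of the patch and assumes a nightly toolchain with the unstable `once_cell` feature (issue #68198) and the `std::lazy::SyncOnceCell` path this module is exported from.

#![feature(once_cell)]
use std::lazy::SyncOnceCell;

static CELL: SyncOnceCell<String> = SyncOnceCell::new();

fn main() {
    assert!(CELL.get().is_none());
    // The initializer runs at most once, even with concurrent callers.
    let v = CELL.get_or_init(|| String::from("computed once"));
    assert_eq!(v, "computed once");
    assert_eq!(CELL.get().map(String::as_str), Some("computed once"));
}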
@@ -122,8 +117,7 @@ impl<T> SyncOnceCell<T> {
    #[unstable(feature = "once_cell", issue = "68198")]
    pub const fn new() -> SyncOnceCell<T> {
        SyncOnceCell {
-            state_and_queue: AtomicUsize::new(INCOMPLETE),
-            _marker: PhantomData,
+            once: Once::new(),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }
@@ -135,7 +129,7 @@ impl<T> SyncOnceCell<T> {
    #[unstable(feature = "once_cell", issue = "68198")]
    pub fn get(&self) -> Option<&T> {
        if self.is_initialized() {
-            // Safe b/c checked is_initialize
+            // Safe b/c checked is_initialized
            Some(unsafe { self.get_unchecked() })
        } else {
            None
@@ -148,7 +142,7 @@ impl<T> SyncOnceCell<T> {
    #[unstable(feature = "once_cell", issue = "68198")]
    pub fn get_mut(&mut self) -> Option<&mut T> {
        if self.is_initialized() {
-            // Safe b/c checked is_initialize and we have a unique access
+            // Safe b/c checked is_initialized and we have a unique access
            Some(unsafe { self.get_unchecked_mut() })
        } else {
            None
@@ -350,37 +344,32 @@ impl<T> SyncOnceCell<T> {
        }
    }

-    /// Safety: synchronizes with store to value via Release/(Acquire|SeqCst).
    #[inline]
    fn is_initialized(&self) -> bool {
-        // An `Acquire` load is enough because that makes all the initialization
-        // operations visible to us, and, this being a fast path, weaker
-        // ordering helps with performance. This `Acquire` synchronizes with
-        // `SeqCst` operations on the slow path.
-        self.state_and_queue.load(Ordering::Acquire) == COMPLETE
+        self.once.is_completed()
    }

-    /// Safety: synchronizes with store to value via SeqCst read from state,
-    /// writes value only once because we never get to INCOMPLETE state after a
-    /// successful write.
    #[cold]
    fn initialize<F, E>(&self, f: F) -> Result<(), E>
    where
        F: FnOnce() -> Result<T, E>,
    {
-        let mut f = Some(f);
        let mut res: Result<(), E> = Ok(());
        let slot = &self.value;
-        initialize_inner(&self.state_and_queue, &mut || {
-            let f = f.take().unwrap();
+
+        // Ignore poisoning from other threads
+        // If another thread panics, then we'll be able to run our closure
+        self.once.call_once_force(|p| {
            match f() {
                Ok(value) => {
                    unsafe { (&mut *slot.get()).write(value) };
-                    true
                }
                Err(e) => {
                    res = Err(e);
-                    false
+
+                    // Treat the underlying `Once` as poisoned since we
+                    // failed to initialize our value. Calls
+                    p.poison();
                }
            }
        });
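The new `initialize` leans on the `Once` poisoning contract: `call_once_force` still runs its closure after an earlier initializer failed, and `p.poison()` (a std-internal method on `OnceState`) marks this attempt as failed so that later callers retry. The sketch below demonstrates that contract from outside std, using today's stabilized names (`call_once_force`, `OnceState::is_poisoned`); when this change landed they were still behind the unstable `once_poison` gate, so treat the exact gating as an assumption.

use std::panic::{self, AssertUnwindSafe};
use std::sync::Once;

fn main() {
    let once = Once::new();

    // A panicking initializer leaves the `Once` poisoned, not completed.
    let _ = panic::catch_unwind(AssertUnwindSafe(|| {
        once.call_once(|| panic!("first initializer failed"));
    }));
    assert!(!once.is_completed());

    // `call_once_force` ignores the poison and retries, which is what lets a
    // later `SyncOnceCell::initialize` succeed after an earlier failure.
    once.call_once_force(|state| {
        assert!(state.is_poisoned());
    });
    assert!(once.is_completed());
}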
@@ -407,106 +396,6 @@ impl<T> Drop for SyncOnceCell<T> {
    }
}

-const INCOMPLETE: usize = 0x0;
-const RUNNING: usize = 0x1;
-const COMPLETE: usize = 0x2;
-
-const STATE_MASK: usize = 0x3;
-
-// The alignment here is so that we can stash the state in the lower
-// bits of the `next` pointer
-#[repr(align(4))]
-struct Waiter {
-    thread: Cell<Option<Thread>>,
-    signaled: AtomicBool,
-    next: *const Waiter,
-}
-
-struct WaiterQueue<'a> {
-    state_and_queue: &'a AtomicUsize,
-    set_state_on_drop_to: usize,
-}
-
-impl Drop for WaiterQueue<'_> {
-    fn drop(&mut self) {
-        let state_and_queue =
-            self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
-
-        assert_eq!(state_and_queue & STATE_MASK, RUNNING);
-
-        unsafe {
-            let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
-            while !queue.is_null() {
-                let next = (*queue).next;
-                let thread = (*queue).thread.replace(None).unwrap();
-                (*queue).signaled.store(true, Ordering::Release);
-                queue = next;
-                thread.unpark();
-            }
-        }
-    }
-}
-
-fn initialize_inner(my_state_and_queue: &AtomicUsize, init: &mut dyn FnMut() -> bool) -> bool {
-    let mut state_and_queue = my_state_and_queue.load(Ordering::Acquire);
-
-    loop {
-        match state_and_queue {
-            COMPLETE => return true,
-            INCOMPLETE => {
-                let old = my_state_and_queue.compare_and_swap(
-                    state_and_queue,
-                    RUNNING,
-                    Ordering::Acquire,
-                );
-                if old != state_and_queue {
-                    state_and_queue = old;
-                    continue;
-                }
-                let mut waiter_queue = WaiterQueue {
-                    state_and_queue: my_state_and_queue,
-                    set_state_on_drop_to: INCOMPLETE,
-                };
-                let success = init();
-
-                waiter_queue.set_state_on_drop_to = if success { COMPLETE } else { INCOMPLETE };
-                return success;
-            }
-            _ => {
-                assert!(state_and_queue & STATE_MASK == RUNNING);
-                wait(&my_state_and_queue, state_and_queue);
-                state_and_queue = my_state_and_queue.load(Ordering::Acquire);
-            }
-        }
-    }
-}
-
-fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
-    loop {
-        if current_state & STATE_MASK != RUNNING {
-            return;
-        }
-
-        let node = Waiter {
-            thread: Cell::new(Some(thread::current())),
-            signaled: AtomicBool::new(false),
-            next: (current_state & !STATE_MASK) as *const Waiter,
-        };
-        let me = &node as *const Waiter as usize;
-
-        let old = state_and_queue.compare_and_swap(current_state, me | RUNNING, Ordering::Release);
-        if old != current_state {
-            current_state = old;
-            continue;
-        }
-
-        while !node.signaled.load(Ordering::Acquire) {
-            thread::park();
-        }
-        break;
-    }
-}
-

/// A value which is initialized on the first access.
///
/// This type is a thread-safe `Lazy`, and can be used in statics.
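The block deleted above duplicated the mechanism that `std::sync::Once` already implements internally: a single atomic word whose two low bits carry the INCOMPLETE/RUNNING/COMPLETE state while the remaining bits hold the head of a linked list of parked waiter threads, which works because `Waiter` is 4-byte aligned. A standalone sketch of just that bit-packing trick, with illustrative names rather than the deleted items:

// Illustration only: pack a 2-bit state into the low bits of an aligned pointer.
const STATE_MASK: usize = 0b11;
const RUNNING: usize = 0x1;

// 4-byte alignment guarantees the two low bits of any `Waiter` address are zero,
// leaving them free to carry the state.
#[repr(align(4))]
struct Waiter {
    next: *const Waiter,
}

fn pack(head: *const Waiter, state: usize) -> usize {
    debug_assert_eq!(head as usize & STATE_MASK, 0);
    head as usize | state
}

fn unpack(word: usize) -> (*const Waiter, usize) {
    ((word & !STATE_MASK) as *const Waiter, word & STATE_MASK)
}

fn main() {
    let w = Waiter { next: std::ptr::null() };
    assert!(w.next.is_null());
    let packed = pack(&w, RUNNING);
    let (head, state) = unpack(packed);
    assert_eq!(head, &w as *const Waiter);
    assert_eq!(state, RUNNING);
}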
@@ -763,6 +652,7 @@ mod tests {

        let res = panic::catch_unwind(|| cell.get_or_try_init(|| -> Result<_, ()> { panic!() }));
        assert!(res.is_err());
+        assert!(!cell.is_initialized());
        assert!(cell.get().is_none());

        assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));