@@ -261,13 +261,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
       break; \
     default: \
-      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
       break; \
   }
 #else
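Note on the hunk above: the store macros now receive the whole _Py_atomic_* struct pointer and take the address of its _value field themselves, instead of trusting each call site to pass a correctly cast raw pointer. A minimal sketch of the pattern, with a plain assignment standing in for the MSVC interlocked intrinsics and hypothetical demo_* names:

    /* Sketch only: plain assignment stands in for _InterlockedExchange;
     * the demo_* names are hypothetical. */
    #include <stdio.h>

    typedef struct { volatile long _value; } demo_atomic_long;

    /* As in the patch: the macro receives the atomic struct pointer and
     * derives &(...)->_value itself, so every expansion gets the same,
     * correctly typed pointer. */
    #define demo_store_32bit(ATOMIC_VAL, NEW_VAL) \
        (*(volatile long*)&((ATOMIC_VAL)->_value) = (long)(NEW_VAL))

    int main(void) {
        demo_atomic_long a = { 0 };
        demo_store_32bit(&a, 42);   /* caller passes &a, not a cast of &a._value */
        printf("%ld\n", a._value);  /* 42 */
        return 0;
    }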
@@ -277,13 +277,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
       break; \
     default: \
-      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
       break; \
   }
 
@@ -292,7 +292,7 @@ typedef struct _Py_atomic_int {
    gil_created() uses -1 as a sentinel value, if this returns
    a uintptr_t it will do an unsigned compare and crash
 */
-inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
+inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
     __int64 old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -323,11 +323,14 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
     return old;
 }
 
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
 #endif
 
-inline int _Py_atomic_load_32bit(volatile int* value, int order) {
+inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
     long old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -358,16 +361,19 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
     return old;
 }
 
+#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
   if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+    _Py_atomic_store_64bit((ATOMIC_VAL), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((ATOMIC_VAL), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
     sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
+    _Py_atomic_load_64bit((ATOMIC_VAL), ORDER) : \
+    _Py_atomic_load_32bit((ATOMIC_VAL), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
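The rename to *_impl plus a same-named forwarding macro keeps every call site unchanged while centralizing the struct-to-pointer cast, and the impl keeps returning intptr_t so gil_created()'s -1 sentinel still compares signed. A simplified sketch of that pattern, with memory ordering and intrinsics elided and hypothetical demo_* names:

    /* Simplified sketch: ordering and intrinsics elided;
     * demo_* names are hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { volatile uintptr_t _value; } demo_atomic_address;

    /* The _impl function keeps the raw-pointer signature... */
    static inline intptr_t demo_load_64bit_impl(volatile uintptr_t *value, int order) {
        (void)order;              /* a real build would pick an ordered load here */
        return (intptr_t)*value;  /* signed return: see the gil_created() comment */
    }

    /* ...and the same-named macro adapts the atomic struct to it, so
     * callers such as _Py_atomic_load_explicit need not change. */
    #define demo_load_64bit(ATOMIC_VAL, ORDER) \
        demo_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))

    int main(void) {
        demo_atomic_address gil = { (uintptr_t)-1 };  /* -1 sentinel */
        if (demo_load_64bit(&gil, 0) == -1)           /* signed compare works */
            puts("sentinel detected");
        return 0;
    }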
@@ -422,7 +428,7 @@ typedef struct _Py_atomic_int {
    gil_created() uses -1 as a sentinel value, if this returns
    a uintptr_t it will do an unsigned compare and crash
 */
-inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
+inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
     uintptr_t old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -453,11 +459,14 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
     return old;
 }
 
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
 #endif
 
-inline int _Py_atomic_load_32bit(volatile int* value, int order) {
+inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
     int old;
     switch (order) {
     case _Py_memory_order_acquire:
@@ -488,16 +497,19 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
     return old;
 }
 
+#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
+    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
+
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
   if (sizeof((ATOMIC_VAL)->_value) == 8) { \
-    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
+    _Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
+    _Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
     sizeof((ATOMIC_VAL)->_value) == 8 ? \
-    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
-    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
+    _Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
+    _Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
   )
 #endif
 #else /* !gcc x86 !_msc_ver */
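_Py_atomic_load_explicit picks the operand width with a sizeof ternary. Because both branches of a ternary must type-check for every argument, the width-specific macros have to accept arguments that are valid in either branch, which is exactly what passing the struct pointer directly achieves. A sketch of that dispatch, with hypothetical demo_* names:

    /* Sketch of the sizeof dispatch; demo_* names are hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { volatile uintptr_t _value; } demo_atomic_address;
    typedef struct { volatile int _value; } demo_atomic_int;

    #define demo_load_64bit(ATOMIC_VAL) ((intptr_t)(ATOMIC_VAL)->_value)
    #define demo_load_32bit(ATOMIC_VAL) ((int)(ATOMIC_VAL)->_value)

    /* sizeof() is a compile-time constant, so the dead branch is dropped,
     * but both branches must still compile for every argument type. */
    #define demo_load_explicit(ATOMIC_VAL) \
        (sizeof((ATOMIC_VAL)->_value) == 8 ? \
            demo_load_64bit(ATOMIC_VAL) : \
            demo_load_32bit(ATOMIC_VAL))

    int main(void) {
        demo_atomic_address a = { 7 };
        demo_atomic_int i = { 3 };
        printf("%ld %d\n", (long)demo_load_explicit(&a),
                           (int)demo_load_explicit(&i));
        return 0;
    }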
@@ -529,16 +541,16 @@ typedef struct _Py_atomic_int {
 
 /* Standardized shortcuts. */
 #define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
-    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
+    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst)
 #define _Py_atomic_load(ATOMIC_VAL) \
-    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)
+    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst)
 
 /* Python-local extensions */
 
 #define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
-    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
+    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed)
 #define _Py_atomic_load_relaxed(ATOMIC_VAL) \
-    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
+    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed)
 
 #ifdef __cplusplus
 }
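After this change, callers keep passing the address of the atomic struct to the shortcut macros; the macros handle the _value field and the casts internally. A usage sketch, assuming the internal CPython header is available; the variable name is illustrative:

    /* Usage sketch: pyatomic.h is a CPython-internal header;
     * demo_flag is an illustrative name. */
    #include "pyatomic.h"

    static _Py_atomic_int demo_flag;

    static void demo(void)
    {
        _Py_atomic_store(&demo_flag, 1);             /* seq_cst store */
        if (_Py_atomic_load_relaxed(&demo_flag)) {   /* relaxed load */
            _Py_atomic_store_relaxed(&demo_flag, 0);
        }
    }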