@@ -394,16 +394,17 @@ update_refs(PyGC_Head *containers)
 
     while (gc != containers) {
         next = GC_NEXT(gc);
+        PyObject *op = FROM_GC(gc);
         /* Move any object that might have become immortal to the
          * permanent generation as the reference count is not accurately
          * reflecting the actual number of live references to this object
          */
-        if (_Py_IsImmortal(FROM_GC(gc))) {
+        if (_Py_IsImmortal(op)) {
             gc_list_move(gc, &get_gc_state()->permanent_generation.head);
             gc = next;
             continue;
         }
-        gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
+        gc_reset_refs(gc, Py_REFCNT(op));
         /* Python's cyclic gc should never see an incoming refcount
          * of 0: if something decref'ed to 0, it should have been
          * deallocated immediately at that time.
@@ -422,7 +423,7 @@ update_refs(PyGC_Head *containers)
          * so serious that maybe this should be a release-build
          * check instead of an assert?
          */
-        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
+        _PyObject_ASSERT(op, gc_get_refs(gc) != 0);
         gc = next;
     }
 }
@@ -488,7 +489,7 @@ visit_reachable(PyObject *op, void *arg)
     }
     // It would be a logic error elsewhere if the collecting flag were set on
     // an untracked object.
-    assert(gc->_gc_next != 0);
+    _PyObject_ASSERT(op, gc->_gc_next != 0);
 
     if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
         /* This had gc_refs = 0 when move_unreachable got
@@ -660,7 +661,9 @@ static void
 move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
 {
     PyGC_Head *gc, *next;
-    assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
+    _PyObject_ASSERT(
+        FROM_GC(unreachable),
+        (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
 
     /* March over unreachable. Move objects with finalizers into
      * `finalizers`.
@@ -683,10 +686,14 @@ static inline void
 clear_unreachable_mask(PyGC_Head *unreachable)
 {
     /* Check that the list head does not have the unreachable bit set */
-    assert(((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);
+    _PyObject_ASSERT(
+        FROM_GC(unreachable),
+        ((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);
+    _PyObject_ASSERT(
+        FROM_GC(unreachable),
+        (unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
 
     PyGC_Head *gc, *next;
-    assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
     for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
         _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE);
         gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
@@ -840,7 +847,7 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
              */
             if (gc_is_collecting(AS_GC((PyObject *)wr))) {
                 /* it should already have been cleared above */
-                assert(wr->wr_object == Py_None);
+                _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
                 continue;
             }
 
@@ -851,9 +858,8 @@ handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
 
             /* Move wr to wrcb_to_call, for the next pass. */
             wrasgc = AS_GC((PyObject *)wr);
-            assert(wrasgc != next); /* wrasgc is reachable, but
-                                       next isn't, so they can't
-                                       be the same */
+            // wrasgc is reachable, but next isn't, so they can't be the same
+            _PyObject_ASSERT(wr, wrasgc != next);
             gc_list_move(wrasgc, &wrcb_to_call);
         }
     }
@@ -1773,8 +1779,9 @@ _Py_ScheduleGC(PyInterpreterState *interp)
 void
 _PyObject_GC_Link(PyObject *op)
 {
-    PyGC_Head *g = AS_GC(op);
-    assert(((uintptr_t)g & (sizeof(uintptr_t)-1)) == 0);  // g must be correctly aligned
+    PyGC_Head *gc = AS_GC(op);
+    // gc must be correctly aligned
+    _PyObject_ASSERT(op, ((uintptr_t)gc & (sizeof(uintptr_t)-1)) == 0);
 
     PyThreadState *tstate = _PyThreadState_GET();
     GCState *gcstate = &tstate->interp->gc;
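
Note on the pattern used throughout this diff: unlike a bare assert(), _PyObject_ASSERT(op, expr) is given the object under test, so a failing check can report that object (in CPython the failure is routed through _PyObject_AssertFailed(), which dumps the object) before aborting. Below is a minimal, self-contained sketch of that idea only; Obj, obj_dump and OBJ_ASSERT are invented names for illustration and are not CPython's actual definitions.

    /* sketch.c -- toy version of an assert that also dumps the object it was
     * checking.  Invented names; CPython's real _PyObject_ASSERT differs. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        long refcnt;
        const char *type_name;
    } Obj;

    /* Print a few identifying facts about the object under test. */
    static void
    obj_dump(const Obj *op)
    {
        fprintf(stderr, "object  : %p\n", (const void *)op);
        fprintf(stderr, "refcount: %ld\n", op->refcnt);
        fprintf(stderr, "type    : %s\n", op->type_name);
    }

    /* Like assert(expr), but dump `op` before aborting when the check fails. */
    #define OBJ_ASSERT(op, expr)                                      \
        do {                                                          \
            if (!(expr)) {                                            \
                fprintf(stderr, "%s:%d: Assertion \"%s\" failed\n",   \
                        __FILE__, __LINE__, #expr);                   \
                obj_dump(op);                                         \
                abort();                                              \
            }                                                         \
        } while (0)

    int
    main(void)
    {
        Obj o = { .refcnt = 0, .type_name = "list" };
        /* Mirrors the update_refs() check: a tracked object should never
         * enter the collector with a refcount of 0. */
        OBJ_ASSERT(&o, o.refcnt != 0);   /* fails here and dumps &o */
        return 0;
    }

Compiled and run, the failing OBJ_ASSERT prints the assertion text plus the object's address, refcount and type name before aborting, which is roughly the extra context these assert() to _PyObject_ASSERT() replacements aim to provide.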