@@ -734,13 +734,25 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
     unreachable->_gc_next &= _PyGC_PREV_MASK;
 }
 
+/* In theory, all tuples should be younger than the
+ * objects they refer to, as tuples are immutable.
+ * Therefore, untracking tuples in oldest-first order in the
+ * young generation before promoting them should have untracked
+ * all the tuples that can be untracked.
+ *
+ * Unfortunately, the C API allows tuples to be created
+ * and then filled in. So this won't untrack all tuples
+ * that can be untracked. It should untrack most of them
+ * and is much faster than a more complex approach that
+ * would untrack all relevant tuples.
+ */
 static void
 untrack_tuples(PyGC_Head *head)
 {
-    PyGC_Head *next, *gc = GC_NEXT(head);
+    PyGC_Head *gc = GC_NEXT(head);
     while (gc != head) {
         PyObject *op = FROM_GC(gc);
-        next = GC_NEXT(gc);
+        PyGC_Head *next = GC_NEXT(gc);
         if (PyTuple_CheckExact(op)) {
             _PyTuple_MaybeUntrack(op);
         }
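Note on the hunk above: the effect of untrack_tuples() is observable from Python via gc.is_tracked(). A minimal interactive sketch of that behaviour, based on the documented semantics of gc.is_tracked() and gc.collect(); exact results can vary across builds and with compile-time constant folding:

```python
import gc

t = tuple([1, 2, 3])        # built at runtime, so it starts out GC-tracked
print(gc.is_tracked(t))     # True
gc.collect()                # a collection runs the tuple-untracking pass
print(gc.is_tracked(t))     # False: every element is an untracked immutable

u = ([],)                   # refers to a mutable, tracked container
gc.collect()
print(gc.is_tracked(u))     # True: this tuple cannot be untracked
```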
@@ -1553,7 +1565,7 @@ assess_work_to_do(GCState *gcstate)
         scale_factor = 2;
     }
     intptr_t new_objects = gcstate->young.count;
-    intptr_t max_heap_fraction = new_objects*3/2;
+    intptr_t max_heap_fraction = new_objects*3/2;
     intptr_t heap_fraction = gcstate->heap_size / SCAN_RATE_DIVISOR / scale_factor;
     if (heap_fraction > max_heap_fraction) {
         heap_fraction = max_heap_fraction;
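For context on the max_heap_fraction line above: it caps the heap-proportional part of the incremental collection budget at 1.5x the number of newly created objects. A rough Python sketch of that arithmetic, assuming (not shown in this hunk) that the function returns the new-object count plus the clamped heap fraction; the divisor value below is a placeholder, not necessarily the real SCAN_RATE_DIVISOR in gc.c:

```python
# Illustrative sketch of the clamp in assess_work_to_do(); not CPython code.
SCAN_RATE_DIVISOR = 5  # placeholder for the real constant defined in gc.c

def work_to_do(new_objects: int, heap_size: int, scale_factor: int) -> int:
    scale_factor = max(scale_factor, 2)
    max_heap_fraction = new_objects * 3 // 2            # new_objects*3/2
    heap_fraction = heap_size // SCAN_RATE_DIVISOR // scale_factor
    heap_fraction = min(heap_fraction, max_heap_fraction)
    return new_objects + heap_fraction

# e.g. 10,000 new objects on a 1,000,000-object heap with scale_factor 10:
# heap_fraction = 20,000, clamped to 15,000, so the budget is 25,000 objects.
print(work_to_do(10_000, 1_000_000, 10))
```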
@@ -1569,12 +1581,13 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
     GCState *gcstate = &tstate->interp->gc;
     gcstate->work_to_do += assess_work_to_do(gcstate);
     untrack_tuples(&gcstate->young.head);
-    // if (gcstate->phase == GC_PHASE_MARK) {
-    //     Py_ssize_t objects_marked = mark_at_start(tstate);
-    //     GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
-    //     gcstate->work_to_do -= objects_marked;
-    //     return;
-    // }
+    if (gcstate->phase == GC_PHASE_MARK) {
+        Py_ssize_t objects_marked = mark_at_start(tstate);
+        GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
+        gcstate->work_to_do -= objects_marked;
+        validate_spaces(gcstate);
+        return;
+    }
     PyGC_Head *not_visited = &gcstate->old[gcstate->visited_space^1].head;
     PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
     PyGC_Head increment;
@@ -1583,7 +1596,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
     if (scale_factor < 2) {
         scale_factor = 2;
     }
-    intptr_t objects_marked = 0; // mark_stacks(tstate->interp, visited, gcstate->visited_space, false);
+    intptr_t objects_marked = mark_stacks(tstate->interp, visited, gcstate->visited_space, false);
     GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
     gcstate->work_to_do -= objects_marked;
     gc_list_set_space(&gcstate->young.head, gcstate->visited_space);
@@ -1645,7 +1658,6 @@ gc_collect_full(PyThreadState *tstate,
     gcstate->old[0].count = 0;
     gcstate->old[1].count = 0;
     completed_cycle(gcstate);
-    gcstate->work_to_do = -gcstate->young.threshold * 2;
     _PyGC_ClearAllFreeLists(tstate->interp);
     validate_spaces(gcstate);
     add_stats(gcstate, 2, stats);