@@ -2225,8 +2225,6 @@ _Py_MergeZeroRefcount(PyObject *op)
 {
     assert(_Py_atomic_load_uint32_relaxed(&op->ob_ref_local) == 0);

-    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
-
     Py_ssize_t refcount;
     for (;;) {
         uint32_t shared = _Py_atomic_load_uint32_relaxed(&op->ob_ref_shared);
@@ -2245,6 +2243,9 @@ _Py_MergeZeroRefcount(PyObject *op)
             // count was temporarily negative and hasn't been proceessed yet.
             // We don't want to merge it yet because that might result in the
             // object being freed while it's still in the queue.
+            // We still need to zero the thread-id so that subsequent decrements
+            // from this thread do not push the ob_ref_local negative.
+            _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
             break;
         }

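The unconditional `_Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0)` removed at the top of `_Py_MergeZeroRefcount` reappears on each exit path instead: in the queued branch above, in the immediate-free branch, and after the merge loop. Keeping `ob_tid` intact until the shared state has been inspected matters because the owner-thread decrement fast path keys off it. The sketch below illustrates that fast path; it is not the branch's actual `Py_DECREF`, and `_Py_ThreadId()` plus the plain (non-atomic) field accesses are assumptions made for illustration only.

    static inline void
    sketch_owner_decref(PyObject *op)
    {
        if (op->ob_tid == _Py_ThreadId()) {
            /* Biased path: only the owning thread touches ob_ref_local, so no
             * atomics are needed here.  If ob_tid stayed set after the local
             * count reached zero, this subtraction would push ob_ref_local
             * negative, which is what the new comment warns about. */
            op->ob_ref_local -= 1;
            if (op->ob_ref_local == 0) {
                _Py_MergeZeroRefcount(op);
            }
        }
        else {
            /* ob_tid is zero (disowned) or belongs to another thread: take
             * the shared, atomic path instead. */
            _Py_DecRefShared(op);
        }
    }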
@@ -2257,6 +2258,7 @@ _Py_MergeZeroRefcount(PyObject *op)
              * a) weak references
              * b) dangling pointers (e.g. loading from a list or dict)
              */
+            _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
             op->ob_ref_local = 0;
             PyObject_Del(op);
             return;
@@ -2272,6 +2274,7 @@ _Py_MergeZeroRefcount(PyObject *op)
         }
     }

+    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);
     if (refcount == 0) {
         _Py_Dealloc(op);
     }
@@ -2355,12 +2358,15 @@ _Py_TryIncRefShared(PyObject *op)
 void
 _Py_DecRefShared(PyObject *op)
 {
-    // TODO: fixme
     uint32_t old_shared;
     uint32_t new_shared;
-    int ok;

-    do {
+    // We need to grab the thread-id before modifying the refcount
+    // because the owning thread may set it to zero if we mark the
+    // object as queued.
+    uintptr_t tid = _PyObject_ThreadId(op);
+
+    for (;;) {
         old_shared = _Py_atomic_load_uint32_relaxed(&op->ob_ref_shared);

         new_shared = old_shared;
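The captured `tid` exists to close a race with the owner: once the compare-exchange below marks the object as queued, the owning thread's `_Py_MergeZeroRefcount` may zero `op->ob_tid` (the store added in the earlier hunk), so reading it afterwards could return 0 and the object could no longer be routed back to its owner. The following is a self-contained toy model of that ordering rule, not CPython code; the struct and names are invented for illustration.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Toy model of "capture the owner id before publishing the queued flag". */
    struct toy_obj {
        _Atomic uintptr_t tid;      /* owner id, zeroed once the owner disowns it */
        _Atomic unsigned  queued;   /* set by a non-owner, observed by the owner  */
    };

    static uintptr_t
    toy_mark_queued(struct toy_obj *op)
    {
        /* Read the owner id first: after the store below the owner is free to
         * zero op->tid at any time, so a later read might come back empty. */
        uintptr_t tid = atomic_load_explicit(&op->tid, memory_order_relaxed);
        atomic_store_explicit(&op->queued, 1, memory_order_release);
        return tid;   /* hand this to the queueing step rather than re-reading */
    }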
@@ -2369,15 +2375,22 @@ _Py_DecRefShared(PyObject *op)
         }
         new_shared -= (1 << _Py_REF_SHARED_SHIFT);

-        ok = _Py_atomic_compare_exchange_uint32(
+        int ok = _Py_atomic_compare_exchange_uint32(
             &op->ob_ref_shared,
             old_shared,
             new_shared);
-    } while (!ok);
+
+        if (ok) {
+            break;
+        }
+    }

     if (_Py_REF_IS_MERGED(new_shared)) {
         // TOOD(sgross): implementation defined behavior
         assert(((int32_t)new_shared) >= 0);
+        if (((int32_t)new_shared) < 0) {
+            Py_FatalError("negative refcount on merged object");
+        }
     }

     if (_Py_REF_IS_QUEUED(new_shared) != _Py_REF_IS_QUEUED(old_shared)) {
@@ -2389,7 +2402,7 @@ _Py_DecRefShared(PyObject *op)
             }
         }
         else {
-            _Py_queue_object(op);
+            _Py_queue_object(op, tid);
         }
     }
     else if (_Py_REF_IS_MERGED(new_shared) &&
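`_Py_queue_object` accordingly takes the owner id as an explicit argument instead of reading `op->ob_tid` itself, which may already have been cleared by the time the object is queued. The declaration below is only the shape implied by the call site; the `void` return type is an assumption, and the real prototype lives elsewhere in the branch.

    void _Py_queue_object(PyObject *op, uintptr_t tid);   /* assumed prototype */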