Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 0fa4f43

Browse files
authored
bpo-39542: Exclude trashcan from the limited C API (GH-18362)
Exclude trashcan mechanism from the limited C API: it requires access to PyTypeObject and PyThreadState structure fields, whereas these structures are opaque in the limited C API. The trashcan mechanism never worked with the limited C API. Move it from object.h to cpython/object.h.
1 parent f16433a commit 0fa4f43

3 files changed

Lines changed: 89 additions & 87 deletions

File tree

Include/cpython/object.h

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -445,6 +445,92 @@ PyAPI_FUNC(int) _PyObject_CheckConsistency(
445445
PyObject *op,
446446
int check_content);
447447

448+
449+
/* Trashcan mechanism, thanks to Christian Tismer.
450+
451+
When deallocating a container object, it's possible to trigger an unbounded
452+
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
453+
next" object in the chain to 0. This can easily lead to stack overflows,
454+
especially in threads (which typically have less stack space to work with).
455+
456+
A container object can avoid this by bracketing the body of its tp_dealloc
457+
function with a pair of macros:
458+
459+
static void
460+
mytype_dealloc(mytype *p)
461+
{
462+
... declarations go here ...
463+
464+
PyObject_GC_UnTrack(p); // must untrack first
465+
Py_TRASHCAN_BEGIN(p, mytype_dealloc)
466+
... The body of the deallocator goes here, including all calls ...
467+
... to Py_DECREF on contained objects. ...
468+
Py_TRASHCAN_END // there should be no code after this
469+
}
470+
471+
CAUTION: Never return from the middle of the body! If the body needs to
472+
"get out early", put a label immediately before the Py_TRASHCAN_END
473+
call, and goto it. Else the call-depth counter (see below) will stay
474+
above 0 forever, and the trashcan will never get emptied.
475+
476+
How it works: The BEGIN macro increments a call-depth counter. So long
477+
as this counter is small, the body of the deallocator is run directly without
478+
further ado. But if the counter gets large, it instead adds p to a list of
479+
objects to be deallocated later, skips the body of the deallocator, and
480+
resumes execution after the END macro. The tp_dealloc routine then returns
481+
without deallocating anything (and so unbounded call-stack depth is avoided).
482+
483+
When the call stack finishes unwinding again, code generated by the END macro
484+
notices this, and calls another routine to deallocate all the objects that
485+
may have been added to the list of deferred deallocations. In effect, a
486+
chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
487+
with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
488+
489+
Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
490+
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
491+
of the actual class being deallocated. Otherwise we might end up with a
492+
partially-deallocated object. To check this, the tp_dealloc function must be
493+
passed as second argument to Py_TRASHCAN_BEGIN().
494+
*/
495+
496+
/* The new thread-safe private API, invoked by the macros below. */
497+
PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
498+
PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
499+
500+
#define PyTrash_UNWIND_LEVEL 50
501+
502+
#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
503+
do { \
504+
PyThreadState *_tstate = NULL; \
505+
/* If "cond" is false, then _tstate remains NULL and the deallocator \
506+
* is run normally without involving the trashcan */ \
507+
if (cond) { \
508+
_tstate = PyThreadState_GET(); \
509+
if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \
510+
/* Store the object (to be deallocated later) and jump past \
511+
* Py_TRASHCAN_END, skipping the body of the deallocator */ \
512+
_PyTrash_thread_deposit_object(_PyObject_CAST(op)); \
513+
break; \
514+
} \
515+
++_tstate->trash_delete_nesting; \
516+
}
517+
/* The body of the deallocator is here. */
518+
#define Py_TRASHCAN_END \
519+
if (_tstate) { \
520+
--_tstate->trash_delete_nesting; \
521+
if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \
522+
_PyTrash_thread_destroy_chain(); \
523+
} \
524+
} while (0);
525+
526+
#define Py_TRASHCAN_BEGIN(op, dealloc) Py_TRASHCAN_BEGIN_CONDITION(op, \
527+
Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))
528+
529+
/* For backwards compatibility, these macros enable the trashcan
530+
* unconditionally */
531+
#define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
532+
#define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END
533+
448534
#ifdef __cplusplus
449535
}
450536
#endif

Include/object.h

Lines changed: 0 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -607,93 +607,6 @@ it carefully, it may save lots of calls to Py_INCREF() and Py_DECREF() at
607607
times.
608608
*/
609609

610-
611-
/* Trashcan mechanism, thanks to Christian Tismer.
612-
613-
When deallocating a container object, it's possible to trigger an unbounded
614-
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
615-
next" object in the chain to 0. This can easily lead to stack overflows,
616-
especially in threads (which typically have less stack space to work with).
617-
618-
A container object can avoid this by bracketing the body of its tp_dealloc
619-
function with a pair of macros:
620-
621-
static void
622-
mytype_dealloc(mytype *p)
623-
{
624-
... declarations go here ...
625-
626-
PyObject_GC_UnTrack(p); // must untrack first
627-
Py_TRASHCAN_BEGIN(p, mytype_dealloc)
628-
... The body of the deallocator goes here, including all calls ...
629-
... to Py_DECREF on contained objects. ...
630-
Py_TRASHCAN_END // there should be no code after this
631-
}
632-
633-
CAUTION: Never return from the middle of the body! If the body needs to
634-
"get out early", put a label immediately before the Py_TRASHCAN_END
635-
call, and goto it. Else the call-depth counter (see below) will stay
636-
above 0 forever, and the trashcan will never get emptied.
637-
638-
How it works: The BEGIN macro increments a call-depth counter. So long
639-
as this counter is small, the body of the deallocator is run directly without
640-
further ado. But if the counter gets large, it instead adds p to a list of
641-
objects to be deallocated later, skips the body of the deallocator, and
642-
resumes execution after the END macro. The tp_dealloc routine then returns
643-
without deallocating anything (and so unbounded call-stack depth is avoided).
644-
645-
When the call stack finishes unwinding again, code generated by the END macro
646-
notices this, and calls another routine to deallocate all the objects that
647-
may have been added to the list of deferred deallocations. In effect, a
648-
chain of N deallocations is broken into (N-1)/(PyTrash_UNWIND_LEVEL-1) pieces,
649-
with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
650-
651-
Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
652-
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
653-
of the actual class being deallocated. Otherwise we might end up with a
654-
partially-deallocated object. To check this, the tp_dealloc function must be
655-
passed as second argument to Py_TRASHCAN_BEGIN().
656-
*/
657-
658-
/* The new thread-safe private API, invoked by the macros below. */
659-
PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
660-
PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
661-
662-
#define PyTrash_UNWIND_LEVEL 50
663-
664-
#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
665-
do { \
666-
PyThreadState *_tstate = NULL; \
667-
/* If "cond" is false, then _tstate remains NULL and the deallocator \
668-
* is run normally without involving the trashcan */ \
669-
if (cond) { \
670-
_tstate = PyThreadState_GET(); \
671-
if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \
672-
/* Store the object (to be deallocated later) and jump past \
673-
* Py_TRASHCAN_END, skipping the body of the deallocator */ \
674-
_PyTrash_thread_deposit_object(_PyObject_CAST(op)); \
675-
break; \
676-
} \
677-
++_tstate->trash_delete_nesting; \
678-
}
679-
/* The body of the deallocator is here. */
680-
#define Py_TRASHCAN_END \
681-
if (_tstate) { \
682-
--_tstate->trash_delete_nesting; \
683-
if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \
684-
_PyTrash_thread_destroy_chain(); \
685-
} \
686-
} while (0);
687-
688-
#define Py_TRASHCAN_BEGIN(op, dealloc) Py_TRASHCAN_BEGIN_CONDITION(op, \
689-
Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))
690-
691-
/* For backwards compatibility, these macros enable the trashcan
692-
* unconditionally */
693-
#define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
694-
#define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END
695-
696-
697610
#ifndef Py_LIMITED_API
698611
# define Py_CPYTHON_OBJECT_H
699612
# include "cpython/object.h"
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
Exclude trashcan mechanism from the limited C API: it requires access to
2+
PyTypeObject and PyThreadState structure fields, whereas these structures
3+
are opaque in the limited C API.

0 commit comments

Comments
 (0)