
Python code coverage for Modules/gcmodule.c

/*

 Reference Cycle Garbage Collection
 ==================================

 Neil Schemenauer <nas@arctrix.com>

 Based on a post on the python-dev list.  Ideas from Guido van Rossum,
 Eric Tiedemann, and various others.

 http://www.arctrix.com/nas/python/gc/

 The following mailing list threads provide a historical perspective on
 the design of this module.  Note that a fair amount of refinement has
 occurred since those discussions.

 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
 http://mail.python.org/pipermail/python-dev/2000-March/002497.html

 For a high-level view of the collection process, read the collect
 function.

*/

#include "Python.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
#include "pydtrace.h"
#include "pytime.h"             /* for _PyTime_GetMonotonicClock() */

/*[clinic input]
module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))

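/* A minimal standalone sketch (not compiled) of the header-before-object
 * layout that AS_GC/FROM_GC rely on, using hypothetical header_t/payload_t
 * stand-ins for PyGC_Head/PyObject.  Illustrative only.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

typedef struct { int flags; } header_t;    /* stand-in for PyGC_Head */
typedef struct { int value; } payload_t;   /* stand-in for PyObject */

/* Allocate the header and the payload in one block and hand out a pointer
 * to the payload, just as _PyObject_GC_Alloc() does further below. */
static payload_t *
alloc_with_header(void)
{
    header_t *h = malloc(sizeof(header_t) + sizeof(payload_t));
    if (h == NULL)
        return NULL;
    h->flags = 0;
    return (payload_t *)(h + 1);           /* FROM_GC analogue */
}

int
main(void)
{
    payload_t *p = alloc_with_header();
    header_t *h = (header_t *)p - 1;       /* AS_GC analogue */
    printf("header at %p, payload at %p\n", (void *)h, (void *)p);
    free(h);                               /* free the block via the header */
    return 0;
}
#endif
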
/*** Global GC state ***/

struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold */
    int count; /* count of allocations or collections of younger
                  generations */
};

/* If we change this, we need to change the default value in the signature of
   gc.collect. */
#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head,                       threshold,      count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}},   700,            0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}},   10,             0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}},   10,             0},
};

PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);

static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* a list of callbacks to be invoked when collection is performed */
static PyObject *callbacks = NULL;

/* This is the number of objects that survived the last full collection.  It
   approximates the number of long lived objects tracked by the GC.

   (by "full collection", we mean a collection of the oldest generation).
*/
static Py_ssize_t long_lived_total = 0;

/* This is the number of objects that survived all "non-full" collections,
   and are waiting to undergo a full collection for the first time.
*/
static Py_ssize_t long_lived_pending = 0;

/*
   NOTE: about the counting of long-lived objects.

   To limit the cost of garbage collection, there are two strategies:
     - make each collection faster, e.g. by scanning fewer objects
     - do fewer collections
   This heuristic is about the latter strategy.

   In addition to the various configurable thresholds, we only trigger a
   full collection if the ratio
    long_lived_pending / long_lived_total
   is above a given value (hardwired to 25%).  (An illustrative sketch of
   this trigger rule follows this comment.)

   The reason is that, while "non-full" collections (i.e., collections of
   the young and middle generations) will always examine roughly the same
   number of objects -- determined by the aforementioned thresholds --,
   the cost of a full collection is proportional to the total number of
   long-lived objects, which is virtually unbounded.

   Indeed, it has been remarked that doing a full collection every
   <constant number> of object creations entails a dramatic performance
   degradation in workloads which consist of creating and storing lots of
   long-lived objects (e.g. building a large list of GC-tracked objects would
   show quadratic performance, instead of linear as expected: see issue #4074).

   Using the above ratio, instead, yields amortized linear performance in
   the total number of objects (the effect of which can be summarized
   thusly: "each full garbage collection is more and more costly as the
   number of objects grows, but we do fewer and fewer of them").

   This heuristic was suggested by Martin von Löwis on python-dev in
   June 2008.  His original analysis and proposal can be found at:
   http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/

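/* A minimal sketch (not compiled) of the trigger rule described above, with
 * the 25% ratio hardwired the same way collect_generations() does below.
 * The function name is illustrative, not part of this module.
 */
#if 0
static int
should_do_full_collection(Py_ssize_t pending, Py_ssize_t total)
{
    /* A full collection only pays off once enough survivors have piled
     * up in the middle generation relative to the long-lived total. */
    return pending > total / 4;     /* i.e. pending / total > 25% */
}
#endif
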
/*
   NOTE: about untracking of mutable objects.

   Certain types of container cannot participate in a reference cycle, and
   so do not need to be tracked by the garbage collector.  Untracking these
   objects reduces the cost of garbage collections.  However, determining
   which objects may be untracked is not free, and the costs must be
   weighed against the benefits for garbage collection.

   There are two possible strategies for when to untrack a container
   (a sketch of the untracking rule follows this comment):

   i) When the container is created.
   ii) When the container is examined by the garbage collector.

   Tuples containing only immutable objects (integers, strings, etc., and
   recursively, tuples of immutable objects) do not need to be tracked.
   The interpreter creates a large number of tuples, many of which will
   not survive until garbage collection.  It is therefore not worthwhile
   to untrack eligible tuples at creation time.

   Instead, all tuples except the empty tuple are tracked when created.
   During garbage collection it is determined whether any surviving tuples
   can be untracked.  A tuple can be untracked if all of its contents are
   already not tracked.  Tuples are examined for untracking in all garbage
   collection cycles.  It may take more than one cycle to untrack a tuple.

   Dictionaries containing only immutable objects also do not need to be
   tracked.  Dictionaries are untracked when created.  If a tracked item is
   inserted into a dictionary (either as a key or value), the dictionary
   becomes tracked.  During a full garbage collection (all generations),
   the collector will untrack any dictionaries whose contents are not
   tracked.

   The module provides the Python function is_tracked(obj), which returns
   the CURRENT tracking status of the object.  Subsequent garbage
   collections may change the tracking status of the object.

   Untracking of certain containers was introduced in issue #4688, and
   the algorithm was refined in response to issue #14775.
*/

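/* Toy model (not compiled) of the untracking rule described above: a
 * container may be untracked once none of its elements is tracked.  The
 * `elem_tracked` flags stand in for IS_TRACKED() checks on real elements;
 * both the function and its arguments are illustrative only.
 */
#if 0
static int
can_untrack(const int *elem_tracked, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        if (elem_tracked[i])
            return 0;   /* a tracked element may still form a cycle */
    }
    return 1;           /* nothing inside us can participate in a cycle */
}
#endif
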
/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              (DEBUG_COLLECTABLE | \
                                 DEBUG_UNCOLLECTABLE | \
                                 DEBUG_SAVEALL)
static int debug;

/* Running stats per generation */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

static struct gc_generation_stats generation_stats[NUM_GENERATIONS];

/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)

/*** list functions ***/

static void
gc_list_init(PyGC_Head *list)
{
    list->gc.gc_prev = list;
    list->gc.gc_next = list;
}

static int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->gc.gc_next == list);
}

#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif

/* Remove `node` from the gc list it's currently in. */
static void
gc_list_remove(PyGC_Head *node)
{
    node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
    node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
    node->gc.gc_next = NULL; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *new_prev;
    PyGC_Head *current_prev = node->gc.gc_prev;
    PyGC_Head *current_next = node->gc.gc_next;
    /* Unlink from current list. */
    current_prev->gc.gc_next = current_next;
    current_next->gc.gc_prev = current_prev;
    /* Relink at end of new list. */
    new_prev = node->gc.gc_prev = list->gc.gc_prev;
    new_prev->gc.gc_next = list->gc.gc_prev = node;
    node->gc.gc_next = list;
}

/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    PyGC_Head *tail;
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        tail = to->gc.gc_prev;
        tail->gc.gc_next = from->gc.gc_next;
        tail->gc.gc_next->gc.gc_prev = tail;
        to->gc.gc_prev = from->gc.gc_prev;
        to->gc.gc_prev->gc.gc_next = to;
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        n++;
    }
    return n;
}

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

/*** end of list stuff ***/


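/* A small usage sketch (not compiled) exercising the list primitives above
 * on bare, stack-allocated PyGC_Head sentinels.  Real nodes are headers of
 * live objects; only the circular-list mechanics are demonstrated here.
 */
#if 0
static void
gc_list_demo(void)
{
    PyGC_Head a, b;
    PyGC_Head node;

    gc_list_init(&a);                   /* a: empty circular list */
    gc_list_init(&b);
    gc_list_init(&node);                /* link node to itself first */

    gc_list_move(&node, &a);            /* a: [node] */
    assert(!gc_list_is_empty(&a));
    assert(gc_list_size(&a) == 1);

    gc_list_merge(&a, &b);              /* b: [node]; a becomes empty */
    assert(gc_list_is_empty(&a));
    assert(gc_list_size(&b) == 1);

    gc_list_remove(&node);              /* b: empty; node now untracked */
    assert(gc_list_is_empty(&b));
}
#endif
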
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0: if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers): a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(_PyGCHead_REFS(gc) != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
        if (_PyGCHead_REFS(gc) > 0)
            _PyGCHead_DECREF(gc);
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        NULL);
    }
}

/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            _PyGCHead_SET_REFS(gc, 1);
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            _PyGCHead_SET_REFS(gc, 1);
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
        else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
        }
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants: all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}

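/* Standalone toy (not compiled) of the three-phase algorithm implemented by
 * update_refs(), subtract_refs() and move_unreachable(): copy refcounts,
 * subtract references internal to the set, then propagate reachability from
 * the nodes that retain external references.  All names are illustrative,
 * and the recursive marking is an analogue of the real list-based pass.
 */
#if 0
#include <stdio.h>

#define NNODES 3

typedef struct {
    int refcnt;             /* stand-in for ob_refcnt */
    int gc_refs;            /* scratch copy, as in update_refs() */
    int reachable;
    int nedges;
    int edges[2];           /* indices of nodes this node references */
} node_t;

static void
mark_reachable(node_t *g, int i)
{
    int k;
    if (g[i].reachable)
        return;
    g[i].reachable = 1;
    for (k = 0; k < g[i].nedges; k++)
        mark_reachable(g, g[i].edges[k]);
}

int
main(void)
{
    /* Nodes 0 and 1 reference each other (a cycle with no external refs);
     * node 2 is referenced once from "outside" and references nothing. */
    node_t g[NNODES] = {
        {1, 0, 0, 1, {1, 0}},   /* 0 -> 1; refcnt 1, held by node 1 */
        {1, 0, 0, 1, {0, 0}},   /* 1 -> 0; refcnt 1, held by node 0 */
        {1, 0, 0, 0, {0, 0}},   /* 2; refcnt 1, held externally */
    };
    int i, k;

    /* update_refs(): copy true refcounts into the scratch field */
    for (i = 0; i < NNODES; i++)
        g[i].gc_refs = g[i].refcnt;

    /* subtract_refs(): cancel references internal to the set */
    for (i = 0; i < NNODES; i++)
        for (k = 0; k < g[i].nedges; k++)
            g[g[i].edges[k]].gc_refs--;

    /* move_unreachable() analogue: whatever still has gc_refs > 0 is an
     * external root; everything transitively reachable from a root lives */
    for (i = 0; i < NNODES; i++)
        if (g[i].gc_refs > 0)
            mark_reachable(g, i);

    for (i = 0; i < NNODES; i++)
        printf("node %d: %s\n", i,
               g[i].reachable ? "reachable" : "unreachable (cyclic trash)");
    return 0;
}
#endif
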
/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = head->gc.gc_next;
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = gc->gc.gc_next;
        if (PyDict_CheckExact(op))
            _PyDict_MaybeUntrack(op);
        gc = next;
    }
}

/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)
{
    return op->ob_type->tp_del != NULL;
}

/* Move the objects in unreachable with tp_del slots into `finalizers`.
 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc;
    PyGC_Head *next;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (has_legacy_finalizer(op)) {
            gc_list_move(gc, finalizers);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

/* A traversal callback for move_legacy_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        if (IS_TENTATIVELY_UNREACHABLE(op)) {
            PyGC_Head *gc = AS_GC(op);
            gc_list_move(gc, tolist);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable set
 * into finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                     PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;      /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure: it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;           /* no callback */

            /* Headache time.  `op` is going away, and is weakly referenced
             * by `wr`, which has a callback.  Should the callback be
             * invoked?  If wr is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref
             *    are both going away, so it's legitimate to pretend the
             *    weakref is going away first.  The user has to ensure a
             *    weakref outlives its referent if they want a guarantee
             *    that the wr callback will get invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is
             *    also in cyclic trash (CT), then although the CT is
             *    unreachable from outside the current generation, CT may
             *    be reachable from the callback.  Then the callback could
             *    resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this
             * case, wr is simply left in the unreachable set.  Note that
             * because we already called _PyWeakref_ClearRef(wr), its
             * callback will never trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:
             * the weakref outlived the trash.  Note that since wr isn't CT
             * in this case, its callback can't be CT either -- wr acted as
             * an external root to this generation, and therefore its
             * callback did too.  So nothing in CT is reachable from the
             * callback either, so it's hard to imagine how calling it
             * later could create a problem for us.  wr is moved to
             * wrcb_to_call in this case.
             */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example: a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}

static void
debug_cycle(const char *msg, PyObject *op)
{
    PySys_FormatStderr("gc: %s <%s %p>\n",
                       msg, Py_TYPE(op)->tp_name, op);
}

/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
 * The finalizers list is made empty on a successful return.
 */
static int
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(garbage, op) < 0)
                return -1;
        }
    }

    gc_list_merge(finalizers, old);
    return 0;
}

/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        if (!_PyGCHead_FINALIZED(gc) &&
                PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc, 1);
            Py_INCREF(op);
            finalize(op);
            Py_DECREF(op);
        }
    }
    gc_list_merge(&seen, collectable);
}

/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer). */
static int
check_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        assert(_PyGCHead_REFS(gc) != 0);
    }
    subtract_refs(collectable);
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) >= 0);
        if (_PyGCHead_REFS(gc) != 0)
            return -1;
    }
    return 0;
}

static void
revive_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, GC_REACHABLE);
    }
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        if (debug & DEBUG_SAVEALL) {
            PyList_Append(garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
}

/* This is the main function.  Read this to understand how the
 * collection process works. */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &generation_stats[generation];

    if (debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                               gc_list_size(GEN_HEAD(i)));
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE: This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up.  See issue #14775. */
        untrack_dicts(young);
        long_lived_pending = 0;
        long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
         gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    return n+m;
}

/* Invoke progress callbacks to notify clients that garbage collection
 * is starting or stopping
 */
static void
invoke_gc_callback(const char *phase, int generation,
                   Py_ssize_t collected, Py_ssize_t uncollectable)
{
    Py_ssize_t i;
    PyObject *info = NULL;

    /* we may get called very early */
    if (callbacks == NULL)
        return;
    /* The local variable cannot be rebound, check it for sanity */
    assert(callbacks != NULL && PyList_CheckExact(callbacks));
    if (PyList_GET_SIZE(callbacks) != 0) {
        info = Py_BuildValue("{sisnsn}",
                             "generation", generation,
                             "collected", collected,
                             "uncollectable", uncollectable);
        if (info == NULL) {
            PyErr_WriteUnraisable(NULL);
            return;
        }
    }
    for (i=0; i<PyList_GET_SIZE(callbacks); i++) {
        PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
        Py_INCREF(cb); /* make sure cb doesn't go away */
        r = PyObject_CallFunction(cb, "sO", phase, info);
        Py_XDECREF(r);
        if (r == NULL)
            PyErr_WriteUnraisable(cb);
        Py_DECREF(cb);
    }
    Py_XDECREF(info);
}

/* Perform garbage collection of a generation and invoke
 * progress callbacks.
 */
static Py_ssize_t
collect_with_callback(int generation)
{
    Py_ssize_t result, collected, uncollectable;
    invoke_gc_callback("start", generation, 0, 0);
    result = collect(generation, &collected, &uncollectable, 0);
    invoke_gc_callback("stop", generation, collected, uncollectable);
    return result;
}

static Py_ssize_t
collect_generations(void)
{
    int i;
    Py_ssize_t n = 0;

    /* Find the oldest generation (highest numbered) where the count
     * exceeds the threshold.  Objects in that generation and
     * generations younger than it will be collected. */
    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (generations[i].count > generations[i].threshold) {
            /* Avoid quadratic performance degradation in number
               of tracked objects.  See comments at the beginning
               of this file, and issue #4074.
            */
            if (i == NUM_GENERATIONS - 1
                && long_lived_pending < long_lived_total / 4)
                continue;
            n = collect_with_callback(i);
            break;
        }
    }
    return n;
}

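/* A toy simulation (not compiled) of the counter cascade driven by the
 * thresholds above: every gen-0 collection bumps gen 1's count and every
 * gen-1 collection bumps gen 2's count, so older generations are collected
 * geometrically less often.  The allocation loop and names are illustrative;
 * the long-lived ratio test is deliberately omitted for brevity.
 */
#if 0
#include <stdio.h>

int
main(void)
{
    int threshold[3] = {700, 10, 10};
    long count[3] = {0, 0, 0};
    long collections[3] = {0, 0, 0};
    long alloc;

    for (alloc = 0; alloc < 1000000; alloc++) {
        int i, oldest = -1;
        count[0]++;                         /* one new tracked object */
        /* find the oldest generation over its threshold, as
         * collect_generations() does above */
        for (i = 2; i >= 0; i--) {
            if (count[i] > threshold[i]) {
                oldest = i;
                break;
            }
        }
        if (oldest >= 0) {
            collections[oldest]++;
            if (oldest + 1 < 3)
                count[oldest + 1]++;        /* promote survivors */
            for (i = 0; i <= oldest; i++)
                count[i] = 0;
        }
    }
    printf("collections: gen0=%ld gen1=%ld gen2=%ld\n",
           collections[0], collections[1], collections[2]);
    return 0;
}
#endif
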
#include "clinic/gcmodule.c.h"

/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    enabled = 1;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.disable

Disable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    enabled = 0;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.isenabled -> bool

Returns true if automatic garbage collection is enabled.
[clinic start generated code]*/

static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
    return enabled;
}

/*[clinic input]
gc.collect -> Py_ssize_t

    generation: int(c_default="NUM_GENERATIONS - 1") = 2

Run the garbage collector.

With no arguments, run a full collection.  The optional argument
may be an integer specifying which generation to collect.  A ValueError
is raised if the generation number is invalid.

The number of unreachable objects is returned.
[clinic start generated code]*/

static Py_ssize_t
gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
    Py_ssize_t n;

    if (generation < 0 || generation >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return -1;
    }

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect_with_callback(generation);
        collecting = 0;
    }

    return n;
}

/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    debug = flags;

    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    return debug;
}

PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &generations[0].threshold,
                          &generations[1].threshold,
                          &generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        generations[i].threshold = generations[2].threshold;
    }

    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}

/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}

static int
referrersvisit(PyObject* obj, PyObject *objs)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
        if (PyTuple_GET_ITEM(objs, i) == obj)
            return 1;
    return 0;
}

static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}

PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/* Append obj to list; return true if error (out of memory), false if OK. */
static int
referentsvisit(PyObject *obj, PyObject *list)
{
    return PyList_Append(list, obj) < 0;
}

PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/*[clinic input]
gc.get_objects

Return a list of objects tracked by the collector (excluding the list returned).
[clinic start generated code]*/

static PyObject *
gc_get_objects_impl(PyObject *module)
/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}


/*[clinic input]
gc.is_tracked

    obj: object
    /

Returns true if the object is tracked by the garbage collector.

Simple atomic objects will return false.
[clinic start generated code]*/

static PyObject *
gc_is_tracked(PyObject *module, PyObject *obj)
/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
{
    PyObject *result;

    if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
        result = Py_True;
    else
        result = Py_False;
    Py_INCREF(result);
    return result;
}


PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n");

static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL,      NULL}           /* Sentinel */
};

static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",               /* m_name */
    gc__doc__,          /* m_doc */
    -1,                 /* m_size */
    GcMethods,          /* m_methods */
    NULL,               /* m_reload */
    NULL,               /* m_traverse */
    NULL,               /* m_clear */
    NULL                /* m_free */
};

PyMODINIT_FUNC
PyInit_gc(void)
{
    PyObject *m;

    m = PyModule_Create(&gcmodule);

    if (m == NULL)
        return NULL;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return NULL;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return NULL;

    if (callbacks == NULL) {
        callbacks = PyList_New(0);
        if (callbacks == NULL)
            return NULL;
    }
    Py_INCREF(callbacks);
    if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
        return NULL;

#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
    return m;
}

/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect_with_callback(NUM_GENERATIONS - 1);
        collecting = 0;
    }

    return n;
}

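/* A hedged usage sketch (not compiled): how an embedding application might
 * drive a full collection through this C API.  The init/teardown calls are
 * standard CPython embedding boilerplate, not part of this module, and the
 * embedded script is illustrative only.
 */
#if 0
#include <Python.h>

int
main(void)
{
    Py_Initialize();
    PyRun_SimpleString("class C: pass\n"
                       "a = C(); b = C()\n"
                       "a.other = b; b.other = a\n"     /* build a cycle */
                       "del a, b\n");
    /* The cycle above is now unreachable; PyGC_Collect() returns the
     * number of unreachable objects found by the full collection. */
    printf("collected: %zd\n", (Py_ssize_t)PyGC_Collect());
    return Py_FinalizeEx() < 0 ? 1 : 0;
}
#endif
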
Py_ssize_t
_PyGC_CollectIfEnabled(void)
{
    if (!enabled)
        return 0;

    return PyGC_Collect();
}

Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively.  Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (collecting)
        n = 0;
    else {
        collecting = 1;
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        collecting = 0;
    }
    return n;
}

void
_PyGC_DumpShutdownStats(void)
{
    if (!(debug & DEBUG_SAVEALL)
        && garbage != NULL && PyList_GET_SIZE(garbage) > 0) {
        char *message;
        if (debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already.  */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(garbage)))
            PyErr_WriteUnraisable(NULL);
        if (debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}

void
_PyGC_Fini(void)
{
    Py_CLEAR(callbacks);
}

/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}

/* extension modules might be compiled with GC support so these
   functions must always be available */

#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}

void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}

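/* A hedged sketch (not compiled) of the classic pattern these functions
 * support: a container type in an extension module that participates in
 * cyclic GC.  The type name `Pair` and its fields are illustrative; the
 * API calls (PyObject_GC_New, PyObject_GC_Track/UnTrack/Del, Py_VISIT,
 * Py_CLEAR) are the documented ones, and the type object that would carry
 * Py_TPFLAGS_HAVE_GC is omitted for brevity.
 */
#if 0
typedef struct {
    PyObject_HEAD
    PyObject *first;    /* may reference back to us, forming a cycle */
    PyObject *second;
} PairObject;

static int
Pair_traverse(PairObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->first);      /* report every PyObject* we own */
    Py_VISIT(self->second);
    return 0;
}

static int
Pair_clear(PairObject *self)
{
    Py_CLEAR(self->first);      /* drop references to break cycles */
    Py_CLEAR(self->second);
    return 0;
}

static void
Pair_dealloc(PairObject *self)
{
    PyObject_GC_UnTrack(self);  /* untrack before tearing anything down */
    Pair_clear(self);
    PyObject_GC_Del(self);
}

static PairObject *
Pair_new_instance(PyTypeObject *type)
{
    PairObject *self = PyObject_GC_New(PairObject, type);
    if (self == NULL)
        return NULL;
    self->first = NULL;
    self->second = NULL;
    PyObject_GC_Track(self);    /* from here on, tp_traverse may be called */
    return self;
}
#endif
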
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = 0;
    _PyGCHead_SET_REFS(g, GC_UNTRACKED);
    generations[0].count++; /* number of allocated GC objects */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}

PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}

PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}

PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    size_t size;
    PyVarObject *op;

    if (nitems < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    size = _PyObject_VAR_SIZE(tp, nitems);
    op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}

PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}

void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}