ยปCore Development>Code coverage>Objects/obmalloc.c

Python code coverage for Objects/obmalloc.c

#       count   content
1n/a#include "Python.h"
2n/a
3n/a#include <stdbool.h>
4n/a
5n/a
6n/a/* Defined in tracemalloc.c */
7n/aextern void _PyMem_DumpTraceback(int fd, const void *ptr);
8n/a
9n/a
10n/a/* Python's malloc wrappers (see pymem.h) */
11n/a
12n/a#undef uint
13n/a#define uint unsigned int /* assuming >= 16 bits */
14n/a
15n/a/* Forward declaration */
16n/astatic void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
17n/astatic void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
18n/astatic void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
19n/astatic void _PyMem_DebugRawFree(void *ctx, void *p);
20n/a
21n/astatic void* _PyMem_DebugMalloc(void *ctx, size_t size);
22n/astatic void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
23n/astatic void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
24n/astatic void _PyMem_DebugFree(void *ctx, void *p);
25n/a
26n/astatic void _PyObject_DebugDumpAddress(const void *p);
27n/astatic void _PyMem_DebugCheckAddress(char api_id, const void *p);
28n/a
29n/a#if defined(__has_feature) /* Clang */
30n/a #if __has_feature(address_sanitizer) /* is ASAN enabled? */
31n/a #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
32n/a __attribute__((no_address_safety_analysis))
33n/a #else
34n/a #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
35n/a #endif
36n/a#else
37n/a #if defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x, is ASAN enabled? */
38n/a #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
39n/a __attribute__((no_address_safety_analysis))
40n/a #else
41n/a #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
42n/a #endif
43n/a#endif
44n/a
45n/a#ifdef WITH_PYMALLOC
46n/a
47n/a#ifdef MS_WINDOWS
48n/a# include <windows.h>
49n/a#elif defined(HAVE_MMAP)
50n/a# include <sys/mman.h>
51n/a# ifdef MAP_ANONYMOUS
52n/a# define ARENAS_USE_MMAP
53n/a# endif
54n/a#endif
55n/a
56n/a/* Forward declaration */
57n/astatic void* _PyObject_Malloc(void *ctx, size_t size);
58n/astatic void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
59n/astatic void _PyObject_Free(void *ctx, void *p);
60n/astatic void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
61n/a#endif
62n/a
63n/a
64n/astatic void *
65n/a_PyMem_RawMalloc(void *ctx, size_t size)
66n/a{
67n/a /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
68n/a for malloc(0), which would be treated as an error. Some platforms would
69n/a return a pointer with no memory behind it, which would break pymalloc.
70n/a To solve these problems, allocate an extra byte. */
71n/a if (size == 0)
72n/a size = 1;
73n/a return malloc(size);
74n/a}
75n/a
76n/astatic void *
77n/a_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
78n/a{
79n/a /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
80n/a for calloc(0, 0), which would be treated as an error. Some platforms
81n/a would return a pointer with no memory behind it, which would break
82n/a pymalloc. To solve these problems, allocate an extra byte. */
83n/a if (nelem == 0 || elsize == 0) {
84n/a nelem = 1;
85n/a elsize = 1;
86n/a }
87n/a return calloc(nelem, elsize);
88n/a}
89n/a
90n/astatic void *
91n/a_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
92n/a{
93n/a if (size == 0)
94n/a size = 1;
95n/a return realloc(ptr, size);
96n/a}
97n/a
98n/astatic void
99n/a_PyMem_RawFree(void *ctx, void *ptr)
100n/a{
101n/a free(ptr);
102n/a}
103n/a
104n/a
105n/a#ifdef MS_WINDOWS
106n/astatic void *
107n/a_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
108n/a{
109n/a return VirtualAlloc(NULL, size,
110n/a MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
111n/a}
112n/a
113n/astatic void
114n/a_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
115n/a{
116n/a VirtualFree(ptr, 0, MEM_RELEASE);
117n/a}
118n/a
119n/a#elif defined(ARENAS_USE_MMAP)
120n/astatic void *
121n/a_PyObject_ArenaMmap(void *ctx, size_t size)
122n/a{
123n/a void *ptr;
124n/a ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
125n/a MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
126n/a if (ptr == MAP_FAILED)
127n/a return NULL;
128n/a assert(ptr != NULL);
129n/a return ptr;
130n/a}
131n/a
132n/astatic void
133n/a_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
134n/a{
135n/a munmap(ptr, size);
136n/a}
137n/a
138n/a#else
139n/astatic void *
140n/a_PyObject_ArenaMalloc(void *ctx, size_t size)
141n/a{
142n/a return malloc(size);
143n/a}
144n/a
145n/astatic void
146n/a_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
147n/a{
148n/a free(ptr);
149n/a}
150n/a#endif
151n/a
152n/a
153n/a#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
154n/a#ifdef WITH_PYMALLOC
155n/a# define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
156n/a#else
157n/a# define PYOBJ_FUNCS PYRAW_FUNCS
158n/a#endif
159n/a#define PYMEM_FUNCS PYOBJ_FUNCS
160n/a
161n/atypedef struct {
162n/a /* We tag each block with an API ID in order to detect API violations */
163n/a char api_id;
164n/a PyMemAllocatorEx alloc;
165n/a} debug_alloc_api_t;
166n/astatic struct {
167n/a debug_alloc_api_t raw;
168n/a debug_alloc_api_t mem;
169n/a debug_alloc_api_t obj;
170n/a} _PyMem_Debug = {
171n/a {'r', {NULL, PYRAW_FUNCS}},
172n/a {'m', {NULL, PYMEM_FUNCS}},
173n/a {'o', {NULL, PYOBJ_FUNCS}}
174n/a };
175n/a
176n/a#define PYRAWDBG_FUNCS \
177n/a _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree
178n/a#define PYDBG_FUNCS \
179n/a _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
180n/a
181n/astatic PyMemAllocatorEx _PyMem_Raw = {
182n/a#ifdef Py_DEBUG
183n/a &_PyMem_Debug.raw, PYRAWDBG_FUNCS
184n/a#else
185n/a NULL, PYRAW_FUNCS
186n/a#endif
187n/a };
188n/a
189n/astatic PyMemAllocatorEx _PyMem = {
190n/a#ifdef Py_DEBUG
191n/a &_PyMem_Debug.mem, PYDBG_FUNCS
192n/a#else
193n/a NULL, PYMEM_FUNCS
194n/a#endif
195n/a };
196n/a
197n/astatic PyMemAllocatorEx _PyObject = {
198n/a#ifdef Py_DEBUG
199n/a &_PyMem_Debug.obj, PYDBG_FUNCS
200n/a#else
201n/a NULL, PYOBJ_FUNCS
202n/a#endif
203n/a };
204n/a
205n/aint
206n/a_PyMem_SetupAllocators(const char *opt)
207n/a{
208n/a if (opt == NULL || *opt == '\0') {
209n/a /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line
210n/a options): use default allocators */
211n/a#ifdef Py_DEBUG
212n/a# ifdef WITH_PYMALLOC
213n/a opt = "pymalloc_debug";
214n/a# else
215n/a opt = "malloc_debug";
216n/a# endif
217n/a#else
218n/a /* !Py_DEBUG */
219n/a# ifdef WITH_PYMALLOC
220n/a opt = "pymalloc";
221n/a# else
222n/a opt = "malloc";
223n/a# endif
224n/a#endif
225n/a }
226n/a
227n/a if (strcmp(opt, "debug") == 0) {
228n/a PyMem_SetupDebugHooks();
229n/a }
230n/a else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0)
231n/a {
232n/a PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};
233n/a
234n/a PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
235n/a PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
236n/a PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
237n/a
238n/a if (strcmp(opt, "malloc_debug") == 0)
239n/a PyMem_SetupDebugHooks();
240n/a }
241n/a#ifdef WITH_PYMALLOC
242n/a else if (strcmp(opt, "pymalloc") == 0
243n/a || strcmp(opt, "pymalloc_debug") == 0)
244n/a {
245n/a PyMemAllocatorEx raw_alloc = {NULL, PYRAW_FUNCS};
246n/a PyMemAllocatorEx mem_alloc = {NULL, PYMEM_FUNCS};
247n/a PyMemAllocatorEx obj_alloc = {NULL, PYOBJ_FUNCS};
248n/a
249n/a PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &raw_alloc);
250n/a PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &mem_alloc);
251n/a PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &obj_alloc);
252n/a
253n/a if (strcmp(opt, "pymalloc_debug") == 0)
254n/a PyMem_SetupDebugHooks();
255n/a }
256n/a#endif
257n/a else {
258n/a /* unknown allocator */
259n/a return -1;
260n/a }
261n/a return 0;
262n/a}
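
/* Illustrative sketch, not part of obmalloc.c: how _PyMem_SetupAllocators()
 * is typically driven. Python's startup code reads the PYTHONMALLOC
 * environment variable and passes its value here; the hypothetical helper
 * below only restates the accepted names and the error case.
 */
static int
example_apply_pythonmalloc(void)
{
    /* Accepted values: "malloc", "debug", and (when compiled in) "pymalloc",
     * plus the "malloc_debug"/"pymalloc_debug" variants which also install
     * the debug hooks. NULL or an empty string selects the build's default.
     */
    const char *opt = Py_GETENV("PYTHONMALLOC");
    if (_PyMem_SetupAllocators(opt) < 0) {
        return -1;   /* unknown allocator name */
    }
    return 0;
}
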
263n/a
264n/a#undef PYRAW_FUNCS
265n/a#undef PYMEM_FUNCS
266n/a#undef PYOBJ_FUNCS
267n/a#undef PYRAWDBG_FUNCS
268n/a#undef PYDBG_FUNCS
269n/a
270n/astatic PyObjectArenaAllocator _PyObject_Arena = {NULL,
271n/a#ifdef MS_WINDOWS
272n/a _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
273n/a#elif defined(ARENAS_USE_MMAP)
274n/a _PyObject_ArenaMmap, _PyObject_ArenaMunmap
275n/a#else
276n/a _PyObject_ArenaMalloc, _PyObject_ArenaFree
277n/a#endif
278n/a };
279n/a
280n/a#ifdef WITH_PYMALLOC
281n/astatic int
282n/a_PyMem_DebugEnabled(void)
283n/a{
284n/a return (_PyObject.malloc == _PyMem_DebugMalloc);
285n/a}
286n/a
287n/aint
288n/a_PyMem_PymallocEnabled(void)
289n/a{
290n/a if (_PyMem_DebugEnabled()) {
291n/a return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
292n/a }
293n/a else {
294n/a return (_PyObject.malloc == _PyObject_Malloc);
295n/a }
296n/a}
297n/a#endif
298n/a
299n/avoid
300n/aPyMem_SetupDebugHooks(void)
301n/a{
302n/a PyMemAllocatorEx alloc;
303n/a
304n/a alloc.malloc = _PyMem_DebugRawMalloc;
305n/a alloc.calloc = _PyMem_DebugRawCalloc;
306n/a alloc.realloc = _PyMem_DebugRawRealloc;
307n/a alloc.free = _PyMem_DebugRawFree;
308n/a
309n/a if (_PyMem_Raw.malloc != _PyMem_DebugRawMalloc) {
310n/a alloc.ctx = &_PyMem_Debug.raw;
311n/a PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
312n/a PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
313n/a }
314n/a
315n/a alloc.malloc = _PyMem_DebugMalloc;
316n/a alloc.calloc = _PyMem_DebugCalloc;
317n/a alloc.realloc = _PyMem_DebugRealloc;
318n/a alloc.free = _PyMem_DebugFree;
319n/a
320n/a if (_PyMem.malloc != _PyMem_DebugMalloc) {
321n/a alloc.ctx = &_PyMem_Debug.mem;
322n/a PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
323n/a PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
324n/a }
325n/a
326n/a if (_PyObject.malloc != _PyMem_DebugMalloc) {
327n/a alloc.ctx = &_PyMem_Debug.obj;
328n/a PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
329n/a PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
330n/a }
331n/a}
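
/* Illustrative sketch, not part of obmalloc.c: the same get-then-set dance
 * performed by PyMem_SetupDebugHooks() above is how any custom hook wraps an
 * allocator domain -- save the current PyMemAllocatorEx so the wrappers can
 * delegate to it, then install wrappers for all four functions. All
 * "example_" names are hypothetical.
 */
typedef struct {
    PyMemAllocatorEx inner;   /* the allocator being wrapped */
    size_t nallocs;           /* a statistic maintained by the hook */
} example_hook_ctx;

static void *
example_hook_malloc(void *ctx, size_t size)
{
    example_hook_ctx *hook = (example_hook_ctx *)ctx;
    hook->nallocs++;
    return hook->inner.malloc(hook->inner.ctx, size);
}

static void *
example_hook_calloc(void *ctx, size_t nelem, size_t elsize)
{
    example_hook_ctx *hook = (example_hook_ctx *)ctx;
    hook->nallocs++;
    return hook->inner.calloc(hook->inner.ctx, nelem, elsize);
}

static void *
example_hook_realloc(void *ctx, void *ptr, size_t size)
{
    example_hook_ctx *hook = (example_hook_ctx *)ctx;
    return hook->inner.realloc(hook->inner.ctx, ptr, size);
}

static void
example_hook_free(void *ctx, void *ptr)
{
    example_hook_ctx *hook = (example_hook_ctx *)ctx;
    hook->inner.free(hook->inner.ctx, ptr);
}

static void
example_install_hook(example_hook_ctx *hook)
{
    PyMemAllocatorEx alloc;

    hook->nallocs = 0;
    /* Save the current allocator so the wrappers can delegate to it. */
    PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &hook->inner);

    alloc.ctx = hook;
    alloc.malloc = example_hook_malloc;
    alloc.calloc = example_hook_calloc;
    alloc.realloc = example_hook_realloc;
    alloc.free = example_hook_free;
    PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
}
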
332n/a
333n/avoid
334n/aPyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
335n/a{
336n/a switch(domain)
337n/a {
338n/a case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
339n/a case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
340n/a case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
341n/a default:
342n/a /* unknown domain: set all attributes to NULL */
343n/a allocator->ctx = NULL;
344n/a allocator->malloc = NULL;
345n/a allocator->calloc = NULL;
346n/a allocator->realloc = NULL;
347n/a allocator->free = NULL;
348n/a }
349n/a}
350n/a
351n/avoid
352n/aPyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
353n/a{
354n/a switch(domain)
355n/a {
356n/a case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
357n/a case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
358n/a case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
359n/a /* ignore unknown domain */
360n/a }
361n/a}
362n/a
363n/avoid
364n/aPyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
365n/a{
366n/a *allocator = _PyObject_Arena;
367n/a}
368n/a
369n/avoid
370n/aPyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
371n/a{
372n/a _PyObject_Arena = *allocator;
373n/a}
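
/* Illustrative sketch, not part of obmalloc.c: installing a custom arena
 * allocator through the two functions above. The alloc hook receives the
 * ctx pointer stored in PyObjectArenaAllocator plus the requested size
 * (ARENA_SIZE), and the free hook gets the same size back. "example_"
 * names are hypothetical; this version simply delegates to malloc()/free().
 */
static void *
example_arena_alloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
example_arena_free(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}

static void
example_install_arena_allocator(void)
{
    PyObjectArenaAllocator arena = {NULL, example_arena_alloc,
                                    example_arena_free};
    PyObject_SetArenaAllocator(&arena);
}
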
374n/a
375n/avoid *
376n/aPyMem_RawMalloc(size_t size)
377n/a{
378n/a /*
379n/a * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
380n/a * Most python internals blindly use a signed Py_ssize_t to track
381n/a * things without checking for overflows or negatives.
382n/a * As size_t is unsigned, checking for size < 0 is not required.
383n/a */
384n/a if (size > (size_t)PY_SSIZE_T_MAX)
385n/a return NULL;
386n/a return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
387n/a}
388n/a
389n/avoid *
390n/aPyMem_RawCalloc(size_t nelem, size_t elsize)
391n/a{
392n/a /* see PyMem_RawMalloc() */
393n/a if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
394n/a return NULL;
395n/a return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
396n/a}
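
/* Illustrative sketch, not part of obmalloc.c: why the division above detects
 * multiplication overflow. If nelem > PY_SSIZE_T_MAX / elsize (integer
 * division), then nelem * elsize necessarily exceeds PY_SSIZE_T_MAX, and in
 * size_t arithmetic the product may even have wrapped around. The
 * hypothetical helper restates the guarded multiplication.
 */
static int
example_checked_nbytes(size_t nelem, size_t elsize, size_t *nbytes)
{
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return -1;              /* product would exceed PY_SSIZE_T_MAX */
    *nbytes = nelem * elsize;   /* safe: result fits in [0, PY_SSIZE_T_MAX] */
    return 0;
}
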
397n/a
398n/avoid*
399n/aPyMem_RawRealloc(void *ptr, size_t new_size)
400n/a{
401n/a /* see PyMem_RawMalloc() */
402n/a if (new_size > (size_t)PY_SSIZE_T_MAX)
403n/a return NULL;
404n/a return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
405n/a}
406n/a
407n/avoid PyMem_RawFree(void *ptr)
408n/a{
409n/a _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
410n/a}
411n/a
412n/avoid *
413n/aPyMem_Malloc(size_t size)
414n/a{
415n/a /* see PyMem_RawMalloc() */
416n/a if (size > (size_t)PY_SSIZE_T_MAX)
417n/a return NULL;
418n/a return _PyMem.malloc(_PyMem.ctx, size);
419n/a}
420n/a
421n/avoid *
422n/aPyMem_Calloc(size_t nelem, size_t elsize)
423n/a{
424n/a /* see PyMem_RawMalloc() */
425n/a if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
426n/a return NULL;
427n/a return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
428n/a}
429n/a
430n/avoid *
431n/aPyMem_Realloc(void *ptr, size_t new_size)
432n/a{
433n/a /* see PyMem_RawMalloc() */
434n/a if (new_size > (size_t)PY_SSIZE_T_MAX)
435n/a return NULL;
436n/a return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
437n/a}
438n/a
439n/avoid
440n/aPyMem_Free(void *ptr)
441n/a{
442n/a _PyMem.free(_PyMem.ctx, ptr);
443n/a}
444n/a
445n/achar *
446n/a_PyMem_RawStrdup(const char *str)
447n/a{
448n/a size_t size;
449n/a char *copy;
450n/a
451n/a size = strlen(str) + 1;
452n/a copy = PyMem_RawMalloc(size);
453n/a if (copy == NULL)
454n/a return NULL;
455n/a memcpy(copy, str, size);
456n/a return copy;
457n/a}
458n/a
459n/achar *
460n/a_PyMem_Strdup(const char *str)
461n/a{
462n/a size_t size;
463n/a char *copy;
464n/a
465n/a size = strlen(str) + 1;
466n/a copy = PyMem_Malloc(size);
467n/a if (copy == NULL)
468n/a return NULL;
469n/a memcpy(copy, str, size);
470n/a return copy;
471n/a}
472n/a
473n/avoid *
474n/aPyObject_Malloc(size_t size)
475n/a{
476n/a /* see PyMem_RawMalloc() */
477n/a if (size > (size_t)PY_SSIZE_T_MAX)
478n/a return NULL;
479n/a return _PyObject.malloc(_PyObject.ctx, size);
480n/a}
481n/a
482n/avoid *
483n/aPyObject_Calloc(size_t nelem, size_t elsize)
484n/a{
485n/a /* see PyMem_RawMalloc() */
486n/a if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
487n/a return NULL;
488n/a return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
489n/a}
490n/a
491n/avoid *
492n/aPyObject_Realloc(void *ptr, size_t new_size)
493n/a{
494n/a /* see PyMem_RawMalloc() */
495n/a if (new_size > (size_t)PY_SSIZE_T_MAX)
496n/a return NULL;
497n/a return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
498n/a}
499n/a
500n/avoid
501n/aPyObject_Free(void *ptr)
502n/a{
503n/a _PyObject.free(_PyObject.ctx, ptr);
504n/a}
505n/a
506n/a
507n/a#ifdef WITH_PYMALLOC
508n/a
509n/a#ifdef WITH_VALGRIND
510n/a#include <valgrind/valgrind.h>
511n/a
512n/a/* If we're using GCC, use __builtin_expect() to reduce overhead of
513n/a the valgrind checks */
514n/a#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
515n/a# define UNLIKELY(value) __builtin_expect((value), 0)
516n/a#else
517n/a# define UNLIKELY(value) (value)
518n/a#endif
519n/a
520n/a/* -1 indicates that we haven't checked that we're running on valgrind yet. */
521n/astatic int running_on_valgrind = -1;
522n/a#endif
523n/a
524n/a/* An object allocator for Python.
525n/a
526n/a Here is an introduction to the layers of the Python memory architecture,
527n/a showing where the object allocator is actually used (layer +2). It is
528n/a called for every object allocation and deallocation (PyObject_New/Del),
529n/a unless the object-specific allocators implement a proprietary allocation
530n/a scheme (ex.: ints use a simple free list). This is also the place where
531n/a the cyclic garbage collector operates selectively on container objects.
532n/a
533n/a
534n/a Object-specific allocators
535n/a _____ ______ ______ ________
536n/a [ int ] [ dict ] [ list ] ... [ string ] Python core |
537n/a+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
538n/a _______________________________ | |
539n/a [ Python's object allocator ] | |
540n/a+2 | ####### Object memory ####### | <------ Internal buffers ------> |
541n/a ______________________________________________________________ |
542n/a [ Python's raw memory allocator (PyMem_ API) ] |
543n/a+1 | <----- Python memory (under PyMem manager's control) ------> | |
544n/a __________________________________________________________________
545n/a [ Underlying general-purpose allocator (ex: C library malloc) ]
546n/a 0 | <------ Virtual memory allocated for the python process -------> |
547n/a
548n/a =========================================================================
549n/a _______________________________________________________________________
550n/a [ OS-specific Virtual Memory Manager (VMM) ]
551n/a-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
552n/a __________________________________ __________________________________
553n/a [ ] [ ]
554n/a-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
555n/a
556n/a*/
557n/a/*==========================================================================*/
558n/a
559n/a/* A fast, special-purpose memory allocator for small blocks, to be used
560n/a on top of a general-purpose malloc -- heavily based on previous art. */
561n/a
562n/a/* Vladimir Marangozov -- August 2000 */
563n/a
564n/a/*
565n/a * "Memory management is where the rubber meets the road -- if we do the wrong
566n/a * thing at any level, the results will not be good. And if we don't make the
567n/a * levels work well together, we are in serious trouble." (1)
568n/a *
569n/a * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
570n/a * "Dynamic Storage Allocation: A Survey and Critical Review",
571n/a * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
572n/a */
573n/a
574n/a/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
575n/a
576n/a/*==========================================================================*/
577n/a
578n/a/*
579n/a * Allocation strategy abstract:
580n/a *
581n/a * For small requests, the allocator sub-allocates <Big> blocks of memory.
582n/a * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
583n/a * system's allocator.
584n/a *
585n/a * Small requests are grouped in size classes spaced 8 bytes apart, due
586n/a * to the required valid alignment of the returned address. Requests of
587n/a * a particular size are serviced from memory pools of 4K (one VMM page).
588n/a * Pools are fragmented on demand and contain free lists of blocks of one
589n/a * particular size class. In other words, there is a fixed-size allocator
590n/a * for each size class. Free pools are shared by the different allocators
591n/a * thus minimizing the space reserved for a particular size class.
592n/a *
593n/a * This allocation strategy is a variant of what is known as "simple
594n/a * segregated storage based on array of free lists". The main drawback of
595n/a * simple segregated storage is that we might end up with a lot of reserved
596n/a * memory for the different free lists, which degenerate over time. To avoid
597n/a * this, we partition each free list in pools and we share dynamically the
598n/a * reserved space between all free lists. This technique is quite efficient
599n/a * for memory intensive programs which allocate mainly small-sized blocks.
600n/a *
601n/a * For small requests we have the following table:
602n/a *
603n/a * Request in bytes Size of allocated block Size class idx
604n/a * ----------------------------------------------------------------
605n/a * 1-8 8 0
606n/a * 9-16 16 1
607n/a * 17-24 24 2
608n/a * 25-32 32 3
609n/a * 33-40 40 4
610n/a * 41-48 48 5
611n/a * 49-56 56 6
612n/a * 57-64 64 7
613n/a * 65-72 72 8
614n/a * ... ... ...
615n/a * 497-504 504 62
616n/a * 505-512 512 63
617n/a *
618n/a * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
619n/a * allocator.
620n/a */
621n/a
622n/a/*==========================================================================*/
623n/a
624n/a/*
625n/a * -- Main tunable settings section --
626n/a */
627n/a
628n/a/*
629n/a * Alignment of addresses returned to the user. 8-bytes alignment works
630n/a * on most current architectures (with 32-bit or 64-bit address busses).
631n/a * The alignment value is also used for grouping small requests in size
632n/a * classes spaced ALIGNMENT bytes apart.
633n/a *
634n/a * You shouldn't change this unless you know what you are doing.
635n/a */
636n/a#define ALIGNMENT 8 /* must be 2^N */
637n/a#define ALIGNMENT_SHIFT 3
638n/a
639n/a/* Return the number of bytes in size class I, as a uint. */
640n/a#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
641n/a
642n/a/*
643n/a * Max size threshold below which malloc requests are considered to be
644n/a * small enough in order to use preallocated memory pools. You can tune
645n/a * this value according to your application behaviour and memory needs.
646n/a *
647n/a * Note: a size threshold of 512 guarantees that newly created dictionaries
648n/a * will be allocated from preallocated memory pools on 64-bit.
649n/a *
650n/a * The following invariants must hold:
651n/a * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
652n/a * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
653n/a *
654n/a * Although not required, for better performance and space efficiency,
655n/a * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
656n/a */
657n/a#define SMALL_REQUEST_THRESHOLD 512
658n/a#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
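
/* Illustrative sketch, not part of obmalloc.c: the size-class arithmetic
 * behind the table in the comment above. A small request of nbytes
 * (1 <= nbytes <= SMALL_REQUEST_THRESHOLD) maps to class index
 * (nbytes - 1) >> ALIGNMENT_SHIFT, and INDEX2SIZE() converts the index back
 * to the rounded-up block size: nbytes == 20 gives index 2 and a 24-byte
 * block. The "example_" helpers are hypothetical.
 */
static uint
example_size_class_index(size_t nbytes)
{
    assert(nbytes > 0 && nbytes <= SMALL_REQUEST_THRESHOLD);
    return (uint)((nbytes - 1) >> ALIGNMENT_SHIFT);
}

static uint
example_block_size(size_t nbytes)
{
    /* INDEX2SIZE(i) == (i + 1) << ALIGNMENT_SHIFT, i.e. nbytes rounded up
     * to the next multiple of ALIGNMENT (8 bytes).
     */
    return INDEX2SIZE(example_size_class_index(nbytes));
}
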
659n/a
660n/a/*
661n/a * The system's VMM page size can be obtained on most unices with a
662n/a * getpagesize() call or deduced from various header files. To make
663n/a * things simpler, we assume that it is 4K, which is OK for most systems.
664n/a * It is probably better if this is the native page size, but it doesn't
665n/a * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
666n/a * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
667n/a * violation fault. 4K is apparently OK for all the platforms that python
668n/a * currently targets.
669n/a */
670n/a#define SYSTEM_PAGE_SIZE (4 * 1024)
671n/a#define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)
672n/a
673n/a/*
674n/a * Maximum amount of memory managed by the allocator for small requests.
675n/a */
676n/a#ifdef WITH_MEMORY_LIMITS
677n/a#ifndef SMALL_MEMORY_LIMIT
678n/a#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
679n/a#endif
680n/a#endif
681n/a
682n/a/*
683n/a * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
684n/a * on a page boundary. This is a reserved virtual address space for the
685n/a * current process (obtained through a malloc()/mmap() call). In no way does
686n/a * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
687n/a * usually an address range reservation for <Big> bytes, unless all pages within
688n/a * this space are referenced subsequently. So malloc'ing big blocks and not
689n/a * using them does not mean "wasting memory". It's an addressable range
690n/a * wastage...
691n/a *
692n/a * Arenas are allocated with mmap() on systems supporting anonymous memory
693n/a * mappings to reduce heap fragmentation.
694n/a */
695n/a#define ARENA_SIZE (256 << 10) /* 256KB */
696n/a
697n/a#ifdef WITH_MEMORY_LIMITS
698n/a#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)
699n/a#endif
700n/a
701n/a/*
702n/a * Size of the pools used for small blocks. Should be a power of 2,
703n/a * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
704n/a */
705n/a#define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
706n/a#define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
707n/a
708n/a/*
709n/a * -- End of tunable settings section --
710n/a */
711n/a
712n/a/*==========================================================================*/
713n/a
714n/a/*
715n/a * Locking
716n/a *
717n/a * To reduce lock contention, it would probably be better to refine the
718n/a * crude function locking with per size class locking. I'm not positive
719n/a * however, whether it's worth switching to such locking policy because
720n/a * of the performance penalty it might introduce.
721n/a *
722n/a * The following macros describe the simplest (should also be the fastest)
723n/a * lock object on a particular platform and the init/fini/lock/unlock
724n/a * operations on it. The locks defined here are not expected to be recursive
725n/a * because it is assumed that they will always be called in the order:
726n/a * INIT, [LOCK, UNLOCK]*, FINI.
727n/a */
728n/a
729n/a/*
730n/a * Python's threads are serialized, so object malloc locking is disabled.
731n/a */
732n/a#define SIMPLELOCK_DECL(lock) /* simple lock declaration */
733n/a#define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */
734n/a#define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */
735n/a#define SIMPLELOCK_LOCK(lock) /* acquire released lock */
736n/a#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */
737n/a
738n/a/* When you say memory, my mind reasons in terms of (pointers to) blocks */
739n/atypedef uint8_t block;
740n/a
741n/a/* Pool for small blocks. */
742n/astruct pool_header {
743n/a union { block *_padding;
744n/a uint count; } ref; /* number of allocated blocks */
745n/a block *freeblock; /* pool's free list head */
746n/a struct pool_header *nextpool; /* next pool of this size class */
747n/a struct pool_header *prevpool; /* previous pool "" */
748n/a uint arenaindex; /* index into arenas of base adr */
749n/a uint szidx; /* block size class index */
750n/a uint nextoffset; /* bytes to virgin block */
751n/a uint maxnextoffset; /* largest valid nextoffset */
752n/a};
753n/a
754n/atypedef struct pool_header *poolp;
755n/a
756n/a/* Record keeping for arenas. */
757n/astruct arena_object {
758n/a /* The address of the arena, as returned by malloc. Note that 0
759n/a * will never be returned by a successful malloc, and is used
760n/a * here to mark an arena_object that doesn't correspond to an
761n/a * allocated arena.
762n/a */
763n/a uintptr_t address;
764n/a
765n/a /* Pool-aligned pointer to the next pool to be carved off. */
766n/a block* pool_address;
767n/a
768n/a /* The number of available pools in the arena: free pools + never-
769n/a * allocated pools.
770n/a */
771n/a uint nfreepools;
772n/a
773n/a /* The total number of pools in the arena, whether or not available. */
774n/a uint ntotalpools;
775n/a
776n/a /* Singly-linked list of available pools. */
777n/a struct pool_header* freepools;
778n/a
779n/a /* Whenever this arena_object is not associated with an allocated
780n/a * arena, the nextarena member is used to link all unassociated
781n/a * arena_objects in the singly-linked `unused_arena_objects` list.
782n/a * The prevarena member is unused in this case.
783n/a *
784n/a * When this arena_object is associated with an allocated arena
785n/a * with at least one available pool, both members are used in the
786n/a * doubly-linked `usable_arenas` list, which is maintained in
787n/a * increasing order of `nfreepools` values.
788n/a *
789n/a * Else this arena_object is associated with an allocated arena
790n/a * all of whose pools are in use. `nextarena` and `prevarena`
791n/a * are both meaningless in this case.
792n/a */
793n/a struct arena_object* nextarena;
794n/a struct arena_object* prevarena;
795n/a};
796n/a
797n/a#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)
798n/a
799n/a#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */
800n/a
801n/a/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
802n/a#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))
803n/a
804n/a/* Return total number of blocks in pool of size index I, as a uint. */
805n/a#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
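
/* Illustrative sketch, not part of obmalloc.c: what the two macros above
 * compute. POOL_ADDR() clears the low POOL_SIZE-1 address bits, so any
 * address inside a pool maps back to the pool_header at the pool's
 * 4K-aligned base; NUMBLOCKS() is how many fixed-size blocks fit in a pool
 * once the header overhead is subtracted. The "example_" helpers are
 * hypothetical restatements.
 */
static poolp
example_pool_of(const void *p)
{
    /* Equivalent to POOL_ADDR(p). */
    return (poolp)((uintptr_t)p & ~(uintptr_t)POOL_SIZE_MASK);
}

static uint
example_blocks_per_pool(uint sizeclass_idx)
{
    /* e.g. 8-byte blocks (index 0) on a 64-bit build: roughly
     * (4096 - 48) / 8 == 506 blocks per pool.
     */
    return (uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(sizeclass_idx);
}
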
806n/a
807n/a/*==========================================================================*/
808n/a
809n/a/*
810n/a * This malloc lock
811n/a */
812n/aSIMPLELOCK_DECL(_malloc_lock)
813n/a#define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
814n/a#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
815n/a#define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
816n/a#define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
817n/a
818n/a/*
819n/a * Pool table -- headed, circular, doubly-linked lists of partially used pools.
820n/a
821n/aThis is involved. For an index i, usedpools[i+i] is the header for a list of
822n/aall partially used pools holding small blocks with "size class idx" i. So
823n/ausedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
824n/a16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
825n/a
826n/aPools are carved off an arena's highwater mark (an arena_object's pool_address
827n/amember) as needed. Once carved off, a pool is in one of three states forever
828n/aafter:
829n/a
830n/aused == partially used, neither empty nor full
831n/a At least one block in the pool is currently allocated, and at least one
832n/a block in the pool is not currently allocated (note this implies a pool
833n/a has room for at least two blocks).
834n/a This is a pool's initial state, as a pool is created only when malloc
835n/a needs space.
836n/a The pool holds blocks of a fixed size, and is in the circular list headed
837n/a at usedpools[i] (see above). It's linked to the other used pools of the
838n/a same size class via the pool_header's nextpool and prevpool members.
839n/a If all but one block is currently allocated, a malloc can cause a
840n/a transition to the full state. If all but one block is not currently
841n/a allocated, a free can cause a transition to the empty state.
842n/a
843n/afull == all the pool's blocks are currently allocated
844n/a On transition to full, a pool is unlinked from its usedpools[] list.
845n/a It's not linked to from anything then anymore, and its nextpool and
846n/a prevpool members are meaningless until it transitions back to used.
847n/a A free of a block in a full pool puts the pool back in the used state.
848n/a Then it's linked in at the front of the appropriate usedpools[] list, so
849n/a that the next allocation for its size class will reuse the freed block.
850n/a
851n/aempty == all the pool's blocks are currently available for allocation
852n/a On transition to empty, a pool is unlinked from its usedpools[] list,
853n/a and linked to the front of its arena_object's singly-linked freepools list,
854n/a via its nextpool member. The prevpool member has no meaning in this case.
855n/a Empty pools have no inherent size class: the next time a malloc finds
856n/a an empty list in usedpools[], it takes the first pool off of freepools.
857n/a If the size class needed happens to be the same as the size class the pool
858n/a last had, some pool initialization can be skipped.
859n/a
860n/a
861n/aBlock Management
862n/a
863n/aBlocks within pools are again carved out as needed. pool->freeblock points to
864n/athe start of a singly-linked list of free blocks within the pool. When a
865n/ablock is freed, it's inserted at the front of its pool's freeblock list. Note
866n/athat the available blocks in a pool are *not* linked all together when a pool
867n/ais initialized. Instead only "the first two" (lowest addresses) blocks are
868n/aset up, returning the first such block, and setting pool->freeblock to a
869n/aone-block list holding the second such block. This is consistent with the fact
870n/athat pymalloc strives at all levels (arena, pool, and block) never to touch a piece
871n/aof memory until it's actually needed.
872n/a
873n/aSo long as a pool is in the used state, we're certain there *is* a block
874n/aavailable for allocating, and pool->freeblock is not NULL. If pool->freeblock
875n/apoints to the end of the free list before we've carved the entire pool into
876n/ablocks, that means we simply haven't yet gotten to one of the higher-address
877n/ablocks. The offset from the pool_header to the start of "the next" virgin
878n/ablock is stored in the pool_header nextoffset member, and the largest value
879n/aof nextoffset that makes sense is stored in the maxnextoffset member when a
880n/apool is initialized. All the blocks in a pool have been passed out at least
881n/aonce when and only when nextoffset > maxnextoffset.
882n/a
883n/a
884n/aMajor obscurity: While the usedpools vector is declared to have poolp
885n/aentries, it doesn't really. It really contains two pointers per (conceptual)
886n/apoolp entry, the nextpool and prevpool members of a pool_header. The
887n/aexcruciating initialization code below fools C so that
888n/a
889n/a usedpools[i+i]
890n/a
891n/a"acts like" a genuine poolp, but only so long as you only reference its
892n/anextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
893n/acompensating for that a pool_header's nextpool and prevpool members
894n/aimmediately follow a pool_header's first two members:
895n/a
896n/a union { block *_padding;
897n/a uint count; } ref;
898n/a block *freeblock;
899n/a
900n/aeach of which consume sizeof(block *) bytes. So what usedpools[i+i] really
901n/acontains is a fudged-up pointer p such that *if* C believes it's a poolp
902n/apointer, then p->nextpool and p->prevpool are both p (meaning that the headed
903n/acircular list is empty).
904n/a
905n/aIt's unclear why the usedpools setup is so convoluted. It could be to
906n/aminimize the amount of cache required to hold this heavily-referenced table
907n/a(which only *needs* the two interpool pointer members of a pool_header). OTOH,
908n/areferencing code has to remember to "double the index" and doing so isn't
909n/afree, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
910n/aon that C doesn't insert any padding anywhere in a pool_header at or before
911n/athe prevpool member.
912n/a**************************************************************************** */
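
/* Illustrative sketch, not part of obmalloc.c: the freeblock list described
 * above is a plain LIFO singly-linked list threaded through the free blocks
 * themselves -- the first word of each free block stores the pointer to the
 * next free block. The hypothetical "example_" helpers restate the push
 * (done on free) and pop (done on malloc) steps that the real code performs
 * inline.
 */
static void
example_freeblock_push(poolp pool, block *p)
{
    *(block **)p = pool->freeblock;   /* link p in front of the old head */
    pool->freeblock = p;
}

static block *
example_freeblock_pop(poolp pool)
{
    block *bp = pool->freeblock;
    if (bp != NULL)
        pool->freeblock = *(block **)bp;  /* becomes NULL once the carved-out
                                             free list is exhausted */
    return bp;
}
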
913n/a
914n/a#define PTA(x) ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
915n/a#define PT(x) PTA(x), PTA(x)
916n/a
917n/astatic poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
918n/a PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
919n/a#if NB_SMALL_SIZE_CLASSES > 8
920n/a , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
921n/a#if NB_SMALL_SIZE_CLASSES > 16
922n/a , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
923n/a#if NB_SMALL_SIZE_CLASSES > 24
924n/a , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
925n/a#if NB_SMALL_SIZE_CLASSES > 32
926n/a , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
927n/a#if NB_SMALL_SIZE_CLASSES > 40
928n/a , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
929n/a#if NB_SMALL_SIZE_CLASSES > 48
930n/a , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
931n/a#if NB_SMALL_SIZE_CLASSES > 56
932n/a , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
933n/a#if NB_SMALL_SIZE_CLASSES > 64
934n/a#error "NB_SMALL_SIZE_CLASSES should be less than 64"
935n/a#endif /* NB_SMALL_SIZE_CLASSES > 64 */
936n/a#endif /* NB_SMALL_SIZE_CLASSES > 56 */
937n/a#endif /* NB_SMALL_SIZE_CLASSES > 48 */
938n/a#endif /* NB_SMALL_SIZE_CLASSES > 40 */
939n/a#endif /* NB_SMALL_SIZE_CLASSES > 32 */
940n/a#endif /* NB_SMALL_SIZE_CLASSES > 24 */
941n/a#endif /* NB_SMALL_SIZE_CLASSES > 16 */
942n/a#endif /* NB_SMALL_SIZE_CLASSES > 8 */
943n/a};
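
/* Illustrative sketch, not part of obmalloc.c: what the PT()/PTA() trick
 * above buys. Each usedpools[i+i] entry is a fudged pointer whose nextpool
 * and prevpool slots alias the entry itself, so "no partially used pool for
 * this size class" is just "the header's nextpool points back at the
 * header". The hypothetical helper restates the test the allocator performs
 * inline (and, like the real code, touches only nextpool/prevpool).
 */
static int
example_size_class_has_used_pool(uint size_class_idx)
{
    poolp head = usedpools[size_class_idx + size_class_idx];
    return head->nextpool != head;
}
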
944n/a
945n/a/*==========================================================================
946n/aArena management.
947n/a
948n/a`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
949n/awhich may not be currently used (== they're arena_objects that aren't
950n/acurrently associated with an allocated arena). Note that arenas proper are
951n/aseparately malloc'ed.
952n/a
953n/aPrior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
954n/awe do try to free() arenas, and use some mild heuristic strategies to increase
955n/athe likelihood that arenas eventually can be freed.
956n/a
957n/aunused_arena_objects
958n/a
959n/a This is a singly-linked list of the arena_objects that are currently not
960n/a being used (no arena is associated with them). Objects are taken off the
961n/a head of the list in new_arena(), and are pushed on the head of the list in
962n/a PyObject_Free() when the arena is empty. Key invariant: an arena_object
963n/a is on this list if and only if its .address member is 0.
964n/a
965n/ausable_arenas
966n/a
967n/a This is a doubly-linked list of the arena_objects associated with arenas
968n/a that have pools available. These pools are either waiting to be reused,
969n/a or have not been used before. The list is sorted to have the most-
970n/a allocated arenas first (ascending order based on the nfreepools member).
971n/a This means that the next allocation will come from a heavily used arena,
972n/a which gives the nearly empty arenas a chance to be returned to the system.
973n/a In my unscientific tests this dramatically improved the number of arenas
974n/a that could be freed.
975n/a
976n/aNote that an arena_object associated with an arena all of whose pools are
977n/acurrently in use isn't on either list.
978n/a*/
979n/a
980n/a/* Array of objects used to track chunks of memory (arenas). */
981n/astatic struct arena_object* arenas = NULL;
982n/a/* Number of slots currently allocated in the `arenas` vector. */
983n/astatic uint maxarenas = 0;
984n/a
985n/a/* The head of the singly-linked, NULL-terminated list of available
986n/a * arena_objects.
987n/a */
988n/astatic struct arena_object* unused_arena_objects = NULL;
989n/a
990n/a/* The head of the doubly-linked, NULL-terminated at each end, list of
991n/a * arena_objects associated with arenas that have pools available.
992n/a */
993n/astatic struct arena_object* usable_arenas = NULL;
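
/* Illustrative sketch, not part of obmalloc.c: walking the arena lists
 * described above. unused_arena_objects is singly linked through nextarena;
 * usable_arenas is doubly linked and kept sorted by ascending nfreepools.
 * The hypothetical helper totals the pools that could still be handed out
 * without calling new_arena().
 */
static size_t
example_count_usable_free_pools(void)
{
    size_t nfree = 0;
    struct arena_object *ao;
    for (ao = usable_arenas; ao != NULL; ao = ao->nextarena) {
        assert(ao->address != 0);   /* on this list => arena is allocated */
        nfree += ao->nfreepools;
    }
    return nfree;
}
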
994n/a
995n/a/* How many arena_objects do we initially allocate?
996n/a * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
997n/a * `arenas` vector.
998n/a */
999n/a#define INITIAL_ARENA_OBJECTS 16
1000n/a
1001n/a/* Number of arenas allocated that haven't been free()'d. */
1002n/astatic size_t narenas_currently_allocated = 0;
1003n/a
1004n/a/* Total number of times malloc() called to allocate an arena. */
1005n/astatic size_t ntimes_arena_allocated = 0;
1006n/a/* High water mark (max value ever seen) for narenas_currently_allocated. */
1007n/astatic size_t narenas_highwater = 0;
1008n/a
1009n/astatic Py_ssize_t _Py_AllocatedBlocks = 0;
1010n/a
1011n/aPy_ssize_t
1012n/a_Py_GetAllocatedBlocks(void)
1013n/a{
1014n/a return _Py_AllocatedBlocks;
1015n/a}
1016n/a
1017n/a
1018n/a/* Allocate a new arena. If we run out of memory, return NULL. Else
1019n/a * allocate a new arena, and return the address of an arena_object
1020n/a * describing the new arena. It's expected that the caller will set
1021n/a * `usable_arenas` to the return value.
1022n/a */
1023n/astatic struct arena_object*
1024n/anew_arena(void)
1025n/a{
1026n/a struct arena_object* arenaobj;
1027n/a uint excess; /* number of bytes above pool alignment */
1028n/a void *address;
1029n/a static int debug_stats = -1;
1030n/a
1031n/a if (debug_stats == -1) {
1032n/a char *opt = Py_GETENV("PYTHONMALLOCSTATS");
1033n/a debug_stats = (opt != NULL && *opt != '\0');
1034n/a }
1035n/a if (debug_stats)
1036n/a _PyObject_DebugMallocStats(stderr);
1037n/a
1038n/a if (unused_arena_objects == NULL) {
1039n/a uint i;
1040n/a uint numarenas;
1041n/a size_t nbytes;
1042n/a
1043n/a /* Double the number of arena objects on each allocation.
1044n/a * Note that it's possible for `numarenas` to overflow.
1045n/a */
1046n/a numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
1047n/a if (numarenas <= maxarenas)
1048n/a return NULL; /* overflow */
1049n/a#if SIZEOF_SIZE_T <= SIZEOF_INT
1050n/a if (numarenas > SIZE_MAX / sizeof(*arenas))
1051n/a return NULL; /* overflow */
1052n/a#endif
1053n/a nbytes = numarenas * sizeof(*arenas);
1054n/a arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
1055n/a if (arenaobj == NULL)
1056n/a return NULL;
1057n/a arenas = arenaobj;
1058n/a
1059n/a /* We might need to fix pointers that were copied. However,
1060n/a * new_arena only gets called when all the pages in the
1061n/a * previous arenas are full. Thus, there are *no* pointers
1062n/a * into the old array. Thus, we don't have to worry about
1063n/a * invalid pointers. Just to be sure, some asserts:
1064n/a */
1065n/a assert(usable_arenas == NULL);
1066n/a assert(unused_arena_objects == NULL);
1067n/a
1068n/a /* Put the new arenas on the unused_arena_objects list. */
1069n/a for (i = maxarenas; i < numarenas; ++i) {
1070n/a arenas[i].address = 0; /* mark as unassociated */
1071n/a arenas[i].nextarena = i < numarenas - 1 ?
1072n/a &arenas[i+1] : NULL;
1073n/a }
1074n/a
1075n/a /* Update globals. */
1076n/a unused_arena_objects = &arenas[maxarenas];
1077n/a maxarenas = numarenas;
1078n/a }
1079n/a
1080n/a /* Take the next available arena object off the head of the list. */
1081n/a assert(unused_arena_objects != NULL);
1082n/a arenaobj = unused_arena_objects;
1083n/a unused_arena_objects = arenaobj->nextarena;
1084n/a assert(arenaobj->address == 0);
1085n/a address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
1086n/a if (address == NULL) {
1087n/a /* The allocation failed: return NULL after putting the
1088n/a * arenaobj back.
1089n/a */
1090n/a arenaobj->nextarena = unused_arena_objects;
1091n/a unused_arena_objects = arenaobj;
1092n/a return NULL;
1093n/a }
1094n/a arenaobj->address = (uintptr_t)address;
1095n/a
1096n/a ++narenas_currently_allocated;
1097n/a ++ntimes_arena_allocated;
1098n/a if (narenas_currently_allocated > narenas_highwater)
1099n/a narenas_highwater = narenas_currently_allocated;
1100n/a arenaobj->freepools = NULL;
1101n/a /* pool_address <- first pool-aligned address in the arena
1102n/a nfreepools <- number of whole pools that fit after alignment */
1103n/a arenaobj->pool_address = (block*)arenaobj->address;
1104n/a arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
1105n/a assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
1106n/a excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
1107n/a if (excess != 0) {
1108n/a --arenaobj->nfreepools;
1109n/a arenaobj->pool_address += POOL_SIZE - excess;
1110n/a }
1111n/a arenaobj->ntotalpools = arenaobj->nfreepools;
1112n/a
1113n/a return arenaobj;
1114n/a}
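
/* Illustrative sketch, not part of obmalloc.c: the pool-alignment arithmetic
 * at the tail of new_arena(). An arena holds ARENA_SIZE / POOL_SIZE == 64
 * pools, but if the arena's base address is not pool-aligned the leading
 * partial pool is skipped and one pool is lost. The hypothetical helper
 * recomputes nfreepools and pool_address for a given base address.
 */
static uint
example_pools_in_arena(uintptr_t arena_base, block **first_pool)
{
    uint npools = ARENA_SIZE / POOL_SIZE;               /* 256KB / 4KB */
    uint excess = (uint)(arena_base & POOL_SIZE_MASK);  /* bytes past a pool
                                                           boundary */
    *first_pool = (block *)arena_base;
    if (excess != 0) {
        --npools;                             /* the partial leading pool */
        *first_pool += POOL_SIZE - excess;    /* round up to the next pool */
    }
    return npools;
}
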
1115n/a
1116n/a/*
1117n/aaddress_in_range(P, POOL)
1118n/a
1119n/aReturn true if and only if P is an address that was allocated by pymalloc.
1120n/aPOOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
1121n/a(the caller is asked to compute this because the macro expands POOL more than
1122n/aonce, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
1123n/avariable and pass the latter to the macro; because address_in_range is
1124n/acalled on every alloc/realloc/free, micro-efficiency is important here).
1125n/a
1126n/aTricky: Let B be the arena base address associated with the pool, B =
1127n/aarenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
1128n/a
1129n/a B <= P < B + ARENA_SIZE
1130n/a
1131n/aSubtracting B throughout, this is true iff
1132n/a
1133n/a 0 <= P-B < ARENA_SIZE
1134n/a
1135n/aBy using unsigned arithmetic, the "0 <=" half of the test can be skipped.
1136n/a
1137n/aObscure: A PyMem "free memory" function can call the pymalloc free or realloc
1138n/abefore the first arena has been allocated. `arenas` is still NULL in that
1139n/acase. We're relying on maxarenas also being 0 in that case, so that
1140n/a(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
1141n/ainto a NULL arenas.
1142n/a
1143n/aDetails: given P and POOL, the arena_object corresponding to P is AO =
1144n/aarenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
1145n/astores, etc), POOL is the correct address of P's pool, AO.address is the
1146n/acorrect base address of the pool's arena, and P must be within ARENA_SIZE of
1147n/aAO.address. In addition, AO.address is not 0 (no arena can start at address 0
1148n/a(NULL)). Therefore address_in_range correctly reports that obmalloc
1149n/acontrols P.
1150n/a
1151n/aNow suppose obmalloc does not control P (e.g., P was obtained via a direct
1152n/acall to the system malloc() or realloc()). (POOL)->arenaindex may be anything
1153n/ain this case -- it may even be uninitialized trash. If the trash arenaindex
1154n/ais >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
1155n/acontrol P.
1156n/a
1157n/aElse arenaindex is < maxarena, and AO is read up. If AO corresponds to an
1158n/aallocated arena, obmalloc controls all the memory in slice AO.address :
1159n/aAO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
1160n/aso P doesn't lie in that slice, so the macro correctly reports that P is not
1161n/acontrolled by obmalloc.
1162n/a
1163n/aFinally, if P is not controlled by obmalloc and AO corresponds to an unused
1164n/aarena_object (one not currently associated with an allocated arena),
1165n/aAO.address is 0, and the second test in the macro reduces to:
1166n/a
1167n/a P < ARENA_SIZE
1168n/a
1169n/aIf P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
1170n/athat P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
1171n/aof the test still passes, and the third clause (AO.address != 0) is necessary
1172n/ato get the correct result: AO.address is 0 in this case, so the macro
1173n/acorrectly reports that P is not controlled by obmalloc (despite that P lies in
1174n/aslice AO.address : AO.address + ARENA_SIZE).
1175n/a
1176n/aNote: The third (AO.address != 0) clause was added in Python 2.5. Before
1177n/a2.5, arenas were never free()'ed, and an arenaindex < maxarena always
1178n/acorresponded to a currently-allocated arena, so the "P is not controlled by
1179n/aobmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
1180n/awas impossible.
1181n/a
1182n/aNote that the logic is excruciating, and reading up possibly uninitialized
1183n/amemory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
1184n/acreates problems for some memory debuggers. The overwhelming advantage is
1185n/athat this test determines whether an arbitrary address is controlled by
1186n/aobmalloc in a small constant time, independent of the number of arenas
1187n/aobmalloc controls. Since this test is needed at every entry point, it's
1188n/aextremely desirable that it be this fast.
1189n/a*/
1190n/a
1191n/astatic bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
1192n/aaddress_in_range(void *p, poolp pool)
1193n/a{
1194n/a // Since address_in_range may be reading from memory which was not allocated
1195n/a // by Python, it is important that pool->arenaindex is read only once, as
1196n/a // another thread may be concurrently modifying the value without holding
1197n/a // the GIL. The following dance forces the compiler to read pool->arenaindex
1198n/a // only once.
1199n/a uint arenaindex = *((volatile uint *)&pool->arenaindex);
1200n/a return arenaindex < maxarenas &&
1201n/a (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
1202n/a arenas[arenaindex].address != 0;
1203n/a}
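
/* Illustrative sketch, not part of obmalloc.c: the unsigned-arithmetic trick
 * used above. Because the subtraction is done in uintptr_t, a pointer below
 * the arena base wraps to a huge value, so the single comparison
 * "(uintptr_t)p - base < ARENA_SIZE" covers both "p >= base" and
 * "p < base + ARENA_SIZE". The hypothetical helper shows how callers are
 * expected to pair POOL_ADDR() with address_in_range().
 */
static int
example_is_pymalloc_pointer(void *p)
{
    poolp pool = POOL_ADDR(p);   /* compute once; pass it in, as asked above */
    return address_in_range(p, pool);
}
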
1204n/a
1205n/a/*==========================================================================*/
1206n/a
1207n/a/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
1208n/a * from all other currently live pointers. This may not be possible.
1209n/a */
1210n/a
1211n/a/*
1212n/a * The basic blocks are ordered by decreasing execution frequency,
1213n/a * which minimizes the number of jumps in the most common cases,
1214n/a * improves branching prediction and instruction scheduling (small
1215n/a * block allocations typically result in a couple of instructions).
1216n/a * Unless the optimizer reorders everything, being too smart...
1217n/a */
1218n/a
1219n/astatic void *
1220n/a_PyObject_Alloc(int use_calloc, void *ctx, size_t nelem, size_t elsize)
1221n/a{
1222n/a size_t nbytes;
1223n/a block *bp;
1224n/a poolp pool;
1225n/a poolp next;
1226n/a uint size;
1227n/a
1228n/a _Py_AllocatedBlocks++;
1229n/a
1230n/a assert(nelem <= PY_SSIZE_T_MAX / elsize);
1231n/a nbytes = nelem * elsize;
1232n/a
1233n/a#ifdef WITH_VALGRIND
1234n/a if (UNLIKELY(running_on_valgrind == -1))
1235n/a running_on_valgrind = RUNNING_ON_VALGRIND;
1236n/a if (UNLIKELY(running_on_valgrind))
1237n/a goto redirect;
1238n/a#endif
1239n/a
1240n/a if (nelem == 0 || elsize == 0)
1241n/a goto redirect;
1242n/a
1243n/a if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
1244n/a LOCK();
1245n/a /*
1246n/a * Most frequent paths first
1247n/a */
1248n/a size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
1249n/a pool = usedpools[size + size];
1250n/a if (pool != pool->nextpool) {
1251n/a /*
1252n/a * There is a used pool for this size class.
1253n/a * Pick up the head block of its free list.
1254n/a */
1255n/a ++pool->ref.count;
1256n/a bp = pool->freeblock;
1257n/a assert(bp != NULL);
1258n/a if ((pool->freeblock = *(block **)bp) != NULL) {
1259n/a UNLOCK();
1260n/a if (use_calloc)
1261n/a memset(bp, 0, nbytes);
1262n/a return (void *)bp;
1263n/a }
1264n/a /*
1265n/a * Reached the end of the free list, try to extend it.
1266n/a */
1267n/a if (pool->nextoffset <= pool->maxnextoffset) {
1268n/a /* There is room for another block. */
1269n/a pool->freeblock = (block*)pool +
1270n/a pool->nextoffset;
1271n/a pool->nextoffset += INDEX2SIZE(size);
1272n/a *(block **)(pool->freeblock) = NULL;
1273n/a UNLOCK();
1274n/a if (use_calloc)
1275n/a memset(bp, 0, nbytes);
1276n/a return (void *)bp;
1277n/a }
1278n/a /* Pool is full, unlink from used pools. */
1279n/a next = pool->nextpool;
1280n/a pool = pool->prevpool;
1281n/a next->prevpool = pool;
1282n/a pool->nextpool = next;
1283n/a UNLOCK();
1284n/a if (use_calloc)
1285n/a memset(bp, 0, nbytes);
1286n/a return (void *)bp;
1287n/a }
1288n/a
1289n/a /* There isn't a pool of the right size class immediately
1290n/a * available: use a free pool.
1291n/a */
1292n/a if (usable_arenas == NULL) {
1293n/a /* No arena has a free pool: allocate a new arena. */
1294n/a#ifdef WITH_MEMORY_LIMITS
1295n/a if (narenas_currently_allocated >= MAX_ARENAS) {
1296n/a UNLOCK();
1297n/a goto redirect;
1298n/a }
1299n/a#endif
1300n/a usable_arenas = new_arena();
1301n/a if (usable_arenas == NULL) {
1302n/a UNLOCK();
1303n/a goto redirect;
1304n/a }
1305n/a usable_arenas->nextarena =
1306n/a usable_arenas->prevarena = NULL;
1307n/a }
1308n/a assert(usable_arenas->address != 0);
1309n/a
1310n/a /* Try to get a cached free pool. */
1311n/a pool = usable_arenas->freepools;
1312n/a if (pool != NULL) {
1313n/a /* Unlink from cached pools. */
1314n/a usable_arenas->freepools = pool->nextpool;
1315n/a
1316n/a /* This arena already had the smallest nfreepools
1317n/a * value, so decreasing nfreepools doesn't change
1318n/a * that, and we don't need to rearrange the
1319n/a * usable_arenas list. However, if the arena has
1320n/a * become wholly allocated, we need to remove its
1321n/a * arena_object from usable_arenas.
1322n/a */
1323n/a --usable_arenas->nfreepools;
1324n/a if (usable_arenas->nfreepools == 0) {
1325n/a /* Wholly allocated: remove. */
1326n/a assert(usable_arenas->freepools == NULL);
1327n/a assert(usable_arenas->nextarena == NULL ||
1328n/a usable_arenas->nextarena->prevarena ==
1329n/a usable_arenas);
1330n/a
1331n/a usable_arenas = usable_arenas->nextarena;
1332n/a if (usable_arenas != NULL) {
1333n/a usable_arenas->prevarena = NULL;
1334n/a assert(usable_arenas->address != 0);
1335n/a }
1336n/a }
1337n/a else {
1338n/a /* nfreepools > 0: it must be that freepools
1339n/a * isn't NULL, or that we haven't yet carved
1340n/a * off all the arena's pools for the first
1341n/a * time.
1342n/a */
1343n/a assert(usable_arenas->freepools != NULL ||
1344n/a usable_arenas->pool_address <=
1345n/a (block*)usable_arenas->address +
1346n/a ARENA_SIZE - POOL_SIZE);
1347n/a }
1348n/a init_pool:
1349n/a /* Frontlink to used pools. */
1350n/a next = usedpools[size + size]; /* == prev */
1351n/a pool->nextpool = next;
1352n/a pool->prevpool = next;
1353n/a next->nextpool = pool;
1354n/a next->prevpool = pool;
1355n/a pool->ref.count = 1;
1356n/a if (pool->szidx == size) {
1357n/a /* Luckily, this pool last contained blocks
1358n/a * of the same size class, so its header
1359n/a * and free list are already initialized.
1360n/a */
1361n/a bp = pool->freeblock;
1362n/a assert(bp != NULL);
1363n/a pool->freeblock = *(block **)bp;
1364n/a UNLOCK();
1365n/a if (use_calloc)
1366n/a memset(bp, 0, nbytes);
1367n/a return (void *)bp;
1368n/a }
1369n/a /*
1370n/a * Initialize the pool header, set up the free list to
1371n/a * contain just the second block, and return the first
1372n/a * block.
1373n/a */
1374n/a pool->szidx = size;
1375n/a size = INDEX2SIZE(size);
1376n/a bp = (block *)pool + POOL_OVERHEAD;
1377n/a pool->nextoffset = POOL_OVERHEAD + (size << 1);
1378n/a pool->maxnextoffset = POOL_SIZE - size;
1379n/a pool->freeblock = bp + size;
1380n/a *(block **)(pool->freeblock) = NULL;
1381n/a UNLOCK();
1382n/a if (use_calloc)
1383n/a memset(bp, 0, nbytes);
1384n/a return (void *)bp;
1385n/a }
1386n/a
1387n/a /* Carve off a new pool. */
1388n/a assert(usable_arenas->nfreepools > 0);
1389n/a assert(usable_arenas->freepools == NULL);
1390n/a pool = (poolp)usable_arenas->pool_address;
1391n/a assert((block*)pool <= (block*)usable_arenas->address +
1392n/a ARENA_SIZE - POOL_SIZE);
1393n/a pool->arenaindex = (uint)(usable_arenas - arenas);
1394n/a assert(&arenas[pool->arenaindex] == usable_arenas);
1395n/a pool->szidx = DUMMY_SIZE_IDX;
1396n/a usable_arenas->pool_address += POOL_SIZE;
1397n/a --usable_arenas->nfreepools;
1398n/a
1399n/a if (usable_arenas->nfreepools == 0) {
1400n/a assert(usable_arenas->nextarena == NULL ||
1401n/a usable_arenas->nextarena->prevarena ==
1402n/a usable_arenas);
1403n/a /* Unlink the arena: it is completely allocated. */
1404n/a usable_arenas = usable_arenas->nextarena;
1405n/a if (usable_arenas != NULL) {
1406n/a usable_arenas->prevarena = NULL;
1407n/a assert(usable_arenas->address != 0);
1408n/a }
1409n/a }
1410n/a
1411n/a goto init_pool;
1412n/a }
1413n/a
1414n/a /* The small block allocator ends here. */
1415n/a
1416n/aredirect:
1417n/a /* Redirect the original request to the underlying (libc) allocator.
1418n/a * We jump here on bigger requests, on error in the code above (as a
1419n/a * last chance to serve the request) or when the max memory limit
1420n/a * has been reached.
1421n/a */
1422n/a {
1423n/a void *result;
1424n/a if (use_calloc)
1425n/a result = PyMem_RawCalloc(nelem, elsize);
1426n/a else
1427n/a result = PyMem_RawMalloc(nbytes);
1428n/a if (!result)
1429n/a _Py_AllocatedBlocks--;
1430n/a return result;
1431n/a }
1432n/a}
1433n/a
1434n/astatic void *
1435n/a_PyObject_Malloc(void *ctx, size_t nbytes)
1436n/a{
1437n/a return _PyObject_Alloc(0, ctx, 1, nbytes);
1438n/a}
1439n/a
1440n/astatic void *
1441n/a_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
1442n/a{
1443n/a return _PyObject_Alloc(1, ctx, nelem, elsize);
1444n/a}
1445n/a
1446n/a/* free */
1447n/a
1448n/astatic void
1449n/a_PyObject_Free(void *ctx, void *p)
1450n/a{
1451n/a poolp pool;
1452n/a block *lastfree;
1453n/a poolp next, prev;
1454n/a uint size;
1455n/a
1456n/a if (p == NULL) /* free(NULL) has no effect */
1457n/a return;
1458n/a
1459n/a _Py_AllocatedBlocks--;
1460n/a
1461n/a#ifdef WITH_VALGRIND
1462n/a if (UNLIKELY(running_on_valgrind > 0))
1463n/a goto redirect;
1464n/a#endif
1465n/a
1466n/a pool = POOL_ADDR(p);
1467n/a if (address_in_range(p, pool)) {
1468n/a /* We allocated this address. */
1469n/a LOCK();
1470n/a /* Link p to the start of the pool's freeblock list. Since
1471n/a * the pool had at least the p block outstanding, the pool
1472n/a * wasn't empty (so it's already in a usedpools[] list, or
1473n/a * was full and is in no list -- it's not in the freeblocks
1474n/a * list in any case).
1475n/a */
1476n/a assert(pool->ref.count > 0); /* else it was empty */
1477n/a *(block **)p = lastfree = pool->freeblock;
1478n/a pool->freeblock = (block *)p;
1479n/a if (lastfree) {
1480n/a struct arena_object* ao;
1481n/a uint nf; /* ao->nfreepools */
1482n/a
1483n/a /* freeblock wasn't NULL, so the pool wasn't full,
1484n/a * and the pool is in a usedpools[] list.
1485n/a */
1486n/a if (--pool->ref.count != 0) {
1487n/a /* pool isn't empty: leave it in usedpools */
1488n/a UNLOCK();
1489n/a return;
1490n/a }
1491n/a /* Pool is now empty: unlink from usedpools, and
1492n/a * link to the front of freepools. This ensures that
1493n/a * previously freed pools will be allocated later
1494n/a * (being not referenced, they are perhaps paged out).
1495n/a */
1496n/a next = pool->nextpool;
1497n/a prev = pool->prevpool;
1498n/a next->prevpool = prev;
1499n/a prev->nextpool = next;
1500n/a
1501n/a /* Link the pool to freepools. This is a singly-linked
1502n/a * list, and pool->prevpool isn't used there.
1503n/a */
1504n/a ao = &arenas[pool->arenaindex];
1505n/a pool->nextpool = ao->freepools;
1506n/a ao->freepools = pool;
1507n/a nf = ++ao->nfreepools;
1508n/a
1509n/a /* All the rest is arena management. We just freed
1510n/a * a pool, and there are 4 cases for arena mgmt:
1511n/a * 1. If all the pools are free, return the arena to
1512n/a * the system free().
1513n/a * 2. If this is the only free pool in the arena,
1514n/a * add the arena back to the `usable_arenas` list.
1515n/a * 3. If the "next" arena has a smaller count of free
1516n/a * pools, we have to "slide this arena right" to
1517n/a             * keep usable_arenas sorted in increasing order
1518n/a             * of nfreepools.
1519n/a * 4. Else there's nothing more to do.
1520n/a */
1521n/a if (nf == ao->ntotalpools) {
1522n/a /* Case 1. First unlink ao from usable_arenas.
1523n/a */
1524n/a assert(ao->prevarena == NULL ||
1525n/a ao->prevarena->address != 0);
1526n/a                assert(ao->nextarena == NULL ||
1527n/a ao->nextarena->address != 0);
1528n/a
1529n/a /* Fix the pointer in the prevarena, or the
1530n/a * usable_arenas pointer.
1531n/a */
1532n/a if (ao->prevarena == NULL) {
1533n/a usable_arenas = ao->nextarena;
1534n/a assert(usable_arenas == NULL ||
1535n/a usable_arenas->address != 0);
1536n/a }
1537n/a else {
1538n/a assert(ao->prevarena->nextarena == ao);
1539n/a ao->prevarena->nextarena =
1540n/a ao->nextarena;
1541n/a }
1542n/a /* Fix the pointer in the nextarena. */
1543n/a if (ao->nextarena != NULL) {
1544n/a assert(ao->nextarena->prevarena == ao);
1545n/a ao->nextarena->prevarena =
1546n/a ao->prevarena;
1547n/a }
1548n/a /* Record that this arena_object slot is
1549n/a * available to be reused.
1550n/a */
1551n/a ao->nextarena = unused_arena_objects;
1552n/a unused_arena_objects = ao;
1553n/a
1554n/a /* Free the entire arena. */
1555n/a _PyObject_Arena.free(_PyObject_Arena.ctx,
1556n/a (void *)ao->address, ARENA_SIZE);
1557n/a ao->address = 0; /* mark unassociated */
1558n/a --narenas_currently_allocated;
1559n/a
1560n/a UNLOCK();
1561n/a return;
1562n/a }
1563n/a if (nf == 1) {
1564n/a /* Case 2. Put ao at the head of
1565n/a * usable_arenas. Note that because
1566n/a * ao->nfreepools was 0 before, ao isn't
1567n/a * currently on the usable_arenas list.
1568n/a */
1569n/a ao->nextarena = usable_arenas;
1570n/a ao->prevarena = NULL;
1571n/a if (usable_arenas)
1572n/a usable_arenas->prevarena = ao;
1573n/a usable_arenas = ao;
1574n/a assert(usable_arenas->address != 0);
1575n/a
1576n/a UNLOCK();
1577n/a return;
1578n/a }
1579n/a /* If this arena is now out of order, we need to keep
1580n/a * the list sorted. The list is kept sorted so that
1581n/a * the "most full" arenas are used first, which allows
1582n/a * the nearly empty arenas to be completely freed. In
1583n/a * a few un-scientific tests, it seems like this
1584n/a * approach allowed a lot more memory to be freed.
1585n/a */
1586n/a if (ao->nextarena == NULL ||
1587n/a nf <= ao->nextarena->nfreepools) {
1588n/a /* Case 4. Nothing to do. */
1589n/a UNLOCK();
1590n/a return;
1591n/a }
1592n/a /* Case 3: We have to move the arena towards the end
1593n/a * of the list, because it has more free pools than
1594n/a * the arena to its right.
1595n/a * First unlink ao from usable_arenas.
1596n/a */
1597n/a if (ao->prevarena != NULL) {
1598n/a /* ao isn't at the head of the list */
1599n/a assert(ao->prevarena->nextarena == ao);
1600n/a ao->prevarena->nextarena = ao->nextarena;
1601n/a }
1602n/a else {
1603n/a /* ao is at the head of the list */
1604n/a assert(usable_arenas == ao);
1605n/a usable_arenas = ao->nextarena;
1606n/a }
1607n/a ao->nextarena->prevarena = ao->prevarena;
1608n/a
1609n/a /* Locate the new insertion point by iterating over
1610n/a * the list, using our nextarena pointer.
1611n/a */
1612n/a while (ao->nextarena != NULL &&
1613n/a nf > ao->nextarena->nfreepools) {
1614n/a ao->prevarena = ao->nextarena;
1615n/a ao->nextarena = ao->nextarena->nextarena;
1616n/a }
1617n/a
1618n/a /* Insert ao at this point. */
1619n/a assert(ao->nextarena == NULL ||
1620n/a ao->prevarena == ao->nextarena->prevarena);
1621n/a assert(ao->prevarena->nextarena == ao->nextarena);
1622n/a
1623n/a ao->prevarena->nextarena = ao;
1624n/a if (ao->nextarena != NULL)
1625n/a ao->nextarena->prevarena = ao;
1626n/a
1627n/a /* Verify that the swaps worked. */
1628n/a assert(ao->nextarena == NULL ||
1629n/a nf <= ao->nextarena->nfreepools);
1630n/a assert(ao->prevarena == NULL ||
1631n/a nf > ao->prevarena->nfreepools);
1632n/a assert(ao->nextarena == NULL ||
1633n/a ao->nextarena->prevarena == ao);
1634n/a assert((usable_arenas == ao &&
1635n/a ao->prevarena == NULL) ||
1636n/a ao->prevarena->nextarena == ao);
1637n/a
1638n/a UNLOCK();
1639n/a return;
1640n/a }
1641n/a /* Pool was full, so doesn't currently live in any list:
1642n/a * link it to the front of the appropriate usedpools[] list.
1643n/a * This mimics LRU pool usage for new allocations and
1644n/a * targets optimal filling when several pools contain
1645n/a * blocks of the same size class.
1646n/a */
1647n/a --pool->ref.count;
1648n/a assert(pool->ref.count > 0); /* else the pool is empty */
1649n/a size = pool->szidx;
1650n/a next = usedpools[size + size];
1651n/a prev = next->prevpool;
1652n/a /* insert pool before next: prev <-> pool <-> next */
1653n/a pool->nextpool = next;
1654n/a pool->prevpool = prev;
1655n/a next->prevpool = pool;
1656n/a prev->nextpool = pool;
1657n/a UNLOCK();
1658n/a return;
1659n/a }
1660n/a
1661n/a#ifdef WITH_VALGRIND
1662n/aredirect:
1663n/a#endif
1664n/a /* We didn't allocate this address. */
1665n/a PyMem_RawFree(p);
1666n/a}
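
The arena bookkeeping in the free path above maintains one central invariant: usable_arenas stays sorted by ascending nfreepools, so the most nearly full arenas are carved up first and nearly empty ones get a chance to become completely free. A self-contained sketch of that invariant written as a checker (the struct and function only mirror the relevant arena_object fields and are not part of this file):

    /* Sketch only: the ordering invariant maintained on usable_arenas by
     * the free path above.  The struct mirrors just the fields the check
     * needs; the checker itself is hypothetical. */
    #include <stddef.h>

    struct arena_sketch {
        unsigned int nfreepools;
        struct arena_sketch *nextarena;
        struct arena_sketch *prevarena;
    };

    static int
    usable_arenas_sorted(const struct arena_sketch *head)
    {
        const struct arena_sketch *a;
        for (a = head; a != NULL && a->nextarena != NULL; a = a->nextarena) {
            /* nfreepools must be non-decreasing along the list ... */
            if (a->nfreepools > a->nextarena->nfreepools)
                return 0;
            /* ... and the back links must agree with the forward links. */
            if (a->nextarena->prevarena != a)
                return 0;
        }
        return 1;
    }
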
1667n/a
1668n/a/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1669n/a * then as the Python docs promise, we do not treat this like free(p), and
1670n/a * return a non-NULL result.
1671n/a */
1672n/a
1673n/astatic void *
1674n/a_PyObject_Realloc(void *ctx, void *p, size_t nbytes)
1675n/a{
1676n/a void *bp;
1677n/a poolp pool;
1678n/a size_t size;
1679n/a
1680n/a if (p == NULL)
1681n/a return _PyObject_Alloc(0, ctx, 1, nbytes);
1682n/a
1683n/a#ifdef WITH_VALGRIND
1684n/a /* Treat running_on_valgrind == -1 the same as 0 */
1685n/a if (UNLIKELY(running_on_valgrind > 0))
1686n/a goto redirect;
1687n/a#endif
1688n/a
1689n/a pool = POOL_ADDR(p);
1690n/a if (address_in_range(p, pool)) {
1691n/a /* We're in charge of this block */
1692n/a size = INDEX2SIZE(pool->szidx);
1693n/a if (nbytes <= size) {
1694n/a /* The block is staying the same or shrinking. If
1695n/a * it's shrinking, there's a tradeoff: it costs
1696n/a * cycles to copy the block to a smaller size class,
1697n/a * but it wastes memory not to copy it. The
1698n/a * compromise here is to copy on shrink only if at
1699n/a * least 25% of size can be shaved off.
1700n/a */
1701n/a if (4 * nbytes > 3 * size) {
1702n/a /* It's the same,
1703n/a * or shrinking and new/old > 3/4.
1704n/a */
1705n/a return p;
1706n/a }
1707n/a size = nbytes;
1708n/a }
1709n/a bp = _PyObject_Alloc(0, ctx, 1, nbytes);
1710n/a if (bp != NULL) {
1711n/a memcpy(bp, p, size);
1712n/a _PyObject_Free(ctx, p);
1713n/a }
1714n/a return bp;
1715n/a }
1716n/a#ifdef WITH_VALGRIND
1717n/a redirect:
1718n/a#endif
1719n/a /* We're not managing this block. If nbytes <=
1720n/a * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1721n/a * block. However, if we do, we need to copy the valid data from
1722n/a * the C-managed block to one of our blocks, and there's no portable
1723n/a * way to know how much of the memory space starting at p is valid.
1724n/a * As bug 1185883 pointed out the hard way, it's possible that the
1725n/a * C-managed block is "at the end" of allocated VM space, so that
1726n/a * a memory fault can occur if we try to copy nbytes bytes starting
1727n/a * at p. Instead we punt: let C continue to manage this block.
1728n/a */
1729n/a if (nbytes)
1730n/a return PyMem_RawRealloc(p, nbytes);
1731n/a /* C doesn't define the result of realloc(p, 0) (it may or may not
1732n/a * return NULL then), but Python's docs promise that nbytes==0 never
1733n/a * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
1734n/a * to begin with. Even then, we can't be sure that realloc() won't
1735n/a * return NULL.
1736n/a */
1737n/a bp = PyMem_RawRealloc(p, 1);
1738n/a return bp ? bp : p;
1739n/a}
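
The shrink heuristic above keeps a block in place unless at least 25% of its size class would be recovered by copying, i.e. it returns p unchanged while 4*nbytes > 3*size. A small self-contained illustration of that arithmetic (the helper is hypothetical):

    #include <stdio.h>

    /* Hypothetical helper mirroring the test in _PyObject_Realloc above:
     * return 1 if the block should stay where it is, 0 if it is worth
     * copying it into a smaller size class. */
    static int
    keep_in_place(size_t nbytes, size_t size)
    {
        /* same size, or shrinking by less than 25% of the old size */
        return nbytes <= size && 4 * nbytes > 3 * size;
    }

    int
    main(void)
    {
        /* old block belongs to the 512-byte size class */
        printf("%d\n", keep_in_place(400, 512));  /* 1: only ~22% saved */
        printf("%d\n", keep_in_place(384, 512));  /* 0: exactly 25% saved, so copy */
        return 0;
    }
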
1740n/a
1741n/a#else /* ! WITH_PYMALLOC */
1742n/a
1743n/a/*==========================================================================*/
1744n/a/* pymalloc not enabled: Redirect the entry points to malloc. These will
1745n/a * only be used by extensions that are compiled with pymalloc enabled. */
1746n/a
1747n/aPy_ssize_t
1748n/a_Py_GetAllocatedBlocks(void)
1749n/a{
1750n/a return 0;
1751n/a}
1752n/a
1753n/a#endif /* WITH_PYMALLOC */
1754n/a
1755n/a
1756n/a/*==========================================================================*/
1757n/a/* A cross-platform debugging allocator.  This doesn't manage memory directly;
1758n/a * it wraps a real allocator, adding extra debugging info to the memory blocks.
1759n/a */
1760n/a
1761n/a/* Special bytes broadcast into debug memory blocks at appropriate times.
1762n/a * Strings of these are unlikely to be valid addresses, floats, ints or
1763n/a * 7-bit ASCII.
1764n/a */
1765n/a#undef CLEANBYTE
1766n/a#undef DEADBYTE
1767n/a#undef FORBIDDENBYTE
1768n/a#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
1769n/a#define DEADBYTE 0xDB /* dead (newly freed) memory */
1770n/a#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
1771n/a
1772n/astatic size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1773n/a
1774n/a/* serialno is always incremented via calling this routine. The point is
1775n/a * to supply a single place to set a breakpoint.
1776n/a */
1777n/astatic void
1778n/abumpserialno(void)
1779n/a{
1780n/a ++serialno;
1781n/a}
1782n/a
1783n/a#define SST SIZEOF_SIZE_T
1784n/a
1785n/a/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1786n/astatic size_t
1787n/aread_size_t(const void *p)
1788n/a{
1789n/a const uint8_t *q = (const uint8_t *)p;
1790n/a size_t result = *q++;
1791n/a int i;
1792n/a
1793n/a for (i = SST; --i > 0; ++q)
1794n/a result = (result << 8) | *q;
1795n/a return result;
1796n/a}
1797n/a
1798n/a/* Write n as a big-endian size_t, MSB at address p, LSB at
1799n/a * p + sizeof(size_t) - 1.
1800n/a */
1801n/astatic void
1802n/awrite_size_t(void *p, size_t n)
1803n/a{
1804n/a uint8_t *q = (uint8_t *)p + SST - 1;
1805n/a int i;
1806n/a
1807n/a for (i = SST; --i >= 0; --q) {
1808n/a *q = (uint8_t)(n & 0xff);
1809n/a n >>= 8;
1810n/a }
1811n/a}
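
read_size_t() and write_size_t() store header fields in a fixed big-endian byte order so they read naturally in a hex dump regardless of host endianness. A quick self-contained round trip, assuming an 8-byte size_t:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Assumes sizeof(size_t) == 8, as on most 64-bit builds. */
        uint8_t buf[sizeof(size_t)];
        size_t n = 0x0123456789abcdefULL;
        size_t i;

        /* Big-endian encode, as write_size_t() does. */
        for (i = sizeof(size_t); i-- > 0; ) {
            buf[i] = (uint8_t)(n & 0xff);
            n >>= 8;
        }
        /* The most significant byte lands first in memory. */
        assert(buf[0] == 0x01 && buf[sizeof(size_t) - 1] == 0xef);

        /* Big-endian decode, as read_size_t() does. */
        size_t back = 0;
        for (i = 0; i < sizeof(size_t); i++)
            back = (back << 8) | buf[i];
        printf("%zx\n", back);  /* prints 123456789abcdef */
        return 0;
    }
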
1812n/a
1813n/a/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1814n/a fills them with useful stuff, here calling the underlying malloc's result p:
1815n/a
1816n/ap[0: S]
1817n/a Number of bytes originally asked for. This is a size_t, big-endian (easier
1818n/a to read in a memory dump).
1819n/ap[S]
1820n/a    API ID.  See PEP 445.  A single character identifying the domain: 'r' (raw), 'm' (mem) or 'o' (object).
1821n/ap[S+1: 2*S]
1822n/a    Copies of FORBIDDENBYTE.  Used to catch under-writes and under-reads.
1823n/ap[2*S: 2*S+n]
1824n/a The requested memory, filled with copies of CLEANBYTE.
1825n/a    Used to catch references to uninitialized memory.
1826n/a &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
1827n/a handled the request itself.
1828n/ap[2*S+n: 2*S+n+S]
1829n/a    Copies of FORBIDDENBYTE.  Used to catch over-writes and over-reads.
1830n/ap[2*S+n+S: 2*S+n+2*S]
1831n/a A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1832n/a and _PyMem_DebugRealloc.
1833n/a This is a big-endian size_t.
1834n/a If "bad memory" is detected later, the serial number gives an
1835n/a excellent way to set a breakpoint on the next run, to capture the
1836n/a instant at which this block was passed out.
1837n/a*/
1838n/a
1839n/astatic void *
1840n/a_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
1841n/a{
1842n/a debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
1843n/a uint8_t *p; /* base address of malloc'ed block */
1844n/a uint8_t *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1845n/a size_t total; /* nbytes + 4*SST */
1846n/a
1847n/a bumpserialno();
1848n/a total = nbytes + 4*SST;
1849n/a if (nbytes > PY_SSIZE_T_MAX - 4*SST)
1850n/a /* overflow: can't represent total as a Py_ssize_t */
1851n/a return NULL;
1852n/a
1853n/a if (use_calloc)
1854n/a p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
1855n/a else
1856n/a p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
1857n/a if (p == NULL)
1858n/a return NULL;
1859n/a
1860n/a /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
1861n/a write_size_t(p, nbytes);
1862n/a p[SST] = (uint8_t)api->api_id;
1863n/a memset(p + SST + 1, FORBIDDENBYTE, SST-1);
1864n/a
1865n/a if (nbytes > 0 && !use_calloc)
1866n/a memset(p + 2*SST, CLEANBYTE, nbytes);
1867n/a
1868n/a /* at tail, write pad (SST bytes) and serialno (SST bytes) */
1869n/a tail = p + 2*SST + nbytes;
1870n/a memset(tail, FORBIDDENBYTE, SST);
1871n/a write_size_t(tail + SST, serialno);
1872n/a
1873n/a return p + 2*SST;
1874n/a}
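
Given only the pointer p that _PyMem_DebugRawAlloc hands back, the routines below recover the surrounding fields purely by pointer arithmetic. A sketch of that arithmetic (the struct and helper are illustrative, not part of this file):

    /* Sketch only: locating the debug decorations around a pointer p that
     * was returned by the debug allocator.  The field layout matches the
     * comment above; the struct and function are illustrative. */
    #include <stddef.h>
    #include <stdint.h>

    struct debug_block_view {
        const uint8_t *size_field;  /* p - 2*S: big-endian request size        */
        uint8_t api_id;             /* p[-S]:   'r', 'm' or 'o'                */
        const uint8_t *lead_pad;    /* p - S + 1: S-1 FORBIDDENBYTEs           */
        const uint8_t *data;        /* p itself: the bytes the caller asked for */
    };

    static struct debug_block_view
    view_debug_block(const void *p)
    {
        const int S = (int)sizeof(size_t);
        const uint8_t *q = (const uint8_t *)p;
        struct debug_block_view v;
        v.size_field = q - 2 * S;
        v.api_id = q[-S];
        v.lead_pad = q - S + 1;
        v.data = q;
        /* The trailing pad sits at data + nbytes and the serial number at
         * data + nbytes + S, once nbytes has been decoded from size_field. */
        return v;
    }
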
1875n/a
1876n/astatic void *
1877n/a_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
1878n/a{
1879n/a return _PyMem_DebugRawAlloc(0, ctx, nbytes);
1880n/a}
1881n/a
1882n/astatic void *
1883n/a_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
1884n/a{
1885n/a size_t nbytes;
1886n/a assert(elsize == 0 || nelem <= PY_SSIZE_T_MAX / elsize);
1887n/a nbytes = nelem * elsize;
1888n/a return _PyMem_DebugRawAlloc(1, ctx, nbytes);
1889n/a}
1890n/a
1891n/a/* The debug free first checks the 2*SST bytes on each end for sanity (in
1892n/a particular, that the FORBIDDENBYTEs with the api ID are still intact).
1893n/a Then fills the original bytes with DEADBYTE.
1894n/a Then calls the underlying free.
1895n/a*/
1896n/astatic void
1897n/a_PyMem_DebugRawFree(void *ctx, void *p)
1898n/a{
1899n/a debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
1900n/a uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
1901n/a size_t nbytes;
1902n/a
1903n/a if (p == NULL)
1904n/a return;
1905n/a _PyMem_DebugCheckAddress(api->api_id, p);
1906n/a nbytes = read_size_t(q);
1907n/a nbytes += 4*SST;
1908n/a if (nbytes > 0)
1909n/a memset(q, DEADBYTE, nbytes);
1910n/a api->alloc.free(api->alloc.ctx, q);
1911n/a}
1912n/a
1913n/astatic void *
1914n/a_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
1915n/a{
1916n/a debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
1917n/a uint8_t *q = (uint8_t *)p, *oldq;
1918n/a uint8_t *tail;
1919n/a size_t total; /* nbytes + 4*SST */
1920n/a size_t original_nbytes;
1921n/a int i;
1922n/a
1923n/a if (p == NULL)
1924n/a return _PyMem_DebugRawAlloc(0, ctx, nbytes);
1925n/a
1926n/a _PyMem_DebugCheckAddress(api->api_id, p);
1927n/a bumpserialno();
1928n/a original_nbytes = read_size_t(q - 2*SST);
1929n/a total = nbytes + 4*SST;
1930n/a if (nbytes > PY_SSIZE_T_MAX - 4*SST)
1931n/a /* overflow: can't represent total as a Py_ssize_t */
1932n/a return NULL;
1933n/a
1934n/a /* Resize and add decorations. We may get a new pointer here, in which
1935n/a * case we didn't get the chance to mark the old memory with DEADBYTE,
1936n/a * but we live with that.
1937n/a */
1938n/a oldq = q;
1939n/a q = (uint8_t *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total);
1940n/a if (q == NULL)
1941n/a return NULL;
1942n/a
1943n/a if (q == oldq && nbytes < original_nbytes) {
1944n/a /* shrinking: mark old extra memory dead */
1945n/a memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
1946n/a }
1947n/a
1948n/a write_size_t(q, nbytes);
1949n/a assert(q[SST] == (uint8_t)api->api_id);
1950n/a for (i = 1; i < SST; ++i)
1951n/a assert(q[SST + i] == FORBIDDENBYTE);
1952n/a q += 2*SST;
1953n/a
1954n/a tail = q + nbytes;
1955n/a memset(tail, FORBIDDENBYTE, SST);
1956n/a write_size_t(tail + SST, serialno);
1957n/a
1958n/a if (nbytes > original_nbytes) {
1959n/a /* growing: mark new extra memory clean */
1960n/a memset(q + original_nbytes, CLEANBYTE,
1961n/a nbytes - original_nbytes);
1962n/a }
1963n/a
1964n/a return q;
1965n/a}
1966n/a
1967n/astatic void
1968n/a_PyMem_DebugCheckGIL(void)
1969n/a{
1970n/a#ifdef WITH_THREAD
1971n/a if (!PyGILState_Check())
1972n/a Py_FatalError("Python memory allocator called "
1973n/a "without holding the GIL");
1974n/a#endif
1975n/a}
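
This check is the reason that, in debug builds, the PyMem_* and PyObject_* allocators must only be used while the GIL is held; the PyMem_Raw* family has no such requirement. A sketch of how a hypothetical extension thread would satisfy the rule:

    /* Sketch for a hypothetical extension: take the GIL before touching the
     * PyMem/PyObject allocators from a foreign thread; PyMem_Raw* needs no GIL. */
    #include "Python.h"

    static void
    work_on_foreign_thread(size_t n)
    {
        void *raw = PyMem_RawMalloc(n);      /* OK without the GIL */

        PyGILState_STATE st = PyGILState_Ensure();
        void *obj = PyMem_Malloc(n);         /* GIL now held: passes the check */
        PyMem_Free(obj);
        PyGILState_Release(st);

        PyMem_RawFree(raw);
    }
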
1976n/a
1977n/astatic void *
1978n/a_PyMem_DebugMalloc(void *ctx, size_t nbytes)
1979n/a{
1980n/a _PyMem_DebugCheckGIL();
1981n/a return _PyMem_DebugRawMalloc(ctx, nbytes);
1982n/a}
1983n/a
1984n/astatic void *
1985n/a_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
1986n/a{
1987n/a _PyMem_DebugCheckGIL();
1988n/a return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
1989n/a}
1990n/a
1991n/astatic void
1992n/a_PyMem_DebugFree(void *ctx, void *ptr)
1993n/a{
1994n/a _PyMem_DebugCheckGIL();
1995n/a _PyMem_DebugRawFree(ctx, ptr);
1996n/a}
1997n/a
1998n/astatic void *
1999n/a_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
2000n/a{
2001n/a _PyMem_DebugCheckGIL();
2002n/a return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
2003n/a}
2004n/a
2005n/a/* Check the forbidden bytes on both ends of the memory allocated for p.
2006n/a * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
2007n/a * and call Py_FatalError to kill the program.
2008n/a * The API id is also checked.
2009n/a */
2010n/astatic void
2011n/a_PyMem_DebugCheckAddress(char api, const void *p)
2012n/a{
2013n/a const uint8_t *q = (const uint8_t *)p;
2014n/a char msgbuf[64];
2015n/a char *msg;
2016n/a size_t nbytes;
2017n/a const uint8_t *tail;
2018n/a int i;
2019n/a char id;
2020n/a
2021n/a if (p == NULL) {
2022n/a msg = "didn't expect a NULL pointer";
2023n/a goto error;
2024n/a }
2025n/a
2026n/a /* Check the API id */
2027n/a id = (char)q[-SST];
2028n/a if (id != api) {
2029n/a msg = msgbuf;
2030n/a snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
2031n/a msgbuf[sizeof(msgbuf)-1] = 0;
2032n/a goto error;
2033n/a }
2034n/a
2035n/a /* Check the stuff at the start of p first: if there's underwrite
2036n/a * corruption, the number-of-bytes field may be nuts, and checking
2037n/a * the tail could lead to a segfault then.
2038n/a */
2039n/a for (i = SST-1; i >= 1; --i) {
2040n/a if (*(q-i) != FORBIDDENBYTE) {
2041n/a msg = "bad leading pad byte";
2042n/a goto error;
2043n/a }
2044n/a }
2045n/a
2046n/a nbytes = read_size_t(q - 2*SST);
2047n/a tail = q + nbytes;
2048n/a for (i = 0; i < SST; ++i) {
2049n/a if (tail[i] != FORBIDDENBYTE) {
2050n/a msg = "bad trailing pad byte";
2051n/a goto error;
2052n/a }
2053n/a }
2054n/a
2055n/a return;
2056n/a
2057n/aerror:
2058n/a _PyObject_DebugDumpAddress(p);
2059n/a Py_FatalError(msg);
2060n/a}
2061n/a
2062n/a/* Display info to stderr about the memory block at p. */
2063n/astatic void
2064n/a_PyObject_DebugDumpAddress(const void *p)
2065n/a{
2066n/a const uint8_t *q = (const uint8_t *)p;
2067n/a const uint8_t *tail;
2068n/a size_t nbytes, serial;
2069n/a int i;
2070n/a int ok;
2071n/a char id;
2072n/a
2073n/a fprintf(stderr, "Debug memory block at address p=%p:", p);
2074n/a if (p == NULL) {
2075n/a fprintf(stderr, "\n");
2076n/a return;
2077n/a }
2078n/a id = (char)q[-SST];
2079n/a fprintf(stderr, " API '%c'\n", id);
2080n/a
2081n/a nbytes = read_size_t(q - 2*SST);
2082n/a fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
2083n/a "requested\n", nbytes);
2084n/a
2085n/a /* In case this is nuts, check the leading pad bytes first. */
2086n/a fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
2087n/a ok = 1;
2088n/a for (i = 1; i <= SST-1; ++i) {
2089n/a if (*(q-i) != FORBIDDENBYTE) {
2090n/a ok = 0;
2091n/a break;
2092n/a }
2093n/a }
2094n/a if (ok)
2095n/a fputs("FORBIDDENBYTE, as expected.\n", stderr);
2096n/a else {
2097n/a fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
2098n/a FORBIDDENBYTE);
2099n/a for (i = SST-1; i >= 1; --i) {
2100n/a const uint8_t byte = *(q-i);
2101n/a fprintf(stderr, " at p-%d: 0x%02x", i, byte);
2102n/a if (byte != FORBIDDENBYTE)
2103n/a fputs(" *** OUCH", stderr);
2104n/a fputc('\n', stderr);
2105n/a }
2106n/a
2107n/a fputs(" Because memory is corrupted at the start, the "
2108n/a "count of bytes requested\n"
2109n/a " may be bogus, and checking the trailing pad "
2110n/a "bytes may segfault.\n", stderr);
2111n/a }
2112n/a
2113n/a tail = q + nbytes;
2114n/a fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
2115n/a ok = 1;
2116n/a for (i = 0; i < SST; ++i) {
2117n/a if (tail[i] != FORBIDDENBYTE) {
2118n/a ok = 0;
2119n/a break;
2120n/a }
2121n/a }
2122n/a if (ok)
2123n/a fputs("FORBIDDENBYTE, as expected.\n", stderr);
2124n/a else {
2125n/a fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
2126n/a FORBIDDENBYTE);
2127n/a for (i = 0; i < SST; ++i) {
2128n/a const uint8_t byte = tail[i];
2129n/a fprintf(stderr, " at tail+%d: 0x%02x",
2130n/a i, byte);
2131n/a if (byte != FORBIDDENBYTE)
2132n/a fputs(" *** OUCH", stderr);
2133n/a fputc('\n', stderr);
2134n/a }
2135n/a }
2136n/a
2137n/a serial = read_size_t(tail + SST);
2138n/a fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
2139n/a "u to debug malloc/realloc.\n", serial);
2140n/a
2141n/a if (nbytes > 0) {
2142n/a i = 0;
2143n/a fputs(" Data at p:", stderr);
2144n/a /* print up to 8 bytes at the start */
2145n/a while (q < tail && i < 8) {
2146n/a fprintf(stderr, " %02x", *q);
2147n/a ++i;
2148n/a ++q;
2149n/a }
2150n/a /* and up to 8 at the end */
2151n/a if (q < tail) {
2152n/a if (tail - q > 8) {
2153n/a fputs(" ...", stderr);
2154n/a q = tail - 8;
2155n/a }
2156n/a while (q < tail) {
2157n/a fprintf(stderr, " %02x", *q);
2158n/a ++q;
2159n/a }
2160n/a }
2161n/a fputc('\n', stderr);
2162n/a }
2163n/a fputc('\n', stderr);
2164n/a
2165n/a fflush(stderr);
2166n/a _PyMem_DumpTraceback(fileno(stderr), p);
2167n/a}
2168n/a
2169n/a
2170n/astatic size_t
2171n/aprintone(FILE *out, const char* msg, size_t value)
2172n/a{
2173n/a int i, k;
2174n/a char buf[100];
2175n/a size_t origvalue = value;
2176n/a
2177n/a fputs(msg, out);
2178n/a for (i = (int)strlen(msg); i < 35; ++i)
2179n/a fputc(' ', out);
2180n/a fputc('=', out);
2181n/a
2182n/a /* Write the value with commas. */
2183n/a i = 22;
2184n/a buf[i--] = '\0';
2185n/a buf[i--] = '\n';
2186n/a k = 3;
2187n/a do {
2188n/a size_t nextvalue = value / 10;
2189n/a unsigned int digit = (unsigned int)(value - nextvalue * 10);
2190n/a value = nextvalue;
2191n/a buf[i--] = (char)(digit + '0');
2192n/a --k;
2193n/a if (k == 0 && value && i >= 0) {
2194n/a k = 3;
2195n/a buf[i--] = ',';
2196n/a }
2197n/a } while (value && i >= 0);
2198n/a
2199n/a while (i >= 0)
2200n/a buf[i--] = ' ';
2201n/a fputs(buf, out);
2202n/a
2203n/a return origvalue;
2204n/a}
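
printone() pads the label out to column 35, writes '=', then prints the value right-aligned with thousands separators. Within this file, a call such as the following (the number is invented for illustration) produces output like the comment shows:

    /* Sketch: a typical printone() call and its (approximate) output. */
    static void
    print_example_stat(FILE *out)
    {
        (void)printone(out, "# bytes in allocated blocks", 16435200);
        /* prints, roughly:
           # bytes in allocated blocks        =       16,435,200
         */
    }
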
2205n/a
2206n/avoid
2207n/a_PyDebugAllocatorStats(FILE *out,
2208n/a const char *block_name, int num_blocks, size_t sizeof_block)
2209n/a{
2210n/a char buf1[128];
2211n/a char buf2[128];
2212n/a PyOS_snprintf(buf1, sizeof(buf1),
2213n/a "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
2214n/a num_blocks, block_name, sizeof_block);
2215n/a PyOS_snprintf(buf2, sizeof(buf2),
2216n/a "%48s ", buf1);
2217n/a (void)printone(out, buf2, num_blocks * sizeof_block);
2218n/a}
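
_PyDebugAllocatorStats() is the helper that built-in types use to report their per-type free lists alongside the pymalloc report. A hypothetical call site (ExampleObject and example_numfree are invented for this sketch):

    /* Hypothetical call site: how a type's free-list statistics could be
     * reported through _PyDebugAllocatorStats().  ExampleObject and
     * example_numfree are made up for illustration. */
    typedef struct { double payload; } ExampleObject;
    static int example_numfree = 80;

    static void
    example_debug_malloc_stats(FILE *out)
    {
        _PyDebugAllocatorStats(out, "free ExampleObject",
                               example_numfree, sizeof(ExampleObject));
    }
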
2219n/a
2220n/a
2221n/a#ifdef WITH_PYMALLOC
2222n/a
2223n/a#ifdef Py_DEBUG
2224n/a/* Is target in the list? The list is traversed via the nextpool pointers.
2225n/a * The list may be NULL-terminated, or circular. Return 1 if target is in
2226n/a * list, else 0.
2227n/a */
2228n/astatic int
2229n/apool_is_in_list(const poolp target, poolp list)
2230n/a{
2231n/a poolp origlist = list;
2232n/a assert(target != NULL);
2233n/a if (list == NULL)
2234n/a return 0;
2235n/a do {
2236n/a if (target == list)
2237n/a return 1;
2238n/a list = list->nextpool;
2239n/a } while (list != NULL && list != origlist);
2240n/a return 0;
2241n/a}
2242n/a#endif
2243n/a
2244n/a/* Print summary info to "out" about the state of pymalloc's structures.
2245n/a * In Py_DEBUG mode, also perform some expensive internal consistency
2246n/a * checks.
2247n/a */
2248n/avoid
2249n/a_PyObject_DebugMallocStats(FILE *out)
2250n/a{
2251n/a uint i;
2252n/a const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
2253n/a /* # of pools, allocated blocks, and free blocks per class index */
2254n/a size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2255n/a size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2256n/a size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2257n/a /* total # of allocated bytes in used and full pools */
2258n/a size_t allocated_bytes = 0;
2259n/a /* total # of available bytes in used pools */
2260n/a size_t available_bytes = 0;
2261n/a /* # of free pools + pools not yet carved out of current arena */
2262n/a uint numfreepools = 0;
2263n/a /* # of bytes for arena alignment padding */
2264n/a size_t arena_alignment = 0;
2265n/a /* # of bytes in used and full pools used for pool_headers */
2266n/a size_t pool_header_bytes = 0;
2267n/a /* # of bytes in used and full pools wasted due to quantization,
2268n/a * i.e. the necessarily leftover space at the ends of used and
2269n/a * full pools.
2270n/a */
2271n/a size_t quantization = 0;
2272n/a /* # of arenas actually allocated. */
2273n/a size_t narenas = 0;
2274n/a /* running total -- should equal narenas * ARENA_SIZE */
2275n/a size_t total;
2276n/a char buf[128];
2277n/a
2278n/a fprintf(out, "Small block threshold = %d, in %u size classes.\n",
2279n/a SMALL_REQUEST_THRESHOLD, numclasses);
2280n/a
2281n/a for (i = 0; i < numclasses; ++i)
2282n/a numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
2283n/a
2284n/a /* Because full pools aren't linked to from anything, it's easiest
2285n/a * to march over all the arenas. If we're lucky, most of the memory
2286n/a * will be living in full pools -- would be a shame to miss them.
2287n/a */
2288n/a for (i = 0; i < maxarenas; ++i) {
2289n/a uint j;
2290n/a uintptr_t base = arenas[i].address;
2291n/a
2292n/a /* Skip arenas which are not allocated. */
2293n/a if (arenas[i].address == (uintptr_t)NULL)
2294n/a continue;
2295n/a narenas += 1;
2296n/a
2297n/a numfreepools += arenas[i].nfreepools;
2298n/a
2299n/a /* round up to pool alignment */
2300n/a if (base & (uintptr_t)POOL_SIZE_MASK) {
2301n/a arena_alignment += POOL_SIZE;
2302n/a base &= ~(uintptr_t)POOL_SIZE_MASK;
2303n/a base += POOL_SIZE;
2304n/a }
2305n/a
2306n/a /* visit every pool in the arena */
2307n/a assert(base <= (uintptr_t) arenas[i].pool_address);
2308n/a for (j = 0; base < (uintptr_t) arenas[i].pool_address;
2309n/a ++j, base += POOL_SIZE) {
2310n/a poolp p = (poolp)base;
2311n/a const uint sz = p->szidx;
2312n/a uint freeblocks;
2313n/a
2314n/a if (p->ref.count == 0) {
2315n/a /* currently unused */
2316n/a#ifdef Py_DEBUG
2317n/a assert(pool_is_in_list(p, arenas[i].freepools));
2318n/a#endif
2319n/a continue;
2320n/a }
2321n/a ++numpools[sz];
2322n/a numblocks[sz] += p->ref.count;
2323n/a freeblocks = NUMBLOCKS(sz) - p->ref.count;
2324n/a numfreeblocks[sz] += freeblocks;
2325n/a#ifdef Py_DEBUG
2326n/a if (freeblocks > 0)
2327n/a assert(pool_is_in_list(p, usedpools[sz + sz]));
2328n/a#endif
2329n/a }
2330n/a }
2331n/a assert(narenas == narenas_currently_allocated);
2332n/a
2333n/a fputc('\n', out);
2334n/a fputs("class size num pools blocks in use avail blocks\n"
2335n/a "----- ---- --------- ------------- ------------\n",
2336n/a out);
2337n/a
2338n/a for (i = 0; i < numclasses; ++i) {
2339n/a size_t p = numpools[i];
2340n/a size_t b = numblocks[i];
2341n/a size_t f = numfreeblocks[i];
2342n/a uint size = INDEX2SIZE(i);
2343n/a if (p == 0) {
2344n/a assert(b == 0 && f == 0);
2345n/a continue;
2346n/a }
2347n/a fprintf(out, "%5u %6u "
2348n/a "%11" PY_FORMAT_SIZE_T "u "
2349n/a "%15" PY_FORMAT_SIZE_T "u "
2350n/a "%13" PY_FORMAT_SIZE_T "u\n",
2351n/a i, size, p, b, f);
2352n/a allocated_bytes += b * size;
2353n/a available_bytes += f * size;
2354n/a pool_header_bytes += p * POOL_OVERHEAD;
2355n/a quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
2356n/a }
2357n/a fputc('\n', out);
2358n/a if (_PyMem_DebugEnabled())
2359n/a (void)printone(out, "# times object malloc called", serialno);
2360n/a (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
2361n/a (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
2362n/a (void)printone(out, "# arenas highwater mark", narenas_highwater);
2363n/a (void)printone(out, "# arenas allocated current", narenas);
2364n/a
2365n/a PyOS_snprintf(buf, sizeof(buf),
2366n/a "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
2367n/a narenas, ARENA_SIZE);
2368n/a (void)printone(out, buf, narenas * ARENA_SIZE);
2369n/a
2370n/a fputc('\n', out);
2371n/a
2372n/a total = printone(out, "# bytes in allocated blocks", allocated_bytes);
2373n/a total += printone(out, "# bytes in available blocks", available_bytes);
2374n/a
2375n/a PyOS_snprintf(buf, sizeof(buf),
2376n/a "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
2377n/a total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
2378n/a
2379n/a total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
2380n/a total += printone(out, "# bytes lost to quantization", quantization);
2381n/a total += printone(out, "# bytes lost to arena alignment", arena_alignment);
2382n/a (void)printone(out, "Total", total);
2383n/a}
2384n/a
2385n/a#endif /* #ifdef WITH_PYMALLOC */
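
_PyObject_DebugMallocStats() is what sys._debugmallocstats() ultimately calls to write this report to stderr. A minimal sketch of triggering it from an embedding application:

    /* Sketch: dumping the pymalloc report from an embedding program.  The
     * same report is available from Python code via sys._debugmallocstats(). */
    #include "Python.h"

    int
    main(void)
    {
        Py_Initialize();
        PyRun_SimpleString("x = [str(i) for i in range(10000)]");
    #ifdef WITH_PYMALLOC
        _PyObject_DebugMallocStats(stderr);
    #endif
        Py_Finalize();
        return 0;
    }
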