From 9a44e7a611213e5f9854e60842c6694ea2822b4a Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 5 Oct 2022 17:35:50 -0600 Subject: [PATCH 01/19] Look up the current interpreter in PyObject_Malloc(). --- Objects/obmalloc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 449b618a0e76a0..db19b464a056c3 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -1,6 +1,7 @@ #include "Python.h" #include "pycore_pymem.h" // _PyTraceMalloc_Config -#include "pycore_code.h" // stats +#include "pycore_code.h" // stats +#include "pycore_pystate.h" // _PyInterpreterState_GET #include #include // malloc() @@ -700,12 +701,15 @@ _PyMem_Strdup(const char *str) return copy; } +static PyInterpreterState *interp; + void * PyObject_Malloc(size_t size) { /* see PyMem_RawMalloc() */ if (size > (size_t)PY_SSIZE_T_MAX) return NULL; + interp = _PyInterpreterState_GET(); OBJECT_STAT_INC_COND(allocations512, size < 512); OBJECT_STAT_INC_COND(allocations4k, size >= 512 && size < 4094); OBJECT_STAT_INC_COND(allocations_big, size >= 4094); From 16cd128dd79ac95b67bef8cf684dcb6e864c9601 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 6 Oct 2022 14:44:27 -0600 Subject: [PATCH 02/19] Move the low-level allocator implementations to a header file. --- Include/internal/pycore_allocators.h | 124 +++++++++++++++++++++++++++ Makefile.pre.in | 1 + Objects/obmalloc.c | 100 +-------------------- PCbuild/pythoncore.vcxproj | 1 + PCbuild/pythoncore.vcxproj.filters | 3 + 5 files changed, 130 insertions(+), 99 deletions(-) create mode 100644 Include/internal/pycore_allocators.h diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h new file mode 100644 index 00000000000000..f376cc4a630536 --- /dev/null +++ b/Include/internal/pycore_allocators.h @@ -0,0 +1,124 @@ +#ifndef Py_INTERNAL_ALLOCATORS_H +#define Py_INTERNAL_ALLOCATORS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include // malloc() + + +/*************************************/ +/* the (raw) malloc allocator */ + +static void * +_PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size) +{ + /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL + for malloc(0), which would be treated as an error. Some platforms would + return a pointer with no memory behind it, which would break pymalloc. + To solve these problems, allocate an extra byte. */ + if (size == 0) + size = 1; + return malloc(size); +} + +static void * +_PyMem_RawCalloc(void *Py_UNUSED(ctx), size_t nelem, size_t elsize) +{ + /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL + for calloc(0, 0), which would be treated as an error. Some platforms + would return a pointer with no memory behind it, which would break + pymalloc. To solve these problems, allocate an extra byte. 
*/ + if (nelem == 0 || elsize == 0) { + nelem = 1; + elsize = 1; + } + return calloc(nelem, elsize); +} + +static void * +_PyMem_RawRealloc(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + if (size == 0) + size = 1; + return realloc(ptr, size); +} + +static void +_PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) +{ + free(ptr); +} + + +/*************************************/ +/* the object allocator */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_INTERNAL_ALLOCATORS_H */ diff --git a/Makefile.pre.in b/Makefile.pre.in index 2c0ff3d1c7b9a6..d20a592a609297 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1612,6 +1612,7 @@ PYTHON_HEADERS= \ $(srcdir)/Include/cpython/weakrefobject.h \ \ $(srcdir)/Include/internal/pycore_abstract.h \ + $(srcdir)/Include/internal/pycore_allocators.h \ $(srcdir)/Include/internal/pycore_asdl.h \ $(srcdir)/Include/internal/pycore_ast.h \ $(srcdir)/Include/internal/pycore_ast_state.h \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index db19b464a056c3..9b39e3865f8e2a 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -2,6 +2,7 @@ #include "pycore_pymem.h" // _PyTraceMalloc_Config #include "pycore_code.h" // stats #include "pycore_pystate.h" // _PyInterpreterState_GET +#include "pycore_allocators.h" #include #include // malloc() @@ -66,16 +67,6 @@ static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain); #endif #ifdef WITH_PYMALLOC - -#ifdef MS_WINDOWS -# include -#elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -#endif - /* Forward declaration */ static void* _PyObject_Malloc(void *ctx, size_t size); static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); @@ -90,95 +81,6 @@ static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT; -static void * -_PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size) -{ - /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL - for malloc(0), which would be treated as an error. Some platforms would - return a pointer with no memory behind it, which would break pymalloc. - To solve these problems, allocate an extra byte. */ - if (size == 0) - size = 1; - return malloc(size); -} - -static void * -_PyMem_RawCalloc(void *Py_UNUSED(ctx), size_t nelem, size_t elsize) -{ - /* PyMem_RawCalloc(0, 0) means calloc(1, 1). 
Some systems would return NULL - for calloc(0, 0), which would be treated as an error. Some platforms - would return a pointer with no memory behind it, which would break - pymalloc. To solve these problems, allocate an extra byte. */ - if (nelem == 0 || elsize == 0) { - nelem = 1; - elsize = 1; - } - return calloc(nelem, elsize); -} - -static void * -_PyMem_RawRealloc(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - if (size == 0) - size = 1; - return realloc(ptr, size); -} - -static void -_PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) -{ - free(ptr); -} - - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - #define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} #ifdef WITH_PYMALLOC # define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 6d4d859181e580..237123b06b7ebe 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -193,6 +193,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index e71ce2aa05707a..f2ad224ebfe99b 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -486,6 +486,9 @@ Include\internal + + Include\internal + Include\internal From 737877ca6bbcdf0ec7acecc587c194b01132536d Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 6 Oct 2022 15:16:28 -0600 Subject: [PATCH 03/19] Move the top-level allocators state. 
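Everything these first patches shuffle around is built from the same shape of table: a PyMemAllocatorEx holding a context pointer plus malloc/calloc/realloc/free function pointers, one table per domain (raw, mem, obj), swappable through the public PyMem_GetAllocator()/PyMem_SetAllocator() API. Below is a minimal sketch of that mechanism, assuming the hook is installed before the interpreter starts; count_ctx and the count_* wrappers are illustrative names, not part of the patch.

#include <Python.h>
#include <stdio.h>

typedef struct {
    PyMemAllocatorEx orig;   /* the table being wrapped */
    size_t nallocs;          /* how many times malloc was called */
} count_ctx;

static void *
count_malloc(void *ctx, size_t size)
{
    count_ctx *c = (count_ctx *)ctx;
    c->nallocs++;
    return c->orig.malloc(c->orig.ctx, size);
}

static void *
count_calloc(void *ctx, size_t nelem, size_t elsize)
{
    count_ctx *c = (count_ctx *)ctx;
    return c->orig.calloc(c->orig.ctx, nelem, elsize);
}

static void *
count_realloc(void *ctx, void *ptr, size_t size)
{
    count_ctx *c = (count_ctx *)ctx;
    return c->orig.realloc(c->orig.ctx, ptr, size);
}

static void
count_free(void *ctx, void *ptr)
{
    count_ctx *c = (count_ctx *)ctx;
    c->orig.free(c->orig.ctx, ptr);
}

int
main(void)
{
    static count_ctx ctx;
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &ctx.orig);

    PyMemAllocatorEx hook = {
        .ctx = &ctx,
        .malloc = count_malloc,
        .calloc = count_calloc,
        .realloc = count_realloc,
        .free = count_free,
    };
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);

    Py_Initialize();
    Py_Finalize();
    printf("raw-domain mallocs during init/fini: %zu\n", ctx.nallocs);
    return 0;
}

The MALLOC_ALLOC / PYMALLOC_ALLOC / PYDBG*_ALLOC macros moved in the diff below are just static initializers for tables of exactly this shape.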
--- Include/internal/pycore_allocators.h | 76 ++++++++++++++++++++++++++-- Objects/obmalloc.c | 66 ------------------------ 2 files changed, 72 insertions(+), 70 deletions(-) diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h index f376cc4a630536..0b2ab3caead3fc 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_allocators.h @@ -11,8 +11,8 @@ extern "C" { #include // malloc() -/*************************************/ -/* the (raw) malloc allocator */ +/*********************************************/ +/* the (raw) malloc allocator implementation */ static void * _PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size) @@ -55,8 +55,8 @@ _PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) } -/*************************************/ -/* the object allocator */ +/***************************************/ +/* the object allocator implementation */ #ifdef WITH_PYMALLOC # ifdef MS_WINDOWS @@ -118,6 +118,74 @@ _PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) #endif +/******************/ +/* the allocators */ + +#ifdef WITH_PYMALLOC +/* Forward declaration */ +static void* _PyObject_Malloc(void *ctx, size_t size); +static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); +static void _PyObject_Free(void *ctx, void *p); +static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); +#endif + +#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} +#ifdef WITH_PYMALLOC +# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +#endif + +#define PYRAW_ALLOC MALLOC_ALLOC +#ifdef WITH_PYMALLOC +# define PYOBJ_ALLOC PYMALLOC_ALLOC +#else +# define PYOBJ_ALLOC MALLOC_ALLOC +#endif +#define PYMEM_ALLOC PYOBJ_ALLOC + +typedef struct { + /* We tag each block with an API ID in order to tag API violations */ + char api_id; + PyMemAllocatorEx alloc; +} debug_alloc_api_t; +static struct { + debug_alloc_api_t raw; + debug_alloc_api_t mem; + debug_alloc_api_t obj; +} _PyMem_Debug = { + {'r', PYRAW_ALLOC}, + {'m', PYMEM_ALLOC}, + {'o', PYOBJ_ALLOC} + }; + +/* Forward declaration */ +static void* _PyMem_DebugRawMalloc(void *ctx, size_t size); +static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); +static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); +static void _PyMem_DebugRawFree(void *ctx, void *ptr); + +static void* _PyMem_DebugMalloc(void *ctx, size_t size); +static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); +static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); +static void _PyMem_DebugFree(void *ctx, void *p); + +#define PYDBGRAW_ALLOC \ + {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC \ + {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC \ + {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + +#ifdef Py_DEBUG +static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC; +static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC; +static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC; +#else +static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC; +static PyMemAllocatorEx _PyMem = PYMEM_ALLOC; +static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC; +#endif + + #ifdef __cplusplus } #endif diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 
9b39e3865f8e2a..49796cd7b197d8 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -5,7 +5,6 @@ #include "pycore_allocators.h" #include -#include // malloc() /* Defined in tracemalloc.c */ @@ -17,17 +16,6 @@ extern void _PyMem_DumpTraceback(int fd, const void *ptr); #undef uint #define uint unsigned int /* assuming >= 16 bits */ -/* Forward declaration */ -static void* _PyMem_DebugRawMalloc(void *ctx, size_t size); -static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); -static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); -static void _PyMem_DebugRawFree(void *ctx, void *ptr); - -static void* _PyMem_DebugMalloc(void *ctx, size_t size); -static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); -static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); -static void _PyMem_DebugFree(void *ctx, void *p); - static void _PyObject_DebugDumpAddress(const void *p); static void _PyMem_DebugCheckAddress(const char *func, char api_id, const void *p); @@ -66,14 +54,6 @@ static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain); # define _Py_NO_SANITIZE_MEMORY #endif -#ifdef WITH_PYMALLOC -/* Forward declaration */ -static void* _PyObject_Malloc(void *ctx, size_t size); -static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); -static void _PyObject_Free(void *ctx, void *p); -static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); -#endif - /* bpo-35053: Declare tracemalloc configuration here rather than Modules/_tracemalloc.c because _tracemalloc can be compiled as dynamic @@ -81,52 +61,6 @@ static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT; -#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} -#ifdef WITH_PYMALLOC -# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} -#endif - -#define PYRAW_ALLOC MALLOC_ALLOC -#ifdef WITH_PYMALLOC -# define PYOBJ_ALLOC PYMALLOC_ALLOC -#else -# define PYOBJ_ALLOC MALLOC_ALLOC -#endif -#define PYMEM_ALLOC PYOBJ_ALLOC - -typedef struct { - /* We tag each block with an API ID in order to tag API violations */ - char api_id; - PyMemAllocatorEx alloc; -} debug_alloc_api_t; -static struct { - debug_alloc_api_t raw; - debug_alloc_api_t mem; - debug_alloc_api_t obj; -} _PyMem_Debug = { - {'r', PYRAW_ALLOC}, - {'m', PYMEM_ALLOC}, - {'o', PYOBJ_ALLOC} - }; - -#define PYDBGRAW_ALLOC \ - {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} -#define PYDBGMEM_ALLOC \ - {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} -#define PYDBGOBJ_ALLOC \ - {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} - -#ifdef Py_DEBUG -static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC; -static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC; -static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC; -#else -static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC; -static PyMemAllocatorEx _PyMem = PYMEM_ALLOC; -static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC; -#endif - - static int pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug, PyMemAllocatorEx *old_alloc) From 6dd7ba5db4f82e4a53a712c5f86a278e4dafeb8f Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 6 Oct 2022 16:39:08 -0600 Subject: [PATCH 04/19] Move the allocators to the runtime state. 
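The mechanical trick in this patch (and the ones that follow) is: turn the file-static allocator globals into fields of a struct embedded in _PyRuntime, provide a *_INIT macro for the static initializer, and keep the old names as #define aliases so the rest of obmalloc.c compiles unchanged. A stripped-down illustration of the pattern, using toy names (allocator_t, runtime, alloc_raw) rather than the real CPython ones:

#include <stdio.h>

typedef struct {
    const char *name;                /* stand-in for a real PyMemAllocatorEx */
} allocator_t;

typedef struct {
    struct {
        allocator_t raw, mem, obj;
    } standard;
} runtime_state_t;

/* static initializer, analogous to _pymem_allocators_standard_INIT */
#define runtime_state_INIT \
    { .standard = { .raw = {"raw"}, .mem = {"mem"}, .obj = {"obj"} } }

static runtime_state_t runtime = runtime_state_INIT;

/* aliases, analogous to "#define _PyMem_Raw (_PyRuntime.allocators.standard.raw)",
 * so pre-existing code that used file-static globals keeps compiling unchanged */
#define alloc_raw (runtime.standard.raw)
#define alloc_obj (runtime.standard.obj)

int
main(void)
{
    printf("%s %s\n", alloc_raw.name, alloc_obj.name);
    return 0;
}

The real patch applies the same moves to _PyMem_Raw, _PyMem, _PyObject and _PyMem_Debug, as the obmalloc.c hunk below shows.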
--- Include/internal/pycore_allocators.h | 131 -------------------- Include/internal/pycore_pymem.h | 20 +++ Include/internal/pycore_pymem_init.h | 77 ++++++++++++ Include/internal/pycore_runtime.h | 3 + Include/internal/pycore_runtime_init.h | 5 + Makefile.pre.in | 1 + Objects/obmalloc.c | 102 +++++++++++---- PCbuild/pythoncore.vcxproj | 1 + PCbuild/pythoncore.vcxproj.filters | 3 + Tools/c-analyzer/cpython/globals-to-fix.tsv | 3 +- 10 files changed, 193 insertions(+), 153 deletions(-) create mode 100644 Include/internal/pycore_pymem_init.h diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h index 0b2ab3caead3fc..1bd5a52dbab085 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_allocators.h @@ -55,137 +55,6 @@ _PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) } -/***************************************/ -/* the object allocator implementation */ - -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - - -/******************/ -/* the allocators */ - -#ifdef WITH_PYMALLOC -/* Forward declaration */ -static void* _PyObject_Malloc(void *ctx, size_t size); -static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); -static void _PyObject_Free(void *ctx, void *p); -static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); -#endif - -#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} -#ifdef WITH_PYMALLOC -# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} -#endif - -#define PYRAW_ALLOC MALLOC_ALLOC -#ifdef WITH_PYMALLOC -# define PYOBJ_ALLOC PYMALLOC_ALLOC -#else -# define PYOBJ_ALLOC MALLOC_ALLOC -#endif -#define PYMEM_ALLOC PYOBJ_ALLOC - -typedef struct { - /* We tag each block with an API ID in order to tag API violations */ - char api_id; - PyMemAllocatorEx alloc; -} debug_alloc_api_t; -static struct { - debug_alloc_api_t raw; - debug_alloc_api_t mem; - debug_alloc_api_t obj; -} _PyMem_Debug = { - {'r', PYRAW_ALLOC}, - {'m', PYMEM_ALLOC}, - {'o', PYOBJ_ALLOC} - }; - -/* Forward declaration */ -static void* _PyMem_DebugRawMalloc(void *ctx, size_t size); -static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); -static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); -static void _PyMem_DebugRawFree(void *ctx, void *ptr); - -static void* _PyMem_DebugMalloc(void *ctx, size_t size); -static void* _PyMem_DebugCalloc(void *ctx, size_t 
nelem, size_t elsize); -static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); -static void _PyMem_DebugFree(void *ctx, void *p); - -#define PYDBGRAW_ALLOC \ - {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} -#define PYDBGMEM_ALLOC \ - {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} -#define PYDBGOBJ_ALLOC \ - {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} - -#ifdef Py_DEBUG -static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC; -static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC; -static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC; -#else -static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC; -static PyMemAllocatorEx _PyMem = PYMEM_ALLOC; -static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC; -#endif - - #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index b9eea9d4b30ad1..6e4bba255ad6f5 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -11,6 +11,26 @@ extern "C" { #include "pymem.h" // PyMemAllocatorName +typedef struct { + /* We tag each block with an API ID in order to tag API violations */ + char api_id; + PyMemAllocatorEx alloc; +} debug_alloc_api_t; + +struct _pymem_allocators { + struct { + PyMemAllocatorEx raw; + PyMemAllocatorEx mem; + PyMemAllocatorEx obj; + } standard; + struct { + debug_alloc_api_t raw; + debug_alloc_api_t mem; + debug_alloc_api_t obj; + } debug; +}; + + /* Set the memory allocator of the specified domain to the default. Save the old allocator into *old_alloc if it's non-NULL. Return on success, or return -1 if the domain is unknown. */ diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h new file mode 100644 index 00000000000000..d89157844eb26f --- /dev/null +++ b/Include/internal/pycore_pymem_init.h @@ -0,0 +1,77 @@ +#ifndef Py_INTERNAL_PYMEM_INIT_H +#define Py_INTERNAL_PYMEM_INIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +#include "pycore_allocators.h" + +#ifdef WITH_PYMALLOC +void* _PyObject_Malloc(void *ctx, size_t size); +void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); +void _PyObject_Free(void *ctx, void *p); +void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); +#endif + +#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} +#ifdef WITH_PYMALLOC +# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +#endif + +#define PYRAW_ALLOC MALLOC_ALLOC +#ifdef WITH_PYMALLOC +# define PYOBJ_ALLOC PYMALLOC_ALLOC +#else +# define PYOBJ_ALLOC MALLOC_ALLOC +#endif +#define PYMEM_ALLOC PYOBJ_ALLOC + +void* _PyMem_DebugRawMalloc(void *ctx, size_t size); +void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugRawFree(void *ctx, void *ptr); + +void* _PyMem_DebugMalloc(void *ctx, size_t size); +void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugFree(void *ctx, void *p); + +#define PYDBGRAW_ALLOC \ + {&_PyRuntime.allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC \ + {&_PyRuntime.allocators.debug.mem, 
_PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC \ + {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + +#ifdef Py_DEBUG +#define _pymem_allocators_standard_INIT \ + { \ + PYDBGRAW_ALLOC, \ + PYDBGMEM_ALLOC, \ + PYDBGOBJ_ALLOC, \ + } +#else +#define _pymem_allocators_standard_INIT \ + { \ + PYRAW_ALLOC, \ + PYMEM_ALLOC, \ + PYOBJ_ALLOC, \ + } +#endif + +#define _pymem_allocators_debug_INIT \ + { \ + {'r', PYRAW_ALLOC}, \ + {'m', PYMEM_ALLOC}, \ + {'o', PYOBJ_ALLOC}, \ + } + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_PYMEM_INIT_H diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h index d1fbc09f1ea206..4b0b17c10f82ce 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h @@ -12,6 +12,7 @@ extern "C" { #include "pycore_gil.h" // struct _gil_runtime_state #include "pycore_global_objects.h" // struct _Py_global_objects #include "pycore_interp.h" // PyInterpreterState +#include "pycore_pymem.h" // struct _pymem_allocators #include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_ids struct _getargs_runtime_state { @@ -85,6 +86,8 @@ typedef struct pyruntimestate { to access it, don't access it directly. */ _Py_atomic_address _finalizing; + struct _pymem_allocators allocators; + struct pyinterpreters { PyThread_type_lock mutex; /* The linked list of interpreters, newest first. */ diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 3acb16b0992e92..46e875f9cc8369 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -9,6 +9,7 @@ extern "C" { #endif #include "pycore_object.h" +#include "pycore_pymem_init.h" /* The static initializers defined here should only be used @@ -23,6 +24,10 @@ extern "C" { in accordance with the specification. */ \ .autoTSSkey = Py_tss_NEEDS_INIT, \ }, \ + .allocators = { \ + _pymem_allocators_standard_INIT, \ + _pymem_allocators_debug_INIT, \ + }, \ .interpreters = { \ /* This prevents interpreters from getting created \ until _PyInterpreterState_Enable() is called. 
*/ \ diff --git a/Makefile.pre.in b/Makefile.pre.in index d20a592a609297..192411e892ae9b 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1657,6 +1657,7 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_pyhash.h \ $(srcdir)/Include/internal/pycore_pylifecycle.h \ $(srcdir)/Include/internal/pycore_pymem.h \ + $(srcdir)/Include/internal/pycore_pymem_init.h \ $(srcdir)/Include/internal/pycore_pystate.h \ $(srcdir)/Include/internal/pycore_range.h \ $(srcdir)/Include/internal/pycore_runtime.h \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 49796cd7b197d8..f393cf700ac252 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -2,7 +2,8 @@ #include "pycore_pymem.h" // _PyTraceMalloc_Config #include "pycore_code.h" // stats #include "pycore_pystate.h" // _PyInterpreterState_GET -#include "pycore_allocators.h" +#include "pycore_pymem.h" +#include "pycore_pymem_init.h" #include @@ -61,6 +62,12 @@ static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain); struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT; +#define _PyMem_Raw (_PyRuntime.allocators.standard.raw) +#define _PyMem (_PyRuntime.allocators.standard.mem) +#define _PyObject (_PyRuntime.allocators.standard.obj) +#define _PyMem_Debug (_PyRuntime.allocators.debug) + + static int pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug, PyMemAllocatorEx *old_alloc) @@ -261,14 +268,67 @@ _PyMem_GetCurrentAllocatorName(void) } -#undef MALLOC_ALLOC -#undef PYMALLOC_ALLOC -#undef PYRAW_ALLOC -#undef PYMEM_ALLOC -#undef PYOBJ_ALLOC -#undef PYDBGRAW_ALLOC -#undef PYDBGMEM_ALLOC -#undef PYDBGOBJ_ALLOC +/***************************************/ +/* the object allocator implementation */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif static PyObjectArenaAllocator _PyObject_Arena = {NULL, @@ -1833,7 +1893,7 @@ pymalloc_alloc(void *Py_UNUSED(ctx), size_t nbytes) } -static void * +void * _PyObject_Malloc(void *ctx, size_t nbytes) { void* ptr = pymalloc_alloc(ctx, nbytes); @@ -1849,7 +1909,7 @@ _PyObject_Malloc(void *ctx, size_t nbytes) } -static void * +void * _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize) { assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize); @@ -2113,7 +2173,7 @@ pymalloc_free(void *Py_UNUSED(ctx), void *p) } -static void +void _PyObject_Free(void *ctx, void *p) { /* PyObject_Free(NULL) has no effect */ @@ -2199,7 +2259,7 @@ pymalloc_realloc(void *ctx, void **newptr_p, void *p, 
size_t nbytes) } -static void * +void * _PyObject_Realloc(void *ctx, void *ptr, size_t nbytes) { void *ptr2; @@ -2376,13 +2436,13 @@ _PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes) return data; } -static void * +void * _PyMem_DebugRawMalloc(void *ctx, size_t nbytes) { return _PyMem_DebugRawAlloc(0, ctx, nbytes); } -static void * +void * _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize) { size_t nbytes; @@ -2397,7 +2457,7 @@ _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize) Then fills the original bytes with PYMEM_DEADBYTE. Then calls the underlying free. */ -static void +void _PyMem_DebugRawFree(void *ctx, void *p) { /* PyMem_Free(NULL) has no effect */ @@ -2417,7 +2477,7 @@ _PyMem_DebugRawFree(void *ctx, void *p) } -static void * +void * _PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes) { if (p == NULL) { @@ -2527,14 +2587,14 @@ _PyMem_DebugCheckGIL(const char *func) } } -static void * +void * _PyMem_DebugMalloc(void *ctx, size_t nbytes) { _PyMem_DebugCheckGIL(__func__); return _PyMem_DebugRawMalloc(ctx, nbytes); } -static void * +void * _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize) { _PyMem_DebugCheckGIL(__func__); @@ -2542,7 +2602,7 @@ _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize) } -static void +void _PyMem_DebugFree(void *ctx, void *ptr) { _PyMem_DebugCheckGIL(__func__); @@ -2550,7 +2610,7 @@ _PyMem_DebugFree(void *ctx, void *ptr) } -static void * +void * _PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes) { _PyMem_DebugCheckGIL(__func__); diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 237123b06b7ebe..3ebf1947165873 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -238,6 +238,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index f2ad224ebfe99b..2f01b1482b3b1c 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -618,6 +618,9 @@ Include\internal + + Include\internal + Include\internal diff --git a/Tools/c-analyzer/cpython/globals-to-fix.tsv b/Tools/c-analyzer/cpython/globals-to-fix.tsv index 56e499dcf98836..3d93f26ce8c4b0 100644 --- a/Tools/c-analyzer/cpython/globals-to-fix.tsv +++ b/Tools/c-analyzer/cpython/globals-to-fix.tsv @@ -421,7 +421,7 @@ Python/pathconfig.c - _Py_path_config - #----------------------- # state -# allocator +# object allocator Objects/obmalloc.c - _PyObject_Arena - Objects/obmalloc.c - _Py_tracemalloc_config - Objects/obmalloc.c - arena_map_bot_count - @@ -436,6 +436,7 @@ Objects/obmalloc.c - ntimes_arena_allocated - Objects/obmalloc.c - raw_allocated_blocks - Objects/obmalloc.c - unused_arena_objects - Objects/obmalloc.c - usable_arenas - +Objects/obmalloc.c - usedpools - Objects/obmalloc.c new_arena debug_stats - # pre-allocated memory From 56251cbe79d0acecf8d3f3866c69797a74dae530 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 6 Oct 2022 16:47:21 -0600 Subject: [PATCH 05/19] Drop debug code. 
--- Objects/obmalloc.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index f393cf700ac252..c24f047ecb6edc 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -597,15 +597,12 @@ _PyMem_Strdup(const char *str) return copy; } -static PyInterpreterState *interp; - void * PyObject_Malloc(size_t size) { /* see PyMem_RawMalloc() */ if (size > (size_t)PY_SSIZE_T_MAX) return NULL; - interp = _PyInterpreterState_GET(); OBJECT_STAT_INC_COND(allocations512, size < 512); OBJECT_STAT_INC_COND(allocations4k, size >= 512 && size < 4094); OBJECT_STAT_INC_COND(allocations_big, size >= 4094); From 007eed4b95e26dfa1bc004fc3cb4cac8d9568177 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 6 Oct 2022 17:19:25 -0600 Subject: [PATCH 06/19] Move the top-level object allocator to the runtime state. --- Include/internal/pycore_allocators.h | 63 ++++++++++++++++++ Include/internal/pycore_pymem.h | 1 + Include/internal/pycore_pymem_init.h | 11 +++ Include/internal/pycore_runtime_init.h | 1 + Objects/obmalloc.c | 74 +-------------------- Tools/c-analyzer/cpython/globals-to-fix.tsv | 1 - 6 files changed, 77 insertions(+), 74 deletions(-) diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h index 1bd5a52dbab085..6c4860eb483892 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_allocators.h @@ -55,6 +55,69 @@ _PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) } +/***************************************/ +/* the object allocator implementation */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index 6e4bba255ad6f5..ef40f16c972f29 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -28,6 +28,7 @@ struct _pymem_allocators { debug_alloc_api_t mem; debug_alloc_api_t obj; } debug; + PyObjectArenaAllocator obj_arena; }; diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index d89157844eb26f..87a7f38ad7858f 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -70,6 +70,17 @@ void _PyMem_DebugFree(void *ctx, void *p); {'o', PYOBJ_ALLOC}, \ } +#ifdef MS_WINDOWS +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree } +#elif defined(ARENAS_USE_MMAP) +# define 
_pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaMmap, _PyObject_ArenaMunmap } +#else +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaMalloc, _PyObject_ArenaFree } +#endif + #ifdef __cplusplus } diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 46e875f9cc8369..4b812987c13bf5 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -27,6 +27,7 @@ extern "C" { .allocators = { \ _pymem_allocators_standard_INIT, \ _pymem_allocators_debug_INIT, \ + _pymem_allocators_obj_arena_INIT, \ }, \ .interpreters = { \ /* This prevents interpreters from getting created \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index c24f047ecb6edc..c7309fc05a5324 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -66,6 +66,7 @@ struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT #define _PyMem (_PyRuntime.allocators.standard.mem) #define _PyObject (_PyRuntime.allocators.standard.obj) #define _PyMem_Debug (_PyRuntime.allocators.debug) +#define _PyObject_Arena (_PyRuntime.allocators.obj_arena) static int @@ -268,79 +269,6 @@ _PyMem_GetCurrentAllocatorName(void) } -/***************************************/ -/* the object allocator implementation */ - -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - - -static PyObjectArenaAllocator _PyObject_Arena = {NULL, -#ifdef MS_WINDOWS - _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree -#elif defined(ARENAS_USE_MMAP) - _PyObject_ArenaMmap, _PyObject_ArenaMunmap -#else - _PyObject_ArenaMalloc, _PyObject_ArenaFree -#endif - }; - #ifdef WITH_PYMALLOC static int _PyMem_DebugEnabled(void) diff --git a/Tools/c-analyzer/cpython/globals-to-fix.tsv b/Tools/c-analyzer/cpython/globals-to-fix.tsv index 3d93f26ce8c4b0..964ad927e06cfe 100644 --- a/Tools/c-analyzer/cpython/globals-to-fix.tsv +++ b/Tools/c-analyzer/cpython/globals-to-fix.tsv @@ -422,7 +422,6 @@ Python/pathconfig.c - _Py_path_config - # state # object allocator -Objects/obmalloc.c - _PyObject_Arena - Objects/obmalloc.c - _Py_tracemalloc_config - Objects/obmalloc.c - arena_map_bot_count - Objects/obmalloc.c - arena_map_mid_count - From 38a80d849fbee950330daf95c48efd940ca0d611 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 7 Oct 2022 11:51:49 -0600 Subject: [PATCH 07/19] Move the arenas state to the runtime state. 
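The bookkeeping consolidated by this patch (the arenas vector, usable_arenas, narenas_currently_allocated, the radix tree, and so on) is the same state that sys._debugmallocstats() reports at runtime. A small embedding sketch that dumps it, assuming a pymalloc-enabled build; it is only a way to observe the state being relocated, not part of the change:

#include <Python.h>

int
main(void)
{
    Py_Initialize();
    /* Prints the object allocator's arena/pool/block usage to stderr. */
    PyRun_SimpleString("import sys; sys._debugmallocstats()");
    Py_Finalize();
    return 0;
}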
--- Include/internal/pycore_obmalloc.h | 662 +++++++++++++++++ Include/internal/pycore_obmalloc_init.h | 57 ++ Include/internal/pycore_runtime.h | 2 + Include/internal/pycore_runtime_init.h | 2 + Makefile.pre.in | 2 + Objects/obmalloc.c | 748 ++------------------ PCbuild/pythoncore.vcxproj | 2 + PCbuild/pythoncore.vcxproj.filters | 6 + Tools/c-analyzer/cpython/globals-to-fix.tsv | 13 - 9 files changed, 788 insertions(+), 706 deletions(-) create mode 100644 Include/internal/pycore_obmalloc.h create mode 100644 Include/internal/pycore_obmalloc_init.h diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h new file mode 100644 index 00000000000000..94e6abf3df5a07 --- /dev/null +++ b/Include/internal/pycore_obmalloc.h @@ -0,0 +1,662 @@ +#ifndef Py_INTERNAL_OBMALLOC_H +#define Py_INTERNAL_OBMALLOC_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + +/* An object allocator for Python. + + Here is an introduction to the layers of the Python memory architecture, + showing where the object allocator is actually used (layer +2), It is + called for every object allocation and deallocation (PyObject_New/Del), + unless the object-specific allocators implement a proprietary allocation + scheme (ex.: ints use a simple free list). This is also the place where + the cyclic garbage collector operates selectively on container objects. + + + Object-specific allocators + _____ ______ ______ ________ + [ int ] [ dict ] [ list ] ... [ string ] Python core | ++3 | <----- Object-specific memory -----> | <-- Non-object memory --> | + _______________________________ | | + [ Python's object allocator ] | | ++2 | ####### Object memory ####### | <------ Internal buffers ------> | + ______________________________________________________________ | + [ Python's raw memory allocator (PyMem_ API) ] | ++1 | <----- Python memory (under PyMem manager's control) ------> | | + __________________________________________________________________ + [ Underlying general-purpose allocator (ex: C library malloc) ] + 0 | <------ Virtual memory allocated for the python process -------> | + + ========================================================================= + _______________________________________________________________________ + [ OS-specific Virtual Memory Manager (VMM) ] +-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> | + __________________________________ __________________________________ + [ ] [ ] +-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> | + +*/ +/*==========================================================================*/ + +/* A fast, special-purpose memory allocator for small blocks, to be used + on top of a general-purpose malloc -- heavily based on previous art. */ + +/* Vladimir Marangozov -- August 2000 */ + +/* + * "Memory management is where the rubber meets the road -- if we do the wrong + * thing at any level, the results will not be good. And if we don't make the + * levels work well together, we are in serious trouble." (1) + * + * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles, + * "Dynamic Storage Allocation: A Survey and Critical Review", + * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995. 
+ */ + +/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */ + +/*==========================================================================*/ + +/* + * Allocation strategy abstract: + * + * For small requests, the allocator sub-allocates blocks of memory. + * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the + * system's allocator. + * + * Small requests are grouped in size classes spaced 8 bytes apart, due + * to the required valid alignment of the returned address. Requests of + * a particular size are serviced from memory pools of 4K (one VMM page). + * Pools are fragmented on demand and contain free lists of blocks of one + * particular size class. In other words, there is a fixed-size allocator + * for each size class. Free pools are shared by the different allocators + * thus minimizing the space reserved for a particular size class. + * + * This allocation strategy is a variant of what is known as "simple + * segregated storage based on array of free lists". The main drawback of + * simple segregated storage is that we might end up with lot of reserved + * memory for the different free lists, which degenerate in time. To avoid + * this, we partition each free list in pools and we share dynamically the + * reserved space between all free lists. This technique is quite efficient + * for memory intensive programs which allocate mainly small-sized blocks. + * + * For small requests we have the following table: + * + * Request in bytes Size of allocated block Size class idx + * ---------------------------------------------------------------- + * 1-8 8 0 + * 9-16 16 1 + * 17-24 24 2 + * 25-32 32 3 + * 33-40 40 4 + * 41-48 48 5 + * 49-56 56 6 + * 57-64 64 7 + * 65-72 72 8 + * ... ... ... + * 497-504 504 62 + * 505-512 512 63 + * + * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying + * allocator. + */ + +/*==========================================================================*/ + +/* + * -- Main tunable settings section -- + */ + +/* + * Alignment of addresses returned to the user. 8-bytes alignment works + * on most current architectures (with 32-bit or 64-bit address buses). + * The alignment value is also used for grouping small requests in size + * classes spaced ALIGNMENT bytes apart. + * + * You shouldn't change this unless you know what you are doing. + */ + +#if SIZEOF_VOID_P > 4 +#define ALIGNMENT 16 /* must be 2^N */ +#define ALIGNMENT_SHIFT 4 +#else +#define ALIGNMENT 8 /* must be 2^N */ +#define ALIGNMENT_SHIFT 3 +#endif + +/* Return the number of bytes in size class I, as a uint. */ +#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT) + +/* + * Max size threshold below which malloc requests are considered to be + * small enough in order to use preallocated memory pools. You can tune + * this value according to your application behaviour and memory needs. + * + * Note: a size threshold of 512 guarantees that newly created dictionaries + * will be allocated from preallocated memory pools on 64-bit. + * + * The following invariants must hold: + * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512 + * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT + * + * Although not required, for better performance and space efficiency, + * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2. 
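A standalone sketch of the size-class arithmetic laid out in the table above, using the 8-byte spacing the table shows (the 64-bit settings above use ALIGNMENT 16, which halves the number of classes); the program is illustrative only and is not part of this header:

#include <stdio.h>

#define ALIGNMENT        8
#define ALIGNMENT_SHIFT  3
#define INDEX2SIZE(I)    (((unsigned int)(I) + 1) << ALIGNMENT_SHIFT)

int
main(void)
{
    size_t requests[] = {1, 8, 9, 42, 512};
    for (size_t i = 0; i < sizeof(requests)/sizeof(requests[0]); i++) {
        size_t nbytes = requests[i];
        unsigned int idx = (unsigned int)((nbytes - 1) >> ALIGNMENT_SHIFT);
        printf("request %3zu -> size class %2u -> %3u-byte block\n",
               nbytes, idx, INDEX2SIZE(idx));
    }
    return 0;
}

The mapping is simply (nbytes - 1) >> ALIGNMENT_SHIFT for the class index, with INDEX2SIZE() giving the block size handed back.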
+ */ +#define SMALL_REQUEST_THRESHOLD 512 +#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT) + +/* + * The system's VMM page size can be obtained on most unices with a + * getpagesize() call or deduced from various header files. To make + * things simpler, we assume that it is 4K, which is OK for most systems. + * It is probably better if this is the native page size, but it doesn't + * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page + * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation + * violation fault. 4K is apparently OK for all the platforms that python + * currently targets. + */ +#define SYSTEM_PAGE_SIZE (4 * 1024) + +/* + * Maximum amount of memory managed by the allocator for small requests. + */ +#ifdef WITH_MEMORY_LIMITS +#ifndef SMALL_MEMORY_LIMIT +#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */ +#endif +#endif + +#if !defined(WITH_PYMALLOC_RADIX_TREE) +/* Use radix-tree to track arena memory regions, for address_in_range(). + * Enable by default since it allows larger pool sizes. Can be disabled + * using -DWITH_PYMALLOC_RADIX_TREE=0 */ +#define WITH_PYMALLOC_RADIX_TREE 1 +#endif + +#if SIZEOF_VOID_P > 4 +/* on 64-bit platforms use larger pools and arenas if we can */ +#define USE_LARGE_ARENAS +#if WITH_PYMALLOC_RADIX_TREE +/* large pools only supported if radix-tree is enabled */ +#define USE_LARGE_POOLS +#endif +#endif + +/* + * The allocator sub-allocates blocks of memory (called arenas) aligned + * on a page boundary. This is a reserved virtual address space for the + * current process (obtained through a malloc()/mmap() call). In no way this + * means that the memory arenas will be used entirely. A malloc() is + * usually an address range reservation for bytes, unless all pages within + * this space are referenced subsequently. So malloc'ing big blocks and not + * using them does not mean "wasting memory". It's an addressable range + * wastage... + * + * Arenas are allocated with mmap() on systems supporting anonymous memory + * mappings to reduce heap fragmentation. + */ +#ifdef USE_LARGE_ARENAS +#define ARENA_BITS 20 /* 1 MiB */ +#else +#define ARENA_BITS 18 /* 256 KiB */ +#endif +#define ARENA_SIZE (1 << ARENA_BITS) +#define ARENA_SIZE_MASK (ARENA_SIZE - 1) + +#ifdef WITH_MEMORY_LIMITS +#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE) +#endif + +/* + * Size of the pools used for small blocks. Must be a power of 2. + */ +#ifdef USE_LARGE_POOLS +#define POOL_BITS 14 /* 16 KiB */ +#else +#define POOL_BITS 12 /* 4 KiB */ +#endif +#define POOL_SIZE (1 << POOL_BITS) +#define POOL_SIZE_MASK (POOL_SIZE - 1) + +#if !WITH_PYMALLOC_RADIX_TREE +#if POOL_SIZE != SYSTEM_PAGE_SIZE +# error "pool size must be equal to system page size" +#endif +#endif + +#define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE) +#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE +# error "arena size not an exact multiple of pool size" +#endif + +/* + * -- End of tunable settings section -- + */ + +/*==========================================================================*/ + +/* When you say memory, my mind reasons in terms of (pointers to) blocks */ +typedef uint8_t pymem_block; + +/* Pool for small blocks. 
*/ +struct pool_header { + union { pymem_block *_padding; + uint count; } ref; /* number of allocated blocks */ + pymem_block *freeblock; /* pool's free list head */ + struct pool_header *nextpool; /* next pool of this size class */ + struct pool_header *prevpool; /* previous pool "" */ + uint arenaindex; /* index into arenas of base adr */ + uint szidx; /* block size class index */ + uint nextoffset; /* bytes to virgin block */ + uint maxnextoffset; /* largest valid nextoffset */ +}; + +typedef struct pool_header *poolp; + +/* Record keeping for arenas. */ +struct arena_object { + /* The address of the arena, as returned by malloc. Note that 0 + * will never be returned by a successful malloc, and is used + * here to mark an arena_object that doesn't correspond to an + * allocated arena. + */ + uintptr_t address; + + /* Pool-aligned pointer to the next pool to be carved off. */ + pymem_block* pool_address; + + /* The number of available pools in the arena: free pools + never- + * allocated pools. + */ + uint nfreepools; + + /* The total number of pools in the arena, whether or not available. */ + uint ntotalpools; + + /* Singly-linked list of available pools. */ + struct pool_header* freepools; + + /* Whenever this arena_object is not associated with an allocated + * arena, the nextarena member is used to link all unassociated + * arena_objects in the singly-linked `unused_arena_objects` list. + * The prevarena member is unused in this case. + * + * When this arena_object is associated with an allocated arena + * with at least one available pool, both members are used in the + * doubly-linked `usable_arenas` list, which is maintained in + * increasing order of `nfreepools` values. + * + * Else this arena_object is associated with an allocated arena + * all of whose pools are in use. `nextarena` and `prevarena` + * are both meaningless in this case. + */ + struct arena_object* nextarena; + struct arena_object* prevarena; +}; + +#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT) + +#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */ + +/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */ +#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE)) + +/* Return total number of blocks in pool of size index I, as a uint. */ +#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) + +/*==========================================================================*/ + +/* + * Pool table -- headed, circular, doubly-linked lists of partially used pools. + +This is involved. For an index i, usedpools[i+i] is the header for a list of +all partially used pools holding small blocks with "size class idx" i. So +usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size +16, and so on: index 2*i <-> blocks of size (i+1)<freeblock points to +the start of a singly-linked list of free blocks within the pool. When a +block is freed, it's inserted at the front of its pool's freeblock list. Note +that the available blocks in a pool are *not* linked all together when a pool +is initialized. Instead only "the first two" (lowest addresses) blocks are +set up, returning the first such block, and setting pool->freeblock to a +one-block list holding the second such block. This is consistent with that +pymalloc strives at all levels (arena, pool, and block) never to touch a piece +of memory until it's actually needed. 
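The lazy carving and the per-pool free list described above reduce to a few lines. A deliberately simplified, standalone toy (one fixed pool, a NULL-terminated free list, one virgin block carved per allocation, and none of the real pool_header fields), meant only to illustrate the idea:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE  16
#define POOL_BYTES  256

typedef struct {
    uint8_t *freeblock;      /* head of the singly linked list of freed blocks */
    uint32_t nextoffset;     /* offset of the next never-used ("virgin") block */
    uint32_t maxnextoffset;  /* largest valid nextoffset */
    uint8_t  mem[POOL_BYTES];
} toy_pool;

static void *
pool_alloc(toy_pool *p)
{
    if (p->freeblock != NULL) {               /* reuse a freed block first */
        uint8_t *block = p->freeblock;
        memcpy(&p->freeblock, block, sizeof(uint8_t *));   /* pop the list head */
        return block;
    }
    if (p->nextoffset <= p->maxnextoffset) {  /* otherwise carve a virgin block */
        uint8_t *block = p->mem + p->nextoffset;
        p->nextoffset += BLOCK_SIZE;
        return block;
    }
    return NULL;                              /* pool is full */
}

static void
pool_free(toy_pool *p, void *ptr)
{
    memcpy(ptr, &p->freeblock, sizeof(uint8_t *));          /* push onto the free list */
    p->freeblock = (uint8_t *)ptr;
}

int
main(void)
{
    toy_pool pool = { .freeblock = NULL, .nextoffset = 0,
                      .maxnextoffset = POOL_BYTES - BLOCK_SIZE };
    void *a = pool_alloc(&pool);
    void *b = pool_alloc(&pool);
    pool_free(&pool, a);
    printf("freed block is reused: %d\n", pool_alloc(&pool) == a);
    (void)b;
    return 0;
}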
+ +So long as a pool is in the used state, we're certain there *is* a block +available for allocating, and pool->freeblock is not NULL. If pool->freeblock +points to the end of the free list before we've carved the entire pool into +blocks, that means we simply haven't yet gotten to one of the higher-address +blocks. The offset from the pool_header to the start of "the next" virgin +block is stored in the pool_header nextoffset member, and the largest value +of nextoffset that makes sense is stored in the maxnextoffset member when a +pool is initialized. All the blocks in a pool have been passed out at least +once when and only when nextoffset > maxnextoffset. + + +Major obscurity: While the usedpools vector is declared to have poolp +entries, it doesn't really. It really contains two pointers per (conceptual) +poolp entry, the nextpool and prevpool members of a pool_header. The +excruciating initialization code below fools C so that + + usedpool[i+i] + +"acts like" a genuine poolp, but only so long as you only reference its +nextpool and prevpool members. The "- 2*sizeof(pymem_block *)" gibberish is +compensating for that a pool_header's nextpool and prevpool members +immediately follow a pool_header's first two members: + + union { pymem_block *_padding; + uint count; } ref; + pymem_block *freeblock; + +each of which consume sizeof(pymem_block *) bytes. So what usedpools[i+i] really +contains is a fudged-up pointer p such that *if* C believes it's a poolp +pointer, then p->nextpool and p->prevpool are both p (meaning that the headed +circular list is empty). + +It's unclear why the usedpools setup is so convoluted. It could be to +minimize the amount of cache required to hold this heavily-referenced table +(which only *needs* the two interpool pointer members of a pool_header). OTOH, +referencing code has to remember to "double the index" and doing so isn't +free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying +on that C doesn't insert any padding anywhere in a pool_header at or before +the prevpool member. +**************************************************************************** */ + +struct _obmalloc_pools { + poolp used[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8]; +}; + +#define usedpools (_PyRuntime.obmalloc.pools.used) + + +/*========================================================================== +Arena management. + +`arenas` is a vector of arena_objects. It contains maxarenas entries, some of +which may not be currently used (== they're arena_objects that aren't +currently associated with an allocated arena). Note that arenas proper are +separately malloc'ed. + +Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5, +we do try to free() arenas, and use some mild heuristic strategies to increase +the likelihood that arenas eventually can be freed. + +unused_arena_objects + + This is a singly-linked list of the arena_objects that are currently not + being used (no arena is associated with them). Objects are taken off the + head of the list in new_arena(), and are pushed on the head of the list in + PyObject_Free() when the arena is empty. Key invariant: an arena_object + is on this list if and only if its .address member is 0. + +usable_arenas + + This is a doubly-linked list of the arena_objects associated with arenas + that have pools available. These pools are either waiting to be reused, + or have not been used before. 
The list is sorted to have the most- + allocated arenas first (ascending order based on the nfreepools member). + This means that the next allocation will come from a heavily used arena, + which gives the nearly empty arenas a chance to be returned to the system. + In my unscientific tests this dramatically improved the number of arenas + that could be freed. + +Note that an arena_object associated with an arena all of whose pools are +currently in use isn't on either list. + +Changed in Python 3.8: keeping usable_arenas sorted by number of free pools +used to be done by one-at-a-time linear search when an arena's number of +free pools changed. That could, overall, consume time quadratic in the +number of arenas. That didn't really matter when there were only a few +hundred arenas (typical!), but could be a timing disaster when there were +hundreds of thousands. See bpo-37029. + +Now we have a vector of "search fingers" to eliminate the need to search: +nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas +with nfp free pools. This is NULL if and only if there is no arena with +nfp free pools in usable_arenas. +*/ + +/* How many arena_objects do we initially allocate? + * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the + * `arenas` vector. + */ +#define INITIAL_ARENA_OBJECTS 16 + +struct _obmalloc_mgmt { + /* Array of objects used to track chunks of memory (arenas). */ + struct arena_object* arenas; + /* Number of slots currently allocated in the `arenas` vector. */ + uint maxarenas; + + /* The head of the singly-linked, NULL-terminated list of available + * arena_objects. + */ + struct arena_object* unused_arena_objects; + + /* The head of the doubly-linked, NULL-terminated at each end, list of + * arena_objects associated with arenas that have pools available. + */ + struct arena_object* usable_arenas; + + /* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */ + struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1]; + + /* Number of arenas allocated that haven't been free()'d. */ + size_t narenas_currently_allocated; + + /* Total number of times malloc() called to allocate an arena. */ + size_t ntimes_arena_allocated; + /* High water mark (max value ever seen) for narenas_currently_allocated. */ + size_t narenas_highwater; + + Py_ssize_t raw_allocated_blocks; +}; + + +#if WITH_PYMALLOC_RADIX_TREE +/*==========================================================================*/ +/* radix tree for tracking arena usage. If enabled, used to implement + address_in_range(). + + memory address bit allocation for keys + + 64-bit pointers, IGNORE_BITS=0 and 2^20 arena size: + 15 -> MAP_TOP_BITS + 15 -> MAP_MID_BITS + 14 -> MAP_BOT_BITS + 20 -> ideal aligned arena + ---- + 64 + + 64-bit pointers, IGNORE_BITS=16, and 2^20 arena size: + 16 -> IGNORE_BITS + 10 -> MAP_TOP_BITS + 10 -> MAP_MID_BITS + 8 -> MAP_BOT_BITS + 20 -> ideal aligned arena + ---- + 64 + + 32-bit pointers and 2^18 arena size: + 14 -> MAP_BOT_BITS + 18 -> ideal aligned arena + ---- + 32 + +*/ + +#if SIZEOF_VOID_P == 8 + +/* number of bits in a pointer */ +#define POINTER_BITS 64 + +/* High bits of memory addresses that will be ignored when indexing into the + * radix tree. Setting this to zero is the safe default. For most 64-bit + * machines, setting this to 16 would be safe. The kernel would not give + * user-space virtual memory addresses that have significant information in + * those high bits. 
The main advantage to setting IGNORE_BITS > 0 is that less + * virtual memory will be used for the top and middle radix tree arrays. Those + * arrays are allocated in the BSS segment and so will typically consume real + * memory only if actually accessed. + */ +#define IGNORE_BITS 0 + +/* use the top and mid layers of the radix tree */ +#define USE_INTERIOR_NODES + +#elif SIZEOF_VOID_P == 4 + +#define POINTER_BITS 32 +#define IGNORE_BITS 0 + +#else + + /* Currently this code works for 64-bit or 32-bit pointers only. */ +#error "obmalloc radix tree requires 64-bit or 32-bit pointers." + +#endif /* SIZEOF_VOID_P */ + +/* arena_coverage_t members require this to be true */ +#if ARENA_BITS >= 32 +# error "arena size must be < 2^32" +#endif + +/* the lower bits of the address that are not ignored */ +#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS) + +#ifdef USE_INTERIOR_NODES +/* number of bits used for MAP_TOP and MAP_MID nodes */ +#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3) +#else +#define INTERIOR_BITS 0 +#endif + +#define MAP_TOP_BITS INTERIOR_BITS +#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS) +#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1) + +#define MAP_MID_BITS INTERIOR_BITS +#define MAP_MID_LENGTH (1 << MAP_MID_BITS) +#define MAP_MID_MASK (MAP_MID_LENGTH - 1) + +#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS) +#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS) +#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1) + +#define MAP_BOT_SHIFT ARENA_BITS +#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT) +#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT) + +#define AS_UINT(p) ((uintptr_t)(p)) +#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK) +#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK) +#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK) + +#if IGNORE_BITS > 0 +/* Return the ignored part of the pointer address. Those bits should be same + * for all valid pointers if IGNORE_BITS is set correctly. + */ +#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS) +#else +#define HIGH_BITS(p) 0 +#endif + + +/* This is the leaf of the radix tree. See arena_map_mark_used() for the + * meaning of these members. */ +typedef struct { + int32_t tail_hi; + int32_t tail_lo; +} arena_coverage_t; + +typedef struct arena_map_bot { + /* The members tail_hi and tail_lo are accessed together. So, it + * better to have them as an array of structs, rather than two + * arrays. + */ + arena_coverage_t arenas[MAP_BOT_LENGTH]; +} arena_map_bot_t; + +#ifdef USE_INTERIOR_NODES +typedef struct arena_map_mid { + struct arena_map_bot *ptrs[MAP_MID_LENGTH]; +} arena_map_mid_t; + +typedef struct arena_map_top { + struct arena_map_mid *ptrs[MAP_TOP_LENGTH]; +} arena_map_top_t; +#endif + +struct _obmalloc_usage { + /* The root of radix tree. Note that by initializing like this, the memory + * should be in the BSS. The OS will only memory map pages as the MAP_MID + * nodes get used (OS pages are demand loaded as needed). 
+ */ +#ifdef USE_INTERIOR_NODES + arena_map_top_t arena_map_root; + /* accounting for number of used interior nodes */ + int arena_map_mid_count; + int arena_map_bot_count; +#else + arena_map_bot_t arena_map_root; +#endif +}; + +#endif /* WITH_PYMALLOC_RADIX_TREE */ + + +struct _obmalloc_state { + struct _obmalloc_pools pools; + struct _obmalloc_mgmt mgmt; + struct _obmalloc_usage usage; +}; + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_OBMALLOC_H diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h new file mode 100644 index 00000000000000..021fe8c88c21e9 --- /dev/null +++ b/Include/internal/pycore_obmalloc_init.h @@ -0,0 +1,57 @@ +#ifndef Py_INTERNAL_OBMALLOC_INIT_H +#define Py_INTERNAL_OBMALLOC_INIT_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +#define PTA(x) ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(pymem_block *))) +#define PT(x) PTA(x), PTA(x) + +#define PT_8(start) \ + PT(start), PT(start+1), PT(start+2), PT(start+3), PT(start+4), PT(start+5), PT(start+6), PT(start+7) + +#if NB_SMALL_SIZE_CLASSES <= 8 +# define _obmalloc_pools_INIT \ + { PT_8(0) } +#elif NB_SMALL_SIZE_CLASSES <= 16 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8) } +#elif NB_SMALL_SIZE_CLASSES <= 24 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16) } +#elif NB_SMALL_SIZE_CLASSES <= 32 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16), PT_8(24) } +#elif NB_SMALL_SIZE_CLASSES <= 40 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32) } +#elif NB_SMALL_SIZE_CLASSES <= 48 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40) } +#elif NB_SMALL_SIZE_CLASSES <= 56 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40), PT_8(48) } +#elif NB_SMALL_SIZE_CLASSES <= 64 +# define _obmalloc_pools_INIT \ + { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40), PT_8(48), PT_8(56) } +#else +# error "NB_SMALL_SIZE_CLASSES should be less than 64" +#endif + +#define _obmalloc_state_INIT \ + { \ + .pools = { \ + .used = _obmalloc_pools_INIT, \ + }, \ + } + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_OBMALLOC_INIT_H diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h index 4b0b17c10f82ce..33e845f0bea361 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h @@ -13,6 +13,7 @@ extern "C" { #include "pycore_global_objects.h" // struct _Py_global_objects #include "pycore_interp.h" // PyInterpreterState #include "pycore_pymem.h" // struct _pymem_allocators +#include "pycore_obmalloc.h" // struct obmalloc_state #include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_ids struct _getargs_runtime_state { @@ -87,6 +88,7 @@ typedef struct pyruntimestate { _Py_atomic_address _finalizing; struct _pymem_allocators allocators; + struct _obmalloc_state obmalloc; struct pyinterpreters { PyThread_type_lock mutex; diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 4b812987c13bf5..ee3a60403eb65a 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -10,6 +10,7 @@ extern "C" { #include "pycore_object.h" #include "pycore_pymem_init.h" +#include "pycore_obmalloc_init.h" /* The static initializers defined here should only be used @@ -29,6 +30,7 @@ extern "C" { _pymem_allocators_debug_INIT, \ 
_pymem_allocators_obj_arena_INIT, \ }, \ + .obmalloc = _obmalloc_state_INIT, \ .interpreters = { \ /* This prevents interpreters from getting created \ until _PyInterpreterState_Enable() is called. */ \ diff --git a/Makefile.pre.in b/Makefile.pre.in index 192411e892ae9b..a2930a4179013f 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1651,6 +1651,8 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_moduleobject.h \ $(srcdir)/Include/internal/pycore_namespace.h \ $(srcdir)/Include/internal/pycore_object.h \ + $(srcdir)/Include/internal/pycore_obmalloc.h \ + $(srcdir)/Include/internal/pycore_obmalloc_init.h \ $(srcdir)/Include/internal/pycore_pathconfig.h \ $(srcdir)/Include/internal/pycore_pyarena.h \ $(srcdir)/Include/internal/pycore_pyerrors.h \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index c7309fc05a5324..a03199e90f64b1 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -588,516 +588,15 @@ static int running_on_valgrind = -1; #endif -/* An object allocator for Python. - - Here is an introduction to the layers of the Python memory architecture, - showing where the object allocator is actually used (layer +2), It is - called for every object allocation and deallocation (PyObject_New/Del), - unless the object-specific allocators implement a proprietary allocation - scheme (ex.: ints use a simple free list). This is also the place where - the cyclic garbage collector operates selectively on container objects. - - - Object-specific allocators - _____ ______ ______ ________ - [ int ] [ dict ] [ list ] ... [ string ] Python core | -+3 | <----- Object-specific memory -----> | <-- Non-object memory --> | - _______________________________ | | - [ Python's object allocator ] | | -+2 | ####### Object memory ####### | <------ Internal buffers ------> | - ______________________________________________________________ | - [ Python's raw memory allocator (PyMem_ API) ] | -+1 | <----- Python memory (under PyMem manager's control) ------> | | - __________________________________________________________________ - [ Underlying general-purpose allocator (ex: C library malloc) ] - 0 | <------ Virtual memory allocated for the python process -------> | - - ========================================================================= - _______________________________________________________________________ - [ OS-specific Virtual Memory Manager (VMM) ] --1 | <--- Kernel dynamic storage allocation & management (page-based) ---> | - __________________________________ __________________________________ - [ ] [ ] --2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> | - -*/ -/*==========================================================================*/ - -/* A fast, special-purpose memory allocator for small blocks, to be used - on top of a general-purpose malloc -- heavily based on previous art. */ - -/* Vladimir Marangozov -- August 2000 */ - -/* - * "Memory management is where the rubber meets the road -- if we do the wrong - * thing at any level, the results will not be good. And if we don't make the - * levels work well together, we are in serious trouble." (1) - * - * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles, - * "Dynamic Storage Allocation: A Survey and Critical Review", - * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995. 
- */ - -/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */ - -/*==========================================================================*/ - -/* - * Allocation strategy abstract: - * - * For small requests, the allocator sub-allocates blocks of memory. - * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the - * system's allocator. - * - * Small requests are grouped in size classes spaced 8 bytes apart, due - * to the required valid alignment of the returned address. Requests of - * a particular size are serviced from memory pools of 4K (one VMM page). - * Pools are fragmented on demand and contain free lists of blocks of one - * particular size class. In other words, there is a fixed-size allocator - * for each size class. Free pools are shared by the different allocators - * thus minimizing the space reserved for a particular size class. - * - * This allocation strategy is a variant of what is known as "simple - * segregated storage based on array of free lists". The main drawback of - * simple segregated storage is that we might end up with lot of reserved - * memory for the different free lists, which degenerate in time. To avoid - * this, we partition each free list in pools and we share dynamically the - * reserved space between all free lists. This technique is quite efficient - * for memory intensive programs which allocate mainly small-sized blocks. - * - * For small requests we have the following table: - * - * Request in bytes Size of allocated block Size class idx - * ---------------------------------------------------------------- - * 1-8 8 0 - * 9-16 16 1 - * 17-24 24 2 - * 25-32 32 3 - * 33-40 40 4 - * 41-48 48 5 - * 49-56 56 6 - * 57-64 64 7 - * 65-72 72 8 - * ... ... ... - * 497-504 504 62 - * 505-512 512 63 - * - * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying - * allocator. - */ - -/*==========================================================================*/ - -/* - * -- Main tunable settings section -- - */ - -/* - * Alignment of addresses returned to the user. 8-bytes alignment works - * on most current architectures (with 32-bit or 64-bit address buses). - * The alignment value is also used for grouping small requests in size - * classes spaced ALIGNMENT bytes apart. - * - * You shouldn't change this unless you know what you are doing. - */ - -#if SIZEOF_VOID_P > 4 -#define ALIGNMENT 16 /* must be 2^N */ -#define ALIGNMENT_SHIFT 4 -#else -#define ALIGNMENT 8 /* must be 2^N */ -#define ALIGNMENT_SHIFT 3 -#endif - -/* Return the number of bytes in size class I, as a uint. */ -#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT) - -/* - * Max size threshold below which malloc requests are considered to be - * small enough in order to use preallocated memory pools. You can tune - * this value according to your application behaviour and memory needs. - * - * Note: a size threshold of 512 guarantees that newly created dictionaries - * will be allocated from preallocated memory pools on 64-bit. - * - * The following invariants must hold: - * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512 - * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT - * - * Although not required, for better performance and space efficiency, - * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2. 
- */ -#define SMALL_REQUEST_THRESHOLD 512 -#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT) - -/* - * The system's VMM page size can be obtained on most unices with a - * getpagesize() call or deduced from various header files. To make - * things simpler, we assume that it is 4K, which is OK for most systems. - * It is probably better if this is the native page size, but it doesn't - * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page - * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation - * violation fault. 4K is apparently OK for all the platforms that python - * currently targets. - */ -#define SYSTEM_PAGE_SIZE (4 * 1024) - -/* - * Maximum amount of memory managed by the allocator for small requests. - */ -#ifdef WITH_MEMORY_LIMITS -#ifndef SMALL_MEMORY_LIMIT -#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */ -#endif -#endif - -#if !defined(WITH_PYMALLOC_RADIX_TREE) -/* Use radix-tree to track arena memory regions, for address_in_range(). - * Enable by default since it allows larger pool sizes. Can be disabled - * using -DWITH_PYMALLOC_RADIX_TREE=0 */ -#define WITH_PYMALLOC_RADIX_TREE 1 -#endif - -#if SIZEOF_VOID_P > 4 -/* on 64-bit platforms use larger pools and arenas if we can */ -#define USE_LARGE_ARENAS -#if WITH_PYMALLOC_RADIX_TREE -/* large pools only supported if radix-tree is enabled */ -#define USE_LARGE_POOLS -#endif -#endif - -/* - * The allocator sub-allocates blocks of memory (called arenas) aligned - * on a page boundary. This is a reserved virtual address space for the - * current process (obtained through a malloc()/mmap() call). In no way this - * means that the memory arenas will be used entirely. A malloc() is - * usually an address range reservation for bytes, unless all pages within - * this space are referenced subsequently. So malloc'ing big blocks and not - * using them does not mean "wasting memory". It's an addressable range - * wastage... - * - * Arenas are allocated with mmap() on systems supporting anonymous memory - * mappings to reduce heap fragmentation. - */ -#ifdef USE_LARGE_ARENAS -#define ARENA_BITS 20 /* 1 MiB */ -#else -#define ARENA_BITS 18 /* 256 KiB */ -#endif -#define ARENA_SIZE (1 << ARENA_BITS) -#define ARENA_SIZE_MASK (ARENA_SIZE - 1) - -#ifdef WITH_MEMORY_LIMITS -#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE) -#endif - -/* - * Size of the pools used for small blocks. Must be a power of 2. - */ -#ifdef USE_LARGE_POOLS -#define POOL_BITS 14 /* 16 KiB */ -#else -#define POOL_BITS 12 /* 4 KiB */ -#endif -#define POOL_SIZE (1 << POOL_BITS) -#define POOL_SIZE_MASK (POOL_SIZE - 1) - -#if !WITH_PYMALLOC_RADIX_TREE -#if POOL_SIZE != SYSTEM_PAGE_SIZE -# error "pool size must be equal to system page size" -#endif -#endif - -#define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE) -#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE -# error "arena size not an exact multiple of pool size" -#endif - -/* - * -- End of tunable settings section -- - */ - -/*==========================================================================*/ - -/* When you say memory, my mind reasons in terms of (pointers to) blocks */ -typedef uint8_t block; - -/* Pool for small blocks. 
*/ -struct pool_header { - union { block *_padding; - uint count; } ref; /* number of allocated blocks */ - block *freeblock; /* pool's free list head */ - struct pool_header *nextpool; /* next pool of this size class */ - struct pool_header *prevpool; /* previous pool "" */ - uint arenaindex; /* index into arenas of base adr */ - uint szidx; /* block size class index */ - uint nextoffset; /* bytes to virgin block */ - uint maxnextoffset; /* largest valid nextoffset */ -}; - -typedef struct pool_header *poolp; - -/* Record keeping for arenas. */ -struct arena_object { - /* The address of the arena, as returned by malloc. Note that 0 - * will never be returned by a successful malloc, and is used - * here to mark an arena_object that doesn't correspond to an - * allocated arena. - */ - uintptr_t address; - - /* Pool-aligned pointer to the next pool to be carved off. */ - block* pool_address; - - /* The number of available pools in the arena: free pools + never- - * allocated pools. - */ - uint nfreepools; - - /* The total number of pools in the arena, whether or not available. */ - uint ntotalpools; - - /* Singly-linked list of available pools. */ - struct pool_header* freepools; - - /* Whenever this arena_object is not associated with an allocated - * arena, the nextarena member is used to link all unassociated - * arena_objects in the singly-linked `unused_arena_objects` list. - * The prevarena member is unused in this case. - * - * When this arena_object is associated with an allocated arena - * with at least one available pool, both members are used in the - * doubly-linked `usable_arenas` list, which is maintained in - * increasing order of `nfreepools` values. - * - * Else this arena_object is associated with an allocated arena - * all of whose pools are in use. `nextarena` and `prevarena` - * are both meaningless in this case. - */ - struct arena_object* nextarena; - struct arena_object* prevarena; -}; - -#define POOL_OVERHEAD _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT) - -#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */ - -/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */ -#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE)) - -/* Return total number of blocks in pool of size index I, as a uint. */ -#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) - -/*==========================================================================*/ - -/* - * Pool table -- headed, circular, doubly-linked lists of partially used pools. - -This is involved. For an index i, usedpools[i+i] is the header for a list of -all partially used pools holding small blocks with "size class idx" i. So -usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size -16, and so on: index 2*i <-> blocks of size (i+1)<freeblock points to -the start of a singly-linked list of free blocks within the pool. When a -block is freed, it's inserted at the front of its pool's freeblock list. Note -that the available blocks in a pool are *not* linked all together when a pool -is initialized. Instead only "the first two" (lowest addresses) blocks are -set up, returning the first such block, and setting pool->freeblock to a -one-block list holding the second such block. This is consistent with that -pymalloc strives at all levels (arena, pool, and block) never to touch a piece -of memory until it's actually needed. 
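
A compilable toy sketch of the freeblock discipline described above may help here: freed blocks are pushed on the front of a singly-linked list whose links live in the first bytes of the blocks themselves. All toy_ names and sizes below are invented for illustration; the real allocator also carves blocks lazily via nextoffset/maxnextoffset, which this sketch skips.

#include <stdint.h>
#include <stdio.h>

#define TOY_BLOCK_SIZE 16
#define TOY_NBLOCKS     8

typedef uint8_t toy_block;

typedef struct {
    toy_block *freeblock;                        /* head of the free list */
    toy_block storage[TOY_BLOCK_SIZE * TOY_NBLOCKS];
} toy_pool;

static toy_block *
toy_alloc(toy_pool *pool)
{
    toy_block *bp = pool->freeblock;
    if (bp != NULL) {
        /* Pop: the first bytes of a free block hold the next-block pointer. */
        pool->freeblock = *(toy_block **)bp;
    }
    return bp;
}

static void
toy_free(toy_pool *pool, toy_block *bp)
{
    /* Push the freed block on the front of the list. */
    *(toy_block **)bp = pool->freeblock;
    pool->freeblock = bp;
}

int
main(void)
{
    toy_pool pool = { .freeblock = NULL };
    /* Link every block up front; obmalloc does this lazily instead. */
    for (int i = TOY_NBLOCKS - 1; i >= 0; i--) {
        toy_free(&pool, pool.storage + i * TOY_BLOCK_SIZE);
    }
    toy_block *a = toy_alloc(&pool);
    toy_free(&pool, a);
    printf("front of list reused: %s\n", toy_alloc(&pool) == a ? "yes" : "no");
    return 0;
}
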
- -So long as a pool is in the used state, we're certain there *is* a block -available for allocating, and pool->freeblock is not NULL. If pool->freeblock -points to the end of the free list before we've carved the entire pool into -blocks, that means we simply haven't yet gotten to one of the higher-address -blocks. The offset from the pool_header to the start of "the next" virgin -block is stored in the pool_header nextoffset member, and the largest value -of nextoffset that makes sense is stored in the maxnextoffset member when a -pool is initialized. All the blocks in a pool have been passed out at least -once when and only when nextoffset > maxnextoffset. - - -Major obscurity: While the usedpools vector is declared to have poolp -entries, it doesn't really. It really contains two pointers per (conceptual) -poolp entry, the nextpool and prevpool members of a pool_header. The -excruciating initialization code below fools C so that - - usedpool[i+i] - -"acts like" a genuine poolp, but only so long as you only reference its -nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is -compensating for that a pool_header's nextpool and prevpool members -immediately follow a pool_header's first two members: - - union { block *_padding; - uint count; } ref; - block *freeblock; - -each of which consume sizeof(block *) bytes. So what usedpools[i+i] really -contains is a fudged-up pointer p such that *if* C believes it's a poolp -pointer, then p->nextpool and p->prevpool are both p (meaning that the headed -circular list is empty). - -It's unclear why the usedpools setup is so convoluted. It could be to -minimize the amount of cache required to hold this heavily-referenced table -(which only *needs* the two interpool pointer members of a pool_header). OTOH, -referencing code has to remember to "double the index" and doing so isn't -free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying -on that C doesn't insert any padding anywhere in a pool_header at or before -the prevpool member. 
-**************************************************************************** */ - -#define PTA(x) ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *))) -#define PT(x) PTA(x), PTA(x) - -static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = { - PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7) -#if NB_SMALL_SIZE_CLASSES > 8 - , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15) -#if NB_SMALL_SIZE_CLASSES > 16 - , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23) -#if NB_SMALL_SIZE_CLASSES > 24 - , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31) -#if NB_SMALL_SIZE_CLASSES > 32 - , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39) -#if NB_SMALL_SIZE_CLASSES > 40 - , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47) -#if NB_SMALL_SIZE_CLASSES > 48 - , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55) -#if NB_SMALL_SIZE_CLASSES > 56 - , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63) -#if NB_SMALL_SIZE_CLASSES > 64 -#error "NB_SMALL_SIZE_CLASSES should be less than 64" -#endif /* NB_SMALL_SIZE_CLASSES > 64 */ -#endif /* NB_SMALL_SIZE_CLASSES > 56 */ -#endif /* NB_SMALL_SIZE_CLASSES > 48 */ -#endif /* NB_SMALL_SIZE_CLASSES > 40 */ -#endif /* NB_SMALL_SIZE_CLASSES > 32 */ -#endif /* NB_SMALL_SIZE_CLASSES > 24 */ -#endif /* NB_SMALL_SIZE_CLASSES > 16 */ -#endif /* NB_SMALL_SIZE_CLASSES > 8 */ -}; - -/*========================================================================== -Arena management. - -`arenas` is a vector of arena_objects. It contains maxarenas entries, some of -which may not be currently used (== they're arena_objects that aren't -currently associated with an allocated arena). Note that arenas proper are -separately malloc'ed. - -Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5, -we do try to free() arenas, and use some mild heuristic strategies to increase -the likelihood that arenas eventually can be freed. - -unused_arena_objects - - This is a singly-linked list of the arena_objects that are currently not - being used (no arena is associated with them). Objects are taken off the - head of the list in new_arena(), and are pushed on the head of the list in - PyObject_Free() when the arena is empty. Key invariant: an arena_object - is on this list if and only if its .address member is 0. - -usable_arenas - - This is a doubly-linked list of the arena_objects associated with arenas - that have pools available. These pools are either waiting to be reused, - or have not been used before. The list is sorted to have the most- - allocated arenas first (ascending order based on the nfreepools member). - This means that the next allocation will come from a heavily used arena, - which gives the nearly empty arenas a chance to be returned to the system. - In my unscientific tests this dramatically improved the number of arenas - that could be freed. - -Note that an arena_object associated with an arena all of whose pools are -currently in use isn't on either list. - -Changed in Python 3.8: keeping usable_arenas sorted by number of free pools -used to be done by one-at-a-time linear search when an arena's number of -free pools changed. That could, overall, consume time quadratic in the -number of arenas. That didn't really matter when there were only a few -hundred arenas (typical!), but could be a timing disaster when there were -hundreds of thousands. See bpo-37029. 
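
The "search finger" vector described in the next paragraph can be pictured with a small standalone sketch. The toy_ names are invented, and the real code also keeps the fingers up to date as free-pool counts change, which is omitted here.

#include <stdio.h>

#define TOY_MAX_NFP 8

struct toy_arena {
    int nfreepools;
    struct toy_arena *next;       /* kept in ascending nfreepools order */
};

int
main(void)
{
    struct toy_arena a = {1, NULL}, b = {2, NULL}, c = {2, NULL}, d = {5, NULL};
    a.next = &b; b.next = &c; c.next = &d;          /* 1, 2, 2, 5 */

    /* lasta[nfp] = rightmost arena in the list with nfp free pools. */
    struct toy_arena *lasta[TOY_MAX_NFP + 1] = {NULL};
    for (struct toy_arena *p = &a; p != NULL; p = p->next) {
        lasta[p->nfreepools] = p;                   /* later nodes overwrite */
    }

    /* Instead of scanning the list to find where the arenas with 2 free
     * pools end, consult the finger directly. */
    printf("lasta[2] is c: %s\n", lasta[2] == &c ? "yes" : "no");
    printf("no arena with 3 free pools: %s\n", lasta[3] == NULL ? "yes" : "no");
    return 0;
}
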
- -Now we have a vector of "search fingers" to eliminate the need to search: -nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas -with nfp free pools. This is NULL if and only if there is no arena with -nfp free pools in usable_arenas. -*/ - -/* Array of objects used to track chunks of memory (arenas). */ -static struct arena_object* arenas = NULL; -/* Number of slots currently allocated in the `arenas` vector. */ -static uint maxarenas = 0; - -/* The head of the singly-linked, NULL-terminated list of available - * arena_objects. - */ -static struct arena_object* unused_arena_objects = NULL; - -/* The head of the doubly-linked, NULL-terminated at each end, list of - * arena_objects associated with arenas that have pools available. - */ -static struct arena_object* usable_arenas = NULL; - -/* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */ -static struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1] = { NULL }; - -/* How many arena_objects do we initially allocate? - * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the - * `arenas` vector. - */ -#define INITIAL_ARENA_OBJECTS 16 - -/* Number of arenas allocated that haven't been free()'d. */ -static size_t narenas_currently_allocated = 0; - -/* Total number of times malloc() called to allocate an arena. */ -static size_t ntimes_arena_allocated = 0; -/* High water mark (max value ever seen) for narenas_currently_allocated. */ -static size_t narenas_highwater = 0; - -static Py_ssize_t raw_allocated_blocks; +#define allarenas (_PyRuntime.obmalloc.mgmt.arenas) +#define maxarenas (_PyRuntime.obmalloc.mgmt.maxarenas) +#define unused_arena_objects (_PyRuntime.obmalloc.mgmt.unused_arena_objects) +#define usable_arenas (_PyRuntime.obmalloc.mgmt.usable_arenas) +#define nfp2lasta (_PyRuntime.obmalloc.mgmt.nfp2lasta) +#define narenas_currently_allocated (_PyRuntime.obmalloc.mgmt.narenas_currently_allocated) +#define ntimes_arena_allocated (_PyRuntime.obmalloc.mgmt.ntimes_arena_allocated) +#define narenas_highwater (_PyRuntime.obmalloc.mgmt.narenas_highwater) +#define raw_allocated_blocks (_PyRuntime.obmalloc.mgmt.raw_allocated_blocks) Py_ssize_t _Py_GetAllocatedBlocks(void) @@ -1106,15 +605,15 @@ _Py_GetAllocatedBlocks(void) /* add up allocated blocks for used pools */ for (uint i = 0; i < maxarenas; ++i) { /* Skip arenas which are not allocated. */ - if (arenas[i].address == 0) { + if (allarenas[i].address == 0) { continue; } - uintptr_t base = (uintptr_t)_Py_ALIGN_UP(arenas[i].address, POOL_SIZE); + uintptr_t base = (uintptr_t)_Py_ALIGN_UP(allarenas[i].address, POOL_SIZE); /* visit every pool in the arena */ - assert(base <= (uintptr_t) arenas[i].pool_address); - for (; base < (uintptr_t) arenas[i].pool_address; base += POOL_SIZE) { + assert(base <= (uintptr_t) allarenas[i].pool_address); + for (; base < (uintptr_t) allarenas[i].pool_address; base += POOL_SIZE) { poolp p = (poolp)base; n += p->ref.count; } @@ -1124,155 +623,18 @@ _Py_GetAllocatedBlocks(void) #if WITH_PYMALLOC_RADIX_TREE /*==========================================================================*/ -/* radix tree for tracking arena usage. If enabled, used to implement - address_in_range(). 
- - memory address bit allocation for keys - - 64-bit pointers, IGNORE_BITS=0 and 2^20 arena size: - 15 -> MAP_TOP_BITS - 15 -> MAP_MID_BITS - 14 -> MAP_BOT_BITS - 20 -> ideal aligned arena - ---- - 64 - - 64-bit pointers, IGNORE_BITS=16, and 2^20 arena size: - 16 -> IGNORE_BITS - 10 -> MAP_TOP_BITS - 10 -> MAP_MID_BITS - 8 -> MAP_BOT_BITS - 20 -> ideal aligned arena - ---- - 64 - - 32-bit pointers and 2^18 arena size: - 14 -> MAP_BOT_BITS - 18 -> ideal aligned arena - ---- - 32 - -*/ - -#if SIZEOF_VOID_P == 8 - -/* number of bits in a pointer */ -#define POINTER_BITS 64 - -/* High bits of memory addresses that will be ignored when indexing into the - * radix tree. Setting this to zero is the safe default. For most 64-bit - * machines, setting this to 16 would be safe. The kernel would not give - * user-space virtual memory addresses that have significant information in - * those high bits. The main advantage to setting IGNORE_BITS > 0 is that less - * virtual memory will be used for the top and middle radix tree arrays. Those - * arrays are allocated in the BSS segment and so will typically consume real - * memory only if actually accessed. - */ -#define IGNORE_BITS 0 - -/* use the top and mid layers of the radix tree */ -#define USE_INTERIOR_NODES - -#elif SIZEOF_VOID_P == 4 - -#define POINTER_BITS 32 -#define IGNORE_BITS 0 - -#else - - /* Currently this code works for 64-bit or 32-bit pointers only. */ -#error "obmalloc radix tree requires 64-bit or 32-bit pointers." - -#endif /* SIZEOF_VOID_P */ - -/* arena_coverage_t members require this to be true */ -#if ARENA_BITS >= 32 -# error "arena size must be < 2^32" -#endif - -/* the lower bits of the address that are not ignored */ -#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS) - -#ifdef USE_INTERIOR_NODES -/* number of bits used for MAP_TOP and MAP_MID nodes */ -#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3) -#else -#define INTERIOR_BITS 0 -#endif - -#define MAP_TOP_BITS INTERIOR_BITS -#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS) -#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1) - -#define MAP_MID_BITS INTERIOR_BITS -#define MAP_MID_LENGTH (1 << MAP_MID_BITS) -#define MAP_MID_MASK (MAP_MID_LENGTH - 1) - -#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS) -#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS) -#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1) - -#define MAP_BOT_SHIFT ARENA_BITS -#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT) -#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT) - -#define AS_UINT(p) ((uintptr_t)(p)) -#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK) -#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK) -#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK) - -#if IGNORE_BITS > 0 -/* Return the ignored part of the pointer address. Those bits should be same - * for all valid pointers if IGNORE_BITS is set correctly. - */ -#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS) -#else -#define HIGH_BITS(p) 0 -#endif - - -/* This is the leaf of the radix tree. See arena_map_mark_used() for the - * meaning of these members. */ -typedef struct { - int32_t tail_hi; - int32_t tail_lo; -} arena_coverage_t; - -typedef struct arena_map_bot { - /* The members tail_hi and tail_lo are accessed together. So, it - * better to have them as an array of structs, rather than two - * arrays. - */ - arena_coverage_t arenas[MAP_BOT_LENGTH]; -} arena_map_bot_t; +/* radix tree for tracking arena usage. 
*/ +#define arena_map_root (_PyRuntime.obmalloc.usage.arena_map_root) #ifdef USE_INTERIOR_NODES -typedef struct arena_map_mid { - struct arena_map_bot *ptrs[MAP_MID_LENGTH]; -} arena_map_mid_t; - -typedef struct arena_map_top { - struct arena_map_mid *ptrs[MAP_TOP_LENGTH]; -} arena_map_top_t; -#endif - -/* The root of radix tree. Note that by initializing like this, the memory - * should be in the BSS. The OS will only memory map pages as the MAP_MID - * nodes get used (OS pages are demand loaded as needed). - */ -#ifdef USE_INTERIOR_NODES -static arena_map_top_t arena_map_root; -/* accounting for number of used interior nodes */ -static int arena_map_mid_count; -static int arena_map_bot_count; -#else -static arena_map_bot_t arena_map_root; +#define arena_map_mid_count (_PyRuntime.obmalloc.usage.arena_map_mid_count) +#define arena_map_bot_count (_PyRuntime.obmalloc.usage.arena_map_bot_count) #endif /* Return a pointer to a bottom tree node, return NULL if it doesn't exist or * it cannot be created */ static Py_ALWAYS_INLINE arena_map_bot_t * -arena_map_get(block *p, int create) +arena_map_get(pymem_block *p, int create) { #ifdef USE_INTERIOR_NODES /* sanity check that IGNORE_BITS is correct */ @@ -1337,12 +699,12 @@ arena_map_mark_used(uintptr_t arena_base, int is_used) { /* sanity check that IGNORE_BITS is correct */ assert(HIGH_BITS(arena_base) == HIGH_BITS(&arena_map_root)); - arena_map_bot_t *n_hi = arena_map_get((block *)arena_base, is_used); + arena_map_bot_t *n_hi = arena_map_get((pymem_block *)arena_base, is_used); if (n_hi == NULL) { assert(is_used); /* otherwise node should already exist */ return 0; /* failed to allocate space for node */ } - int i3 = MAP_BOT_INDEX((block *)arena_base); + int i3 = MAP_BOT_INDEX((pymem_block *)arena_base); int32_t tail = (int32_t)(arena_base & ARENA_SIZE_MASK); if (tail == 0) { /* is ideal arena address */ @@ -1362,7 +724,7 @@ arena_map_mark_used(uintptr_t arena_base, int is_used) * must overflow to 0. However, that would mean arena_base was * "ideal" and we should not be in this case. */ assert(arena_base < arena_base_next); - arena_map_bot_t *n_lo = arena_map_get((block *)arena_base_next, is_used); + arena_map_bot_t *n_lo = arena_map_get((pymem_block *)arena_base_next, is_used); if (n_lo == NULL) { assert(is_used); /* otherwise should already exist */ n_hi->arenas[i3].tail_hi = 0; @@ -1377,7 +739,7 @@ arena_map_mark_used(uintptr_t arena_base, int is_used) /* Return true if 'p' is a pointer inside an obmalloc arena. * _PyObject_Free() calls this so it needs to be very fast. */ static int -arena_map_is_used(block *p) +arena_map_is_used(pymem_block *p) { arena_map_bot_t *n = arena_map_get(p, 0); if (n == NULL) { @@ -1429,14 +791,14 @@ new_arena(void) if (numarenas <= maxarenas) return NULL; /* overflow */ #if SIZEOF_SIZE_T <= SIZEOF_INT - if (numarenas > SIZE_MAX / sizeof(*arenas)) + if (numarenas > SIZE_MAX / sizeof(*allarenas)) return NULL; /* overflow */ #endif - nbytes = numarenas * sizeof(*arenas); - arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes); + nbytes = numarenas * sizeof(*allarenas); + arenaobj = (struct arena_object *)PyMem_RawRealloc(allarenas, nbytes); if (arenaobj == NULL) return NULL; - arenas = arenaobj; + allarenas = arenaobj; /* We might need to fix pointers that were copied. However, * new_arena only gets called when all the pages in the @@ -1449,13 +811,13 @@ new_arena(void) /* Put the new arenas on the unused_arena_objects list. 
*/ for (i = maxarenas; i < numarenas; ++i) { - arenas[i].address = 0; /* mark as unassociated */ - arenas[i].nextarena = i < numarenas - 1 ? - &arenas[i+1] : NULL; + allarenas[i].address = 0; /* mark as unassociated */ + allarenas[i].nextarena = i < numarenas - 1 ? + &allarenas[i+1] : NULL; } /* Update globals. */ - unused_arena_objects = &arenas[maxarenas]; + unused_arena_objects = &allarenas[maxarenas]; maxarenas = numarenas; } @@ -1491,7 +853,7 @@ new_arena(void) arenaobj->freepools = NULL; /* pool_address <- first pool-aligned address in the arena nfreepools <- number of whole pools that fit after alignment */ - arenaobj->pool_address = (block*)arenaobj->address; + arenaobj->pool_address = (pymem_block*)arenaobj->address; arenaobj->nfreepools = MAX_POOLS_IN_ARENA; excess = (uint)(arenaobj->address & POOL_SIZE_MASK); if (excess != 0) { @@ -1602,8 +964,8 @@ address_in_range(void *p, poolp pool) // only once. uint arenaindex = *((volatile uint *)&pool->arenaindex); return arenaindex < maxarenas && - (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE && - arenas[arenaindex].address != 0; + (uintptr_t)p - allarenas[arenaindex].address < ARENA_SIZE && + allarenas[arenaindex].address != 0; } #endif /* !WITH_PYMALLOC_RADIX_TREE */ @@ -1617,9 +979,9 @@ pymalloc_pool_extend(poolp pool, uint size) { if (UNLIKELY(pool->nextoffset <= pool->maxnextoffset)) { /* There is room for another block. */ - pool->freeblock = (block*)pool + pool->nextoffset; + pool->freeblock = (pymem_block*)pool + pool->nextoffset; pool->nextoffset += INDEX2SIZE(size); - *(block **)(pool->freeblock) = NULL; + *(pymem_block **)(pool->freeblock) = NULL; return; } @@ -1699,7 +1061,7 @@ allocate_from_new_pool(uint size) */ assert(usable_arenas->freepools != NULL || usable_arenas->pool_address <= - (block*)usable_arenas->address + + (pymem_block*)usable_arenas->address + ARENA_SIZE - POOL_SIZE); } } @@ -1708,10 +1070,10 @@ allocate_from_new_pool(uint size) assert(usable_arenas->nfreepools > 0); assert(usable_arenas->freepools == NULL); pool = (poolp)usable_arenas->pool_address; - assert((block*)pool <= (block*)usable_arenas->address + + assert((pymem_block*)pool <= (pymem_block*)usable_arenas->address + ARENA_SIZE - POOL_SIZE); - pool->arenaindex = (uint)(usable_arenas - arenas); - assert(&arenas[pool->arenaindex] == usable_arenas); + pool->arenaindex = (uint)(usable_arenas - allarenas); + assert(&allarenas[pool->arenaindex] == usable_arenas); pool->szidx = DUMMY_SIZE_IDX; usable_arenas->pool_address += POOL_SIZE; --usable_arenas->nfreepools; @@ -1730,7 +1092,7 @@ allocate_from_new_pool(uint size) } /* Frontlink to used pools. 
*/ - block *bp; + pymem_block *bp; poolp next = usedpools[size + size]; /* == prev */ pool->nextpool = next; pool->prevpool = next; @@ -1744,7 +1106,7 @@ allocate_from_new_pool(uint size) */ bp = pool->freeblock; assert(bp != NULL); - pool->freeblock = *(block **)bp; + pool->freeblock = *(pymem_block **)bp; return bp; } /* @@ -1754,11 +1116,11 @@ allocate_from_new_pool(uint size) */ pool->szidx = size; size = INDEX2SIZE(size); - bp = (block *)pool + POOL_OVERHEAD; + bp = (pymem_block *)pool + POOL_OVERHEAD; pool->nextoffset = POOL_OVERHEAD + (size << 1); pool->maxnextoffset = POOL_SIZE - size; pool->freeblock = bp + size; - *(block **)(pool->freeblock) = NULL; + *(pymem_block **)(pool->freeblock) = NULL; return bp; } @@ -1791,7 +1153,7 @@ pymalloc_alloc(void *Py_UNUSED(ctx), size_t nbytes) uint size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT; poolp pool = usedpools[size + size]; - block *bp; + pymem_block *bp; if (LIKELY(pool != pool->nextpool)) { /* @@ -1802,7 +1164,7 @@ pymalloc_alloc(void *Py_UNUSED(ctx), size_t nbytes) bp = pool->freeblock; assert(bp != NULL); - if (UNLIKELY((pool->freeblock = *(block **)bp) == NULL)) { + if (UNLIKELY((pool->freeblock = *(pymem_block **)bp) == NULL)) { // Reached the end of the free list, try to extend it. pymalloc_pool_extend(pool, size); } @@ -1881,7 +1243,7 @@ insert_to_freepool(poolp pool) /* Link the pool to freepools. This is a singly-linked * list, and pool->prevpool isn't used there. */ - struct arena_object *ao = &arenas[pool->arenaindex]; + struct arena_object *ao = &allarenas[pool->arenaindex]; pool->nextpool = ao->freepools; ao->freepools = pool; uint nf = ao->nfreepools; @@ -2064,9 +1426,9 @@ pymalloc_free(void *Py_UNUSED(ctx), void *p) * list in any case). */ assert(pool->ref.count > 0); /* else it was empty */ - block *lastfree = pool->freeblock; - *(block **)p = lastfree; - pool->freeblock = (block *)p; + pymem_block *lastfree = pool->freeblock; + *(pymem_block **)p = lastfree; + pool->freeblock = (pymem_block *)p; pool->ref.count--; if (UNLIKELY(lastfree == NULL)) { @@ -2825,14 +2187,14 @@ _PyObject_DebugMallocStats(FILE *out) * will be living in full pools -- would be a shame to miss them. */ for (i = 0; i < maxarenas; ++i) { - uintptr_t base = arenas[i].address; + uintptr_t base = allarenas[i].address; /* Skip arenas which are not allocated. 
*/ - if (arenas[i].address == (uintptr_t)NULL) + if (allarenas[i].address == (uintptr_t)NULL) continue; narenas += 1; - numfreepools += arenas[i].nfreepools; + numfreepools += allarenas[i].nfreepools; /* round up to pool alignment */ if (base & (uintptr_t)POOL_SIZE_MASK) { @@ -2842,8 +2204,8 @@ _PyObject_DebugMallocStats(FILE *out) } /* visit every pool in the arena */ - assert(base <= (uintptr_t) arenas[i].pool_address); - for (; base < (uintptr_t) arenas[i].pool_address; base += POOL_SIZE) { + assert(base <= (uintptr_t) allarenas[i].pool_address); + for (; base < (uintptr_t) allarenas[i].pool_address; base += POOL_SIZE) { poolp p = (poolp)base; const uint sz = p->szidx; uint freeblocks; @@ -2851,7 +2213,7 @@ _PyObject_DebugMallocStats(FILE *out) if (p->ref.count == 0) { /* currently unused */ #ifdef Py_DEBUG - assert(pool_is_in_list(p, arenas[i].freepools)); + assert(pool_is_in_list(p, allarenas[i].freepools)); #endif continue; } diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 3ebf1947165873..062ab91ebac020 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -232,6 +232,8 @@ + + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 2f01b1482b3b1c..883634425e5e17 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -600,6 +600,12 @@ Include\internal + + Include\internal + + + Include\internal + Include\internal diff --git a/Tools/c-analyzer/cpython/globals-to-fix.tsv b/Tools/c-analyzer/cpython/globals-to-fix.tsv index 964ad927e06cfe..7318326ec519b3 100644 --- a/Tools/c-analyzer/cpython/globals-to-fix.tsv +++ b/Tools/c-analyzer/cpython/globals-to-fix.tsv @@ -423,19 +423,6 @@ Python/pathconfig.c - _Py_path_config - # object allocator Objects/obmalloc.c - _Py_tracemalloc_config - -Objects/obmalloc.c - arena_map_bot_count - -Objects/obmalloc.c - arena_map_mid_count - -Objects/obmalloc.c - arena_map_root - -Objects/obmalloc.c - arenas - -Objects/obmalloc.c - maxarenas - -Objects/obmalloc.c - narenas_currently_allocated - -Objects/obmalloc.c - narenas_highwater - -Objects/obmalloc.c - nfp2lasta - -Objects/obmalloc.c - ntimes_arena_allocated - -Objects/obmalloc.c - raw_allocated_blocks - -Objects/obmalloc.c - unused_arena_objects - -Objects/obmalloc.c - usable_arenas - -Objects/obmalloc.c - usedpools - Objects/obmalloc.c new_arena debug_stats - # pre-allocated memory From 83d397fdc8655f5d5b5a102985a74f2aad80fc15 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 7 Oct 2022 12:31:11 -0600 Subject: [PATCH 08/19] Pass the context to initializers explicitly. --- Include/internal/pycore_obmalloc.h | 2 - Include/internal/pycore_obmalloc_init.h | 52 ++++++++++++++----------- Include/internal/pycore_runtime_init.h | 4 +- Objects/obmalloc.c | 2 + Python/pylifecycle.c | 2 +- Python/pystate.c | 7 +++- 6 files changed, 40 insertions(+), 29 deletions(-) diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h index 94e6abf3df5a07..0315bddbf88965 100644 --- a/Include/internal/pycore_obmalloc.h +++ b/Include/internal/pycore_obmalloc.h @@ -411,8 +411,6 @@ struct _obmalloc_pools { poolp used[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8]; }; -#define usedpools (_PyRuntime.obmalloc.pools.used) - /*========================================================================== Arena management. 
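
Before the next hunk, which parameterizes the PT/PTA macros, a compilable sketch of the pointer trick they rely on may be useful. The toy_ names are invented; like the real code, the fudged pointer is not strictly legal C, but only its nextpool/prevpool slots are ever touched.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t toy_block;

struct toy_pool {
    union { toy_block *_padding; unsigned int count; } ref;
    toy_block *freeblock;
    struct toy_pool *nextpool;
    struct toy_pool *prevpool;
};

typedef struct toy_pool *toy_poolp;

static toy_poolp used[2 * 8];

/* Back up over the two leading pointer-sized members so that the fudged
 * header's nextpool/prevpool land exactly on used[2*x] and used[2*x+1]. */
#define TOY_PTA(x) \
    ((toy_poolp)((uint8_t *)&(used[2*(x)]) - 2*sizeof(toy_block *)))

int
main(void)
{
    for (int i = 0; i < 8; i++) {
        /* Empty headed circular list: both slots point at the header itself. */
        used[2*i] = TOY_PTA(i);
        used[2*i + 1] = TOY_PTA(i);
    }
    toy_poolp hdr = TOY_PTA(3);
    /* hdr->nextpool aliases used[6]; hdr->prevpool aliases used[7]. */
    assert(hdr->nextpool == hdr && hdr->prevpool == hdr);
    printf("size class 3 list is empty: %s\n",
           hdr->nextpool == hdr ? "yes" : "no");
    return 0;
}
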
diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h index 021fe8c88c21e9..83e72d0c3e08b7 100644 --- a/Include/internal/pycore_obmalloc_init.h +++ b/Include/internal/pycore_obmalloc_init.h @@ -9,44 +9,52 @@ extern "C" { #endif -#define PTA(x) ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(pymem_block *))) -#define PT(x) PTA(x), PTA(x) +#define PTA(pools, x) \ + ((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *))) +#define PT(p, x) PTA(p, x), PTA(p, x) -#define PT_8(start) \ - PT(start), PT(start+1), PT(start+2), PT(start+3), PT(start+4), PT(start+5), PT(start+6), PT(start+7) +#define PT_8(p, start) \ + PT(p, start), \ + PT(p, start+1), \ + PT(p, start+2), \ + PT(p, start+3), \ + PT(p, start+4), \ + PT(p, start+5), \ + PT(p, start+6), \ + PT(p, start+7) #if NB_SMALL_SIZE_CLASSES <= 8 -# define _obmalloc_pools_INIT \ - { PT_8(0) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0) } #elif NB_SMALL_SIZE_CLASSES <= 16 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8) } #elif NB_SMALL_SIZE_CLASSES <= 24 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16) } #elif NB_SMALL_SIZE_CLASSES <= 32 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16), PT_8(24) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24) } #elif NB_SMALL_SIZE_CLASSES <= 40 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32) } #elif NB_SMALL_SIZE_CLASSES <= 48 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40) } #elif NB_SMALL_SIZE_CLASSES <= 56 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40), PT_8(48) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48) } #elif NB_SMALL_SIZE_CLASSES <= 64 -# define _obmalloc_pools_INIT \ - { PT_8(0), PT_8(8), PT_8(16), PT_8(24), PT_8(32), PT_8(40), PT_8(48), PT_8(56) } +# define _obmalloc_pools_INIT(p) \ + { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48), PT_8(p, 56) } #else # error "NB_SMALL_SIZE_CLASSES should be less than 64" #endif -#define _obmalloc_state_INIT \ +#define _obmalloc_state_INIT(obmalloc) \ { \ .pools = { \ - .used = _obmalloc_pools_INIT, \ + .used = _obmalloc_pools_INIT(obmalloc.pools), \ }, \ } diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index ee3a60403eb65a..a7fc29b17fe1b5 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -17,7 +17,7 @@ extern "C" { in the runtime init code (in pystate.c and pylifecycle.c). */ -#define _PyRuntimeState_INIT \ +#define _PyRuntimeState_INIT(runtime) \ { \ .gilstate = { \ .check_enabled = 1, \ @@ -30,7 +30,7 @@ extern "C" { _pymem_allocators_debug_INIT, \ _pymem_allocators_obj_arena_INIT, \ }, \ - .obmalloc = _obmalloc_state_INIT, \ + .obmalloc = _obmalloc_state_INIT(runtime.obmalloc), \ .interpreters = { \ /* This prevents interpreters from getting created \ until _PyInterpreterState_Enable() is called. 
*/ \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index a03199e90f64b1..bf0e909977c33b 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -972,6 +972,8 @@ address_in_range(void *p, poolp pool) /*==========================================================================*/ +#define usedpools (_PyRuntime.obmalloc.pools.used) + // Called when freelist is exhausted. Extend the freelist if there is // space for a block. Otherwise, remove this pool from usedpools. static void diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index f3b64fe6d72880..a98374ab874cd5 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -103,7 +103,7 @@ _PyRuntimeState _PyRuntime #if defined(__linux__) && (defined(__GNUC__) || defined(__clang__)) __attribute__ ((section (".PyRuntime"))) #endif -= _PyRuntimeState_INIT; += _PyRuntimeState_INIT(_PyRuntime); _Py_COMP_DIAG_POP static int runtime_initialized = 0; diff --git a/Python/pystate.c b/Python/pystate.c index dd6d6e92eca89a..ef4e74d6243dcf 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -51,8 +51,11 @@ static void _PyThreadState_Delete(PyThreadState *tstate, int check_current); _Py_COMP_DIAG_PUSH _Py_COMP_DIAG_IGNORE_DEPR_DECLS /* We use "initial" if the runtime gets re-used - (e.g. Py_Finalize() followed by Py_Initialize(). */ -static const _PyRuntimeState initial = _PyRuntimeState_INIT; + (e.g. Py_Finalize() followed by Py_Initialize(). + Note that we initialize "initial" relative to _PyRuntime, + to ensure pre-initialized pointers point to the active + runtime state (and not "initial"). */ +static const _PyRuntimeState initial = _PyRuntimeState_INIT(_PyRuntime); _Py_COMP_DIAG_POP static int From 991dfc8507681b90a4cc4ee721b92267f3e67065 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 7 Nov 2022 17:06:44 -0700 Subject: [PATCH 09/19] Move the allocator groupings to pycore_allocators.h. 
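
For readers unfamiliar with these groupings: MALLOC_ALLOC, PYMALLOC_ALLOC and the PYDBG*_ALLOC macros are brace initializers for an allocator function table, i.e. a context pointer plus malloc/calloc/realloc/free hooks. Below is a self-contained sketch of that pattern, using an invented toy_allocator type rather than the real CPython one.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    void *ctx;
    void *(*malloc)(void *ctx, size_t size);
    void *(*calloc)(void *ctx, size_t nelem, size_t elsize);
    void *(*realloc)(void *ctx, void *ptr, size_t size);
    void  (*free)(void *ctx, void *ptr);
} toy_allocator;

static void *toy_raw_malloc(void *ctx, size_t size) { (void)ctx; return malloc(size ? size : 1); }
static void *toy_raw_calloc(void *ctx, size_t n, size_t e) { (void)ctx; return calloc(n ? n : 1, e ? e : 1); }
static void *toy_raw_realloc(void *ctx, void *p, size_t size) { (void)ctx; return realloc(p, size ? size : 1); }
static void  toy_raw_free(void *ctx, void *p) { (void)ctx; free(p); }

/* Same shape as the MALLOC_ALLOC grouping: a NULL context plus the four raw hooks. */
#define TOY_MALLOC_ALLOC {NULL, toy_raw_malloc, toy_raw_calloc, toy_raw_realloc, toy_raw_free}

int
main(void)
{
    toy_allocator alloc = TOY_MALLOC_ALLOC;
    void *p = alloc.malloc(alloc.ctx, 32);   /* every call routes through the table */
    p = alloc.realloc(alloc.ctx, p, 64);
    alloc.free(alloc.ctx, p);
    printf("dispatched through the allocator table\n");
    return 0;
}
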
--- Include/internal/pycore_allocators.h | 37 ++++++++++++++++++++++++++++ Include/internal/pycore_pymem_init.h | 37 ++-------------------------- Objects/obmalloc.c | 2 +- 3 files changed, 40 insertions(+), 36 deletions(-) diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h index 6c4860eb483892..b522b497f2373f 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_allocators.h @@ -54,6 +54,43 @@ _PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) free(ptr); } +#ifdef WITH_PYMALLOC +void* _PyObject_Malloc(void *ctx, size_t size); +void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); +void _PyObject_Free(void *ctx, void *p); +void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); +#endif + +#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} +#ifdef WITH_PYMALLOC +# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +#endif + +#define PYRAW_ALLOC MALLOC_ALLOC +#ifdef WITH_PYMALLOC +# define PYOBJ_ALLOC PYMALLOC_ALLOC +#else +# define PYOBJ_ALLOC MALLOC_ALLOC +#endif +#define PYMEM_ALLOC PYOBJ_ALLOC + +void* _PyMem_DebugRawMalloc(void *ctx, size_t size); +void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugRawFree(void *ctx, void *ptr); + +void* _PyMem_DebugMalloc(void *ctx, size_t size); +void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugFree(void *ctx, void *p); + +#define PYDBGRAW_ALLOC \ + {&_PyRuntime.allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC \ + {&_PyRuntime.allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC \ + {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + /***************************************/ /* the object allocator implementation */ diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index 87a7f38ad7858f..4962a87dd4d6ca 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -10,42 +10,9 @@ extern "C" { #include "pycore_allocators.h" -#ifdef WITH_PYMALLOC -void* _PyObject_Malloc(void *ctx, size_t size); -void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); -void _PyObject_Free(void *ctx, void *p); -void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); -#endif - -#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} -#ifdef WITH_PYMALLOC -# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} -#endif - -#define PYRAW_ALLOC MALLOC_ALLOC -#ifdef WITH_PYMALLOC -# define PYOBJ_ALLOC PYMALLOC_ALLOC -#else -# define PYOBJ_ALLOC MALLOC_ALLOC -#endif -#define PYMEM_ALLOC PYOBJ_ALLOC - -void* _PyMem_DebugRawMalloc(void *ctx, size_t size); -void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); -void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); -void _PyMem_DebugRawFree(void *ctx, void *ptr); - -void* _PyMem_DebugMalloc(void *ctx, size_t size); -void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); -void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); -void 
_PyMem_DebugFree(void *ctx, void *p); -#define PYDBGRAW_ALLOC \ - {&_PyRuntime.allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} -#define PYDBGMEM_ALLOC \ - {&_PyRuntime.allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} -#define PYDBGOBJ_ALLOC \ - {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +/********************************/ +/* the allocators' initializers */ #ifdef Py_DEBUG #define _pymem_allocators_standard_INIT \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index bf0e909977c33b..6c09b7e6592ef9 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -3,7 +3,7 @@ #include "pycore_code.h" // stats #include "pycore_pystate.h" // _PyInterpreterState_GET #include "pycore_pymem.h" -#include "pycore_pymem_init.h" +#include "pycore_allocators.h" #include From ac5cfb0e036fff326ae31c64acb590b17f50dec9 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 7 Nov 2022 17:08:07 -0700 Subject: [PATCH 10/19] Move the arena allocator to pycore_pymem_init.h. --- Include/internal/pycore_allocators.h | 60 +------------------------- Include/internal/pycore_pymem_init.h | 63 ++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 58 deletions(-) diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_allocators.h index b522b497f2373f..213795cd20a42e 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_allocators.h @@ -95,64 +95,8 @@ void _PyMem_DebugFree(void *ctx, void *p); /***************************************/ /* the object allocator implementation */ -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif +// It is only used to initialize the runtime, +// so it lives in pycore_pymem_init.h. 
#ifdef __cplusplus diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index 4962a87dd4d6ca..88be76b000cd9a 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -11,6 +11,69 @@ extern "C" { #include "pycore_allocators.h" +/***************************************/ +/* the object allocator implementation */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif + + /********************************/ /* the allocators' initializers */ From 9f32218a4e56a6cb0062976d8fc73edd3b50213c Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 7 Nov 2022 14:17:07 -0700 Subject: [PATCH 11/19] Add a NEWS entry. --- .../2022-11-07-14-16-59.gh-issue-81057.3uKlLQ.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 Misc/NEWS.d/next/Core and Builtins/2022-11-07-14-16-59.gh-issue-81057.3uKlLQ.rst diff --git a/Misc/NEWS.d/next/Core and Builtins/2022-11-07-14-16-59.gh-issue-81057.3uKlLQ.rst b/Misc/NEWS.d/next/Core and Builtins/2022-11-07-14-16-59.gh-issue-81057.3uKlLQ.rst new file mode 100644 index 00000000000000..90bc2401fd3971 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2022-11-07-14-16-59.gh-issue-81057.3uKlLQ.rst @@ -0,0 +1,3 @@ +The 18 global C variables holding the state of the allocators have been +moved to ``_PyRuntimeState``. This is a strictly internal change with no +change in behavior. From e5fc1394449dc1eed7c126d204817a2d14837367 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 10:31:12 -0700 Subject: [PATCH 12/19] Move the object allocator definitions to pycore_obmalloc*.h. --- Include/internal/pycore_obmalloc.h | 10 +++- Include/internal/pycore_obmalloc_init.h | 78 +++++++++++++++++++++++++ Include/internal/pycore_pymem.h | 5 -- Include/internal/pycore_pymem_init.h | 74 ----------------------- 4 files changed, 87 insertions(+), 80 deletions(-) diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h index 0315bddbf88965..ba7d73e168f6f7 100644 --- a/Include/internal/pycore_obmalloc.h +++ b/Include/internal/pycore_obmalloc.h @@ -407,8 +407,10 @@ on that C doesn't insert any padding anywhere in a pool_header at or before the prevpool member. 
**************************************************************************** */ +#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8) + struct _obmalloc_pools { - poolp used[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8]; + poolp used[OBMALLOC_USED_POOLS_SIZE]; }; @@ -654,6 +656,12 @@ struct _obmalloc_state { struct _obmalloc_usage usage; }; + +/* Allocate memory directly from the O/S virtual memory system, + * where supported. Otherwise fallback on malloc */ +void *_PyObject_VirtualAlloc(size_t size); +void _PyObject_VirtualFree(void *, size_t size); + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h index 83e72d0c3e08b7..130997e16cb13b 100644 --- a/Include/internal/pycore_obmalloc_init.h +++ b/Include/internal/pycore_obmalloc_init.h @@ -59,6 +59,84 @@ extern "C" { } +/***************************************/ +/* the low-level object allocator implementation */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif + + +/**************************************/ +/* the object allocator's initializer */ + +#ifdef MS_WINDOWS +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree } +#elif defined(ARENAS_USE_MMAP) +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaMmap, _PyObject_ArenaMunmap } +#else +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyObject_ArenaMalloc, _PyObject_ArenaFree } +#endif + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index ef40f16c972f29..56d5857f7a5388 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -115,11 +115,6 @@ struct _PyTraceMalloc_Config { PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config; -/* Allocate memory directly from the O/S virtual memory system, - * where supported. 
Otherwise fallback on malloc */ -void *_PyObject_VirtualAlloc(size_t size); -void _PyObject_VirtualFree(void *, size_t size); - /* This function returns the number of allocated memory blocks, regardless of size */ PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index 88be76b000cd9a..e46c4283e2029c 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -11,69 +11,6 @@ extern "C" { #include "pycore_allocators.h" -/***************************************/ -/* the object allocator implementation */ - -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - - /********************************/ /* the allocators' initializers */ @@ -100,17 +37,6 @@ _PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) {'o', PYOBJ_ALLOC}, \ } -#ifdef MS_WINDOWS -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree } -#elif defined(ARENAS_USE_MMAP) -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaMmap, _PyObject_ArenaMunmap } -#else -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaMalloc, _PyObject_ArenaFree } -#endif - #ifdef __cplusplus } From d61b4101c4b5e6ff0827bb13008ebea8c07bf979 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 10:37:33 -0700 Subject: [PATCH 13/19] pycore_allocators.h -> pycore_pymem_allocators.h --- .../{pycore_allocators.h => pycore_pymem_allocators.h} | 6 +++--- Include/internal/pycore_pymem_init.h | 2 +- Makefile.pre.in | 2 +- Objects/obmalloc.c | 2 +- PCbuild/pythoncore.vcxproj | 2 +- PCbuild/pythoncore.vcxproj.filters | 6 +++--- 6 files changed, 10 insertions(+), 10 deletions(-) rename Include/internal/{pycore_allocators.h => pycore_pymem_allocators.h} (96%) diff --git a/Include/internal/pycore_allocators.h b/Include/internal/pycore_pymem_allocators.h similarity index 96% rename from Include/internal/pycore_allocators.h rename to Include/internal/pycore_pymem_allocators.h index 213795cd20a42e..85eb1243e60e75 100644 --- a/Include/internal/pycore_allocators.h +++ b/Include/internal/pycore_pymem_allocators.h @@ -1,5 +1,5 @@ -#ifndef Py_INTERNAL_ALLOCATORS_H -#define Py_INTERNAL_ALLOCATORS_H +#ifndef Py_INTERNAL_PYMEM_ALLOCATORS_H +#define Py_INTERNAL_PYMEM_ALLOCATORS_H #ifdef __cplusplus extern "C" { #endif @@ -102,4 +102,4 @@ void _PyMem_DebugFree(void *ctx, void *p); #ifdef 
__cplusplus } #endif -#endif /* !Py_INTERNAL_ALLOCATORS_H */ +#endif /* !Py_INTERNAL_PYMEM_ALLOCATORS_H */ diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index e46c4283e2029c..9760f792c035c3 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -8,7 +8,7 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_allocators.h" +#include "pycore_pymem_allocators.h" /********************************/ diff --git a/Makefile.pre.in b/Makefile.pre.in index a2930a4179013f..08693a2e86c202 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1612,7 +1612,6 @@ PYTHON_HEADERS= \ $(srcdir)/Include/cpython/weakrefobject.h \ \ $(srcdir)/Include/internal/pycore_abstract.h \ - $(srcdir)/Include/internal/pycore_allocators.h \ $(srcdir)/Include/internal/pycore_asdl.h \ $(srcdir)/Include/internal/pycore_ast.h \ $(srcdir)/Include/internal/pycore_ast_state.h \ @@ -1659,6 +1658,7 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_pyhash.h \ $(srcdir)/Include/internal/pycore_pylifecycle.h \ $(srcdir)/Include/internal/pycore_pymem.h \ + $(srcdir)/Include/internal/pycore_pymem_allocators.h \ $(srcdir)/Include/internal/pycore_pymem_init.h \ $(srcdir)/Include/internal/pycore_pystate.h \ $(srcdir)/Include/internal/pycore_range.h \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 6c09b7e6592ef9..847b7da84a229b 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -3,7 +3,7 @@ #include "pycore_code.h" // stats #include "pycore_pystate.h" // _PyInterpreterState_GET #include "pycore_pymem.h" -#include "pycore_allocators.h" +#include "pycore_pymem_allocators.h" #include diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 062ab91ebac020..432a60b3589855 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -193,7 +193,6 @@ - @@ -240,6 +239,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 883634425e5e17..58405ed386285c 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -486,9 +486,6 @@ Include\internal - - Include\internal - Include\internal @@ -624,6 +621,9 @@ Include\internal + + Include\internal + Include\internal From 0efcef7722877ddf7da1424405038d9e579121ca Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 10:46:01 -0700 Subject: [PATCH 14/19] Move the arena allocator impl to its own file. 
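The functions gathered into the new header are consumed only through the
_pymem_allocators_obj_arena_INIT macro, which fills the arena slot of the
runtime's allocator state. A minimal sketch of that relationship (the
variable name is illustrative, and PyObjectArenaAllocator is assumed to be
the usual public {ctx, alloc, free} struct):

    #include "Python.h"                  /* PyObjectArenaAllocator */
    #include "pycore_obmalloc_init.h"    /* _pymem_allocators_obj_arena_INIT */

    /* Expands to one of the per-platform pairs defined in the new header:
     * VirtualAlloc/VirtualFree, mmap/munmap, or plain malloc/free. */
    static PyObjectArenaAllocator example_arena =
            _pymem_allocators_obj_arena_INIT;
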
--- Include/internal/pycore_obmalloc_allocators.h | 78 +++++++++++++++++++ Include/internal/pycore_obmalloc_init.h | 68 ++-------------- Include/internal/pycore_pymem_allocators.h | 7 -- Makefile.pre.in | 1 + PCbuild/pythoncore.vcxproj | 1 + PCbuild/pythoncore.vcxproj.filters | 3 + 6 files changed, 88 insertions(+), 70 deletions(-) create mode 100644 Include/internal/pycore_obmalloc_allocators.h diff --git a/Include/internal/pycore_obmalloc_allocators.h b/Include/internal/pycore_obmalloc_allocators.h new file mode 100644 index 00000000000000..4795a01648b258 --- /dev/null +++ b/Include/internal/pycore_obmalloc_allocators.h @@ -0,0 +1,78 @@ +#ifndef Py_INTERNAL_OBMALLOC_ALLOCATORS_H +#define Py_INTERNAL_OBMALLOC_ALLOCATORS_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef Py_BUILD_CORE +# error "this header requires Py_BUILD_CORE define" +#endif + + +/***************************************/ +/* the low-level object allocator implementation */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +#ifdef MS_WINDOWS +static void * +_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) +{ + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +} + +static void +_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, + size_t Py_UNUSED(size)) +{ + VirtualFree(ptr, 0, MEM_RELEASE); +} + +#elif defined(ARENAS_USE_MMAP) +static void * +_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) +{ + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +} + +static void +_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + munmap(ptr, size); +} + +#else +static void * +_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) +{ + return malloc(size); +} + +static void +_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) +{ + free(ptr); +} +#endif + + +#ifdef __cplusplus +} +#endif +#endif // !Py_INTERNAL_OBMALLOC_ALLOCATORS_H diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h index 130997e16cb13b..804b8a2c5ce10e 100644 --- a/Include/internal/pycore_obmalloc_init.h +++ b/Include/internal/pycore_obmalloc_init.h @@ -8,6 +8,11 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_obmalloc_allocators.h" + + +/****************************************************/ +/* the default object allocator's state initializer */ #define PTA(pools, x) \ ((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *))) @@ -59,69 +64,6 @@ extern "C" { } -/***************************************/ -/* the low-level object allocator implementation */ - -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - 
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - - /**************************************/ /* the object allocator's initializer */ diff --git a/Include/internal/pycore_pymem_allocators.h b/Include/internal/pycore_pymem_allocators.h index 85eb1243e60e75..acfb8d350391d5 100644 --- a/Include/internal/pycore_pymem_allocators.h +++ b/Include/internal/pycore_pymem_allocators.h @@ -92,13 +92,6 @@ void _PyMem_DebugFree(void *ctx, void *p); {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} -/***************************************/ -/* the object allocator implementation */ - -// It is only used to initialize the runtime, -// so it lives in pycore_pymem_init.h. - - #ifdef __cplusplus } #endif diff --git a/Makefile.pre.in b/Makefile.pre.in index 08693a2e86c202..26ba20574e1cc0 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1651,6 +1651,7 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_namespace.h \ $(srcdir)/Include/internal/pycore_object.h \ $(srcdir)/Include/internal/pycore_obmalloc.h \ + $(srcdir)/Include/internal/pycore_obmalloc_allocators.h \ $(srcdir)/Include/internal/pycore_obmalloc_init.h \ $(srcdir)/Include/internal/pycore_pathconfig.h \ $(srcdir)/Include/internal/pycore_pyarena.h \ diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index 432a60b3589855..ed2cbaaeaede5e 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -232,6 +232,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 58405ed386285c..1ee47533d95708 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -600,6 +600,9 @@ Include\internal + + Include\internal + Include\internal From ec01f9954c3d7b9bf66f3fc4e0138ad159c02a18 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 10:50:12 -0700 Subject: [PATCH 15/19] Move _Py_GetAllocatedBlocks() over. 
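Only the declaration moves; the definition stays in Objects/obmalloc.c. For
context, a hedged sketch of the main consumer, roughly what
Python/sysmodule.c does to expose the counter as sys.getallocatedblocks()
(the helper name below is illustrative):

    #include "Python.h"
    #include "pycore_obmalloc.h"    /* _Py_GetAllocatedBlocks() now lives here */

    static PyObject *
    get_allocated_blocks(PyObject *module, PyObject *Py_UNUSED(ignored))
    {
        /* Count of live allocations made through the object allocator. */
        return PyLong_FromSsize_t(_Py_GetAllocatedBlocks());
    }
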
--- Include/internal/pycore_obmalloc.h | 5 +++++ Include/internal/pycore_pymem.h | 3 --- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h index ba7d73e168f6f7..15e7bace424e84 100644 --- a/Include/internal/pycore_obmalloc.h +++ b/Include/internal/pycore_obmalloc.h @@ -662,6 +662,11 @@ struct _obmalloc_state { void *_PyObject_VirtualAlloc(size_t size); void _PyObject_VirtualFree(void *, size_t size); + +/* This function returns the number of allocated memory blocks, regardless of size */ +PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index 56d5857f7a5388..9e110a59a5039d 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -115,9 +115,6 @@ struct _PyTraceMalloc_Config { PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config; -/* This function returns the number of allocated memory blocks, regardless of size */ -PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); - /* Macros */ #ifdef WITH_PYMALLOC // Export the symbol for the 3rd party guppy3 project From a1ff9b0deed26a0ac653ce44285ff3d1bf796424 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 10:55:22 -0700 Subject: [PATCH 16/19] Move _PyObject_DebugMallocStat() over. --- Include/internal/pycore_obmalloc.h | 6 ++++++ Include/internal/pycore_pymem.h | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h index 15e7bace424e84..1f79797348b452 100644 --- a/Include/internal/pycore_obmalloc.h +++ b/Include/internal/pycore_obmalloc.h @@ -667,6 +667,12 @@ void _PyObject_VirtualFree(void *, size_t size); PyAPI_FUNC(Py_ssize_t) _Py_GetAllocatedBlocks(void); +#ifdef WITH_PYMALLOC +// Export the symbol for the 3rd party guppy3 project +PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out); +#endif + + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index 9e110a59a5039d..b042a4cb268e54 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -115,12 +115,6 @@ struct _PyTraceMalloc_Config { PyAPI_DATA(struct _PyTraceMalloc_Config) _Py_tracemalloc_config; -/* Macros */ -#ifdef WITH_PYMALLOC -// Export the symbol for the 3rd party guppy3 project -PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out); -#endif - #ifdef __cplusplus } #endif From 3172cbeaf0b6efa6152fa7b6ce67f3f16e10b3cf Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 8 Nov 2022 11:26:42 -0700 Subject: [PATCH 17/19] Restore uint for obmalloc. --- Include/internal/pycore_obmalloc.h | 14 ++++++++++++-- Objects/obmalloc.c | 10 ++++++---- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h index 1f79797348b452..93349d89c6ab52 100644 --- a/Include/internal/pycore_obmalloc.h +++ b/Include/internal/pycore_obmalloc.h @@ -8,6 +8,13 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif + +typedef unsigned int pymem_uint; /* assuming >= 16 bits */ + +#undef uint +#define uint pymem_uint + + /* An object allocator for Python. Here is an introduction to the layers of the Python memory architecture, @@ -130,7 +137,7 @@ extern "C" { #endif /* Return the number of bytes in size class I, as a uint. 
*/ -#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT) +#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT) /* * Max size threshold below which malloc requests are considered to be @@ -307,7 +314,7 @@ struct arena_object { #define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE)) /* Return total number of blocks in pool of size index I, as a uint. */ -#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) +#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I)) /*==========================================================================*/ @@ -657,6 +664,9 @@ struct _obmalloc_state { }; +#undef uint + + /* Allocate memory directly from the O/S virtual memory system, * where supported. Otherwise fallback on malloc */ void *_PyObject_VirtualAlloc(size_t size); diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 847b7da84a229b..6694de1d4ed850 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -1,22 +1,24 @@ #include "Python.h" -#include "pycore_pymem.h" // _PyTraceMalloc_Config #include "pycore_code.h" // stats #include "pycore_pystate.h" // _PyInterpreterState_GET + +#include "pycore_obmalloc.h" #include "pycore_pymem.h" #include "pycore_pymem_allocators.h" #include +#undef uint +#define uint pymem_uint + + /* Defined in tracemalloc.c */ extern void _PyMem_DumpTraceback(int fd, const void *ptr); /* Python's malloc wrappers (see pymem.h) */ -#undef uint -#define uint unsigned int /* assuming >= 16 bits */ - static void _PyObject_DebugDumpAddress(const void *p); static void _PyMem_DebugCheckAddress(const char *func, char api_id, const void *p); From adc60e2fd0604f2b688b3a6d999346a6f1b30fb2 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 9 Nov 2022 14:28:31 -0700 Subject: [PATCH 18/19] Do not define the functions in header files. 
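Static definitions in headers are duplicated into every translation unit
that includes them and can trigger unused-function warnings for the
compiled-out variants. The patch switches to the conventional split: the
*_init.h headers only declare the functions and build initializer macros
from their addresses, while the single definitions live in
Objects/obmalloc.c. A condensed sketch with one representative function
(the calloc/realloc/debug/arena functions follow the same shape):

    /* pycore_pymem_init.h: declarations plus initializer macros. */
    extern void * _PyMem_RawMalloc(void *, size_t);
    extern void   _PyMem_RawFree(void *, void *);
    #define PYRAW_ALLOC \
        {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}

    /* Objects/obmalloc.c: the one and only definition. */
    void *
    _PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size)
    {
        if (size == 0) {
            size = 1;    /* malloc(0) may legitimately return NULL */
        }
        return malloc(size);
    }
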
--- Include/internal/pycore_obmalloc_init.h | 17 --- Include/internal/pycore_pymem_init.h | 53 ++++++++- Include/internal/pycore_runtime_init.h | 2 +- Objects/obmalloc.c | 145 +++++++++++++++++++++++- 4 files changed, 192 insertions(+), 25 deletions(-) diff --git a/Include/internal/pycore_obmalloc_init.h b/Include/internal/pycore_obmalloc_init.h index 804b8a2c5ce10e..c0fb057d06652b 100644 --- a/Include/internal/pycore_obmalloc_init.h +++ b/Include/internal/pycore_obmalloc_init.h @@ -8,8 +8,6 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_obmalloc_allocators.h" - /****************************************************/ /* the default object allocator's state initializer */ @@ -64,21 +62,6 @@ extern "C" { } -/**************************************/ -/* the object allocator's initializer */ - -#ifdef MS_WINDOWS -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree } -#elif defined(ARENAS_USE_MMAP) -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaMmap, _PyObject_ArenaMunmap } -#else -# define _pymem_allocators_obj_arena_INIT \ - { NULL, _PyObject_ArenaMalloc, _PyObject_ArenaFree } -#endif - - #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_pymem_init.h b/Include/internal/pycore_pymem_init.h index 9760f792c035c3..78232738cb09d5 100644 --- a/Include/internal/pycore_pymem_init.h +++ b/Include/internal/pycore_pymem_init.h @@ -8,21 +8,59 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_pymem_allocators.h" +#include "pycore_pymem.h" /********************************/ /* the allocators' initializers */ +extern void * _PyMem_RawMalloc(void *, size_t); +extern void * _PyMem_RawCalloc(void *, size_t, size_t); +extern void * _PyMem_RawRealloc(void *, void *, size_t); +extern void _PyMem_RawFree(void *, void *); +#define PYRAW_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} + +#ifdef WITH_PYMALLOC +extern void* _PyObject_Malloc(void *, size_t); +extern void* _PyObject_Calloc(void *, size_t, size_t); +extern void _PyObject_Free(void *, void *); +extern void* _PyObject_Realloc(void *, void *, size_t); +# define PYOBJ_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +#else +# define PYOBJ_ALLOC PYRAW_ALLOC +#endif // WITH_PYMALLOC + +#define PYMEM_ALLOC PYOBJ_ALLOC + +extern void* _PyMem_DebugRawMalloc(void *, size_t); +extern void* _PyMem_DebugRawCalloc(void *, size_t, size_t); +extern void* _PyMem_DebugRawRealloc(void *, void *, size_t); +extern void _PyMem_DebugRawFree(void *, void *); + +extern void* _PyMem_DebugMalloc(void *, size_t); +extern void* _PyMem_DebugCalloc(void *, size_t, size_t); +extern void* _PyMem_DebugRealloc(void *, void *, size_t); +extern void _PyMem_DebugFree(void *, void *); + +#define PYDBGRAW_ALLOC(runtime) \ + {&(runtime).allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC(runtime) \ + {&(runtime).allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC(runtime) \ + {&(runtime).allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + +extern void * _PyMem_ArenaAlloc(void *, size_t); +extern void _PyMem_ArenaFree(void *, void *, size_t); + #ifdef Py_DEBUG -#define _pymem_allocators_standard_INIT \ +# define 
_pymem_allocators_standard_INIT(runtime) \ { \ - PYDBGRAW_ALLOC, \ - PYDBGMEM_ALLOC, \ - PYDBGOBJ_ALLOC, \ + PYDBGRAW_ALLOC(runtime), \ + PYDBGMEM_ALLOC(runtime), \ + PYDBGOBJ_ALLOC(runtime), \ } #else -#define _pymem_allocators_standard_INIT \ +# define _pymem_allocators_standard_INIT(runtime) \ { \ PYRAW_ALLOC, \ PYMEM_ALLOC, \ @@ -37,6 +75,9 @@ extern "C" { {'o', PYOBJ_ALLOC}, \ } +# define _pymem_allocators_obj_arena_INIT \ + { NULL, _PyMem_ArenaAlloc, _PyMem_ArenaFree } + #ifdef __cplusplus } diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index a7fc29b17fe1b5..80dff64fb2f027 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -26,7 +26,7 @@ extern "C" { .autoTSSkey = Py_tss_NEEDS_INIT, \ }, \ .allocators = { \ - _pymem_allocators_standard_INIT, \ + _pymem_allocators_standard_INIT(runtime), \ _pymem_allocators_debug_INIT, \ _pymem_allocators_obj_arena_INIT, \ }, \ diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 6694de1d4ed850..481cbde9fd3170 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -4,8 +4,8 @@ #include "pycore_obmalloc.h" #include "pycore_pymem.h" -#include "pycore_pymem_allocators.h" +#include // malloc() #include @@ -24,6 +24,149 @@ static void _PyMem_DebugCheckAddress(const char *func, char api_id, const void * static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain); + +/***************************************/ +/* low-level allocator implementations */ +/***************************************/ + +/* the default raw allocator (wraps malloc) */ + +void * +_PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size) +{ + /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL + for malloc(0), which would be treated as an error. Some platforms would + return a pointer with no memory behind it, which would break pymalloc. + To solve these problems, allocate an extra byte. */ + if (size == 0) + size = 1; + return malloc(size); +} + +void * +_PyMem_RawCalloc(void *Py_UNUSED(ctx), size_t nelem, size_t elsize) +{ + /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL + for calloc(0, 0), which would be treated as an error. Some platforms + would return a pointer with no memory behind it, which would break + pymalloc. To solve these problems, allocate an extra byte. */ + if (nelem == 0 || elsize == 0) { + nelem = 1; + elsize = 1; + } + return calloc(nelem, elsize); +} + +void * +_PyMem_RawRealloc(void *Py_UNUSED(ctx), void *ptr, size_t size) +{ + if (size == 0) + size = 1; + return realloc(ptr, size); +} + +void +_PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) +{ + free(ptr); +} + +#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} +#define PYRAW_ALLOC MALLOC_ALLOC + +/* the default object allocator */ + +// The actual implementation is further down. + +#ifdef WITH_PYMALLOC +void* _PyObject_Malloc(void *ctx, size_t size); +void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); +void _PyObject_Free(void *ctx, void *p); +void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); +# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} +# define PYOBJ_ALLOC PYMALLOC_ALLOC +#else +# define PYOBJ_ALLOC MALLOC_ALLOC +#endif // WITH_PYMALLOC + +#define PYMEM_ALLOC PYOBJ_ALLOC + +/* the default debug allocators */ + +// The actual implementation is further down. 
+ +void* _PyMem_DebugRawMalloc(void *ctx, size_t size); +void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugRawFree(void *ctx, void *ptr); + +void* _PyMem_DebugMalloc(void *ctx, size_t size); +void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); +void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); +void _PyMem_DebugFree(void *ctx, void *p); + +#define PYDBGRAW_ALLOC \ + {&_PyRuntime.allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} +#define PYDBGMEM_ALLOC \ + {&_PyRuntime.allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} +#define PYDBGOBJ_ALLOC \ + {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} + +/* the low-level virtual memory allocator */ + +#ifdef WITH_PYMALLOC +# ifdef MS_WINDOWS +# include +# elif defined(HAVE_MMAP) +# include +# ifdef MAP_ANONYMOUS +# define ARENAS_USE_MMAP +# endif +# endif +#endif + +void * +_PyMem_ArenaAlloc(void *Py_UNUSED(ctx), size_t size) +{ +#ifdef MS_WINDOWS + return VirtualAlloc(NULL, size, + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#elif defined(ARENAS_USE_MMAP) + void *ptr; + ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return NULL; + assert(ptr != NULL); + return ptr; +#else + return malloc(size); +#endif +} + +void +_PyMem_ArenaFree(void *Py_UNUSED(ctx), void *ptr, +#if defined(ARENAS_USE_MMAP) + size_t size +#else + size_t Py_UNUSED(size) +#endif +) +{ +#ifdef MS_WINDOWS + VirtualFree(ptr, 0, MEM_RELEASE); +#elif defined(ARENAS_USE_MMAP) + munmap(ptr, size); +#else + free(ptr); +#endif +} + +/*******************************************/ +/* end low-level allocator implementations */ +/*******************************************/ + + #if defined(__has_feature) /* Clang */ # if __has_feature(address_sanitizer) /* is ASAN enabled? */ # define _Py_NO_SANITIZE_ADDRESS \ From 5262207d2f8906bee1c46b3a6f178b2bb0bb4124 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Wed, 9 Nov 2022 12:14:17 -0700 Subject: [PATCH 19/19] Drop pycore_*_allocators.h. 
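With the implementations consolidated into Objects/obmalloc.c and the
declarations into pycore_pymem_init.h, the two transitional headers have no
remaining users, so delete them. The public allocator API is untouched by
the whole series; a hedged sketch of that unchanged surface (the helper
below is illustrative, not CPython code):

    #include "Python.h"

    static void
    inspect_arena_allocator(void)
    {
        PyObjectArenaAllocator arena;
        PyObject_GetArenaAllocator(&arena);
        /* arena.alloc / arena.free are the build-time selected pair
         * (VirtualAlloc/VirtualFree, mmap/munmap, or malloc/free),
         * whose state now lives in _PyRuntimeState rather than in
         * file-level globals. */
    }
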
--- Include/internal/pycore_obmalloc_allocators.h | 78 --------------- Include/internal/pycore_pymem_allocators.h | 98 ------------------- Makefile.pre.in | 2 - PCbuild/pythoncore.vcxproj | 2 - PCbuild/pythoncore.vcxproj.filters | 6 -- 5 files changed, 186 deletions(-) delete mode 100644 Include/internal/pycore_obmalloc_allocators.h delete mode 100644 Include/internal/pycore_pymem_allocators.h diff --git a/Include/internal/pycore_obmalloc_allocators.h b/Include/internal/pycore_obmalloc_allocators.h deleted file mode 100644 index 4795a01648b258..00000000000000 --- a/Include/internal/pycore_obmalloc_allocators.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef Py_INTERNAL_OBMALLOC_ALLOCATORS_H -#define Py_INTERNAL_OBMALLOC_ALLOCATORS_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - - -/***************************************/ -/* the low-level object allocator implementation */ - -#ifdef WITH_PYMALLOC -# ifdef MS_WINDOWS -# include -# elif defined(HAVE_MMAP) -# include -# ifdef MAP_ANONYMOUS -# define ARENAS_USE_MMAP -# endif -# endif -#endif - -#ifdef MS_WINDOWS -static void * -_PyObject_ArenaVirtualAlloc(void *Py_UNUSED(ctx), size_t size) -{ - return VirtualAlloc(NULL, size, - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} - -static void -_PyObject_ArenaVirtualFree(void *Py_UNUSED(ctx), void *ptr, - size_t Py_UNUSED(size)) -{ - VirtualFree(ptr, 0, MEM_RELEASE); -} - -#elif defined(ARENAS_USE_MMAP) -static void * -_PyObject_ArenaMmap(void *Py_UNUSED(ctx), size_t size) -{ - void *ptr; - ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - if (ptr == MAP_FAILED) - return NULL; - assert(ptr != NULL); - return ptr; -} - -static void -_PyObject_ArenaMunmap(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - munmap(ptr, size); -} - -#else -static void * -_PyObject_ArenaMalloc(void *Py_UNUSED(ctx), size_t size) -{ - return malloc(size); -} - -static void -_PyObject_ArenaFree(void *Py_UNUSED(ctx), void *ptr, size_t Py_UNUSED(size)) -{ - free(ptr); -} -#endif - - -#ifdef __cplusplus -} -#endif -#endif // !Py_INTERNAL_OBMALLOC_ALLOCATORS_H diff --git a/Include/internal/pycore_pymem_allocators.h b/Include/internal/pycore_pymem_allocators.h deleted file mode 100644 index acfb8d350391d5..00000000000000 --- a/Include/internal/pycore_pymem_allocators.h +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef Py_INTERNAL_PYMEM_ALLOCATORS_H -#define Py_INTERNAL_PYMEM_ALLOCATORS_H -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef Py_BUILD_CORE -# error "this header requires Py_BUILD_CORE define" -#endif - -#include // malloc() - - -/*********************************************/ -/* the (raw) malloc allocator implementation */ - -static void * -_PyMem_RawMalloc(void *Py_UNUSED(ctx), size_t size) -{ - /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL - for malloc(0), which would be treated as an error. Some platforms would - return a pointer with no memory behind it, which would break pymalloc. - To solve these problems, allocate an extra byte. */ - if (size == 0) - size = 1; - return malloc(size); -} - -static void * -_PyMem_RawCalloc(void *Py_UNUSED(ctx), size_t nelem, size_t elsize) -{ - /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL - for calloc(0, 0), which would be treated as an error. Some platforms - would return a pointer with no memory behind it, which would break - pymalloc. To solve these problems, allocate an extra byte. 
*/ - if (nelem == 0 || elsize == 0) { - nelem = 1; - elsize = 1; - } - return calloc(nelem, elsize); -} - -static void * -_PyMem_RawRealloc(void *Py_UNUSED(ctx), void *ptr, size_t size) -{ - if (size == 0) - size = 1; - return realloc(ptr, size); -} - -static void -_PyMem_RawFree(void *Py_UNUSED(ctx), void *ptr) -{ - free(ptr); -} - -#ifdef WITH_PYMALLOC -void* _PyObject_Malloc(void *ctx, size_t size); -void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize); -void _PyObject_Free(void *ctx, void *p); -void* _PyObject_Realloc(void *ctx, void *ptr, size_t size); -#endif - -#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree} -#ifdef WITH_PYMALLOC -# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free} -#endif - -#define PYRAW_ALLOC MALLOC_ALLOC -#ifdef WITH_PYMALLOC -# define PYOBJ_ALLOC PYMALLOC_ALLOC -#else -# define PYOBJ_ALLOC MALLOC_ALLOC -#endif -#define PYMEM_ALLOC PYOBJ_ALLOC - -void* _PyMem_DebugRawMalloc(void *ctx, size_t size); -void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize); -void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size); -void _PyMem_DebugRawFree(void *ctx, void *ptr); - -void* _PyMem_DebugMalloc(void *ctx, size_t size); -void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize); -void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size); -void _PyMem_DebugFree(void *ctx, void *p); - -#define PYDBGRAW_ALLOC \ - {&_PyRuntime.allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree} -#define PYDBGMEM_ALLOC \ - {&_PyRuntime.allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} -#define PYDBGOBJ_ALLOC \ - {&_PyRuntime.allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree} - - -#ifdef __cplusplus -} -#endif -#endif /* !Py_INTERNAL_PYMEM_ALLOCATORS_H */ diff --git a/Makefile.pre.in b/Makefile.pre.in index 26ba20574e1cc0..5bd7c017728651 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1651,7 +1651,6 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_namespace.h \ $(srcdir)/Include/internal/pycore_object.h \ $(srcdir)/Include/internal/pycore_obmalloc.h \ - $(srcdir)/Include/internal/pycore_obmalloc_allocators.h \ $(srcdir)/Include/internal/pycore_obmalloc_init.h \ $(srcdir)/Include/internal/pycore_pathconfig.h \ $(srcdir)/Include/internal/pycore_pyarena.h \ @@ -1659,7 +1658,6 @@ PYTHON_HEADERS= \ $(srcdir)/Include/internal/pycore_pyhash.h \ $(srcdir)/Include/internal/pycore_pylifecycle.h \ $(srcdir)/Include/internal/pycore_pymem.h \ - $(srcdir)/Include/internal/pycore_pymem_allocators.h \ $(srcdir)/Include/internal/pycore_pymem_init.h \ $(srcdir)/Include/internal/pycore_pystate.h \ $(srcdir)/Include/internal/pycore_range.h \ diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index ed2cbaaeaede5e..768f5f1ccc5181 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -232,7 +232,6 @@ - @@ -240,7 +239,6 @@ - diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index 1ee47533d95708..fd03a7ac450f9d 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -600,9 +600,6 @@ Include\internal - - Include\internal - Include\internal @@ -624,9 +621,6 @@ Include\internal - - Include\internal - Include\internal