8000 gh-132917: Check resident set size (RSS) before GC trigger. by nascheme · Pull Request #133399 · python/cpython · GitHub
[go: up one dir, main page]

Skip to content

gh-132917: Check resident set size (RSS) before GC trigger. #133399

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
May 5, 2025
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions Doc/library/gc.rst
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,11 @@ The :mod:`gc` module provides the following functions:
starts. For each collection, all the objects in the young generation and some
fraction of the old generation is collected.

In the free-threaded build, the increase in process resident set size (RSS)
is also checked before running the collector. If the RSS has not increased
by 10% since the last collection and the net number of object allocations
has not exceeded 40 times *threshold0*, the collection is not run.

The fraction of the old generation that is collected is **inversely** proportional
to *threshold1*. The larger *threshold1* is, the slower objects in the old generation
are collected.
Expand Down
10 changes: 10 additions & 0 deletions Include/internal/pycore_interp_structs.h
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,16 @@ struct _gc_runtime_state {

/* True if gc.freeze() has been used. */
int freeze_active;

/* Resident set size (RSS) of the process after last GC. */
Py_ssize_t last_rss;

/* This accumulates the new object count whenever collection is deferred
due to the RSS increase condition not being met. Reset on collection. */
Py_ssize_t deferred_count;

/* Mutex held for gc_should_collect_rss(). */
PyMutex mutex;
#endif
};

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
For the free-threaded build, check the process resident set size (RSS)
increase before triggering a full automatic garbage collection. If the RSS
has not increased by 10% since the last collection then it is deferred.
210 changes: 205 additions & 5 deletions Python/gc_free_threading.c
A3E2
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,29 @@

#include "pydtrace.h"

// Platform-specific includes for get_current_rss().
#ifdef _WIN32
#include <windows.h>
#include <psapi.h> // For GetProcessMemoryInfo
#elif defined(__linux__)
#include <unistd.h> // For sysconf, getpid
#elif defined(__APPLE__)
#include <mach/mach.h>
#include <unistd.h> // For sysconf, getpid
#elif defined(__FreeBSD__)
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h> // Requires sys/user.h for kinfo_proc definition
#include <kvm.h>
#include <unistd.h> // For sysconf, getpid
#include <fcntl.h> // For O_RDONLY
#include <limits.h> // For _POSIX2_LINE_MAX
#elif defined(__OpenBSD__)
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h> // For kinfo_proc
#include <unistd.h> // For sysconf, getpid
#endif

// enable the "mark alive" pass of GC
#define GC_ENABLE_MARK_ALIVE 1
Expand Down Expand Up @@ -1878,6 +1901,173 @@ cleanup_worklist(struct worklist *worklist)
}
}

// Return the current resident set size (RSS) of the process, in units of KB.
// Returns -1 if this operation is not supported or on failure.  A -1 result
// makes the caller (gc_should_collect_rss) fall back to always collecting.
static Py_ssize_t
get_current_rss(void)
{
#ifdef _WIN32
    // Windows implementation using GetProcessMemoryInfo
    PROCESS_MEMORY_COUNTERS pmc;
    HANDLE hProcess = GetCurrentProcess();
    if (NULL == hProcess) {
        // Should not happen for the current process
        return -1;
    }

    // GetProcessMemoryInfo returns non-zero on success
    if (GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc))) {
        // pmc.WorkingSetSize is in bytes. Convert to KB.
        return (Py_ssize_t)(pmc.WorkingSetSize / 1024);
    } else {
        // NOTE(review): GetCurrentProcess() returns a pseudo-handle that
        // does not need to be closed, so this CloseHandle() is unnecessary
        // (and is intentionally not done on the success path) — harmless.
        CloseHandle(hProcess);
        return -1;
    }

#elif __linux__
    // Linux implementation using /proc/self/statm
    long page_size_bytes = sysconf(_SC_PAGE_SIZE);
    if (page_size_bytes <= 0) {
        return -1;
    }

    FILE *fp = fopen("/proc/self/statm", "r");
    if (fp == NULL) {
        return -1;
    }

    // Second number in statm is the resident size, in pages.  The "%*d"
    // skips the first field (total program size).
    long rss_pages;
    if (fscanf(fp, "%*d %ld", &rss_pages) != 1) {
        fclose(fp);
        return -1;
    }
    fclose(fp);

    // Convert unit to KB
    return (Py_ssize_t)rss_pages * (page_size_bytes / 1024);

#elif defined(__APPLE__)
    // --- MacOS (Darwin) ---
    // Query the Mach kernel for basic task info, which reports RSS in bytes.
    mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
    mach_task_basic_info_data_t info;
    kern_return_t kerr;

    kerr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &count);
    if (kerr != KERN_SUCCESS) {
        return -1;
    }
    // info.resident_size is in bytes. Convert to KB.
    return (Py_ssize_t)(info.resident_size / 1024);

#elif defined(__FreeBSD__)
    long page_size_kb = sysconf(_SC_PAGESIZE) / 1024;
    if (page_size_kb <= 0) {
        return -1;
    }

    // Using /dev/null for vmcore avoids needing dump file.
    // NULL for kernel file uses running kernel.
    char errbuf[_POSIX2_LINE_MAX]; // For kvm error messages
    kvm_t *kd = kvm_openfiles(NULL, "/dev/null", NULL, O_RDONLY, errbuf);
    if (kd == NULL) {
        return -1;
    }

    // KERN_PROC_PID filters for the specific process ID
    // n_procs will contain the number of processes returned (should be 1 or 0)
    pid_t pid = getpid();
    int n_procs;
    struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_PID, pid, &n_procs);
    if (kp == NULL) {
        kvm_close(kd);
        return -1;
    }

    Py_ssize_t rss_kb = -1;
    if (n_procs > 0) {
        // kp[0] contains the info for our process
        // ki_rssize is in pages. Convert to KB.
        rss_kb = (Py_ssize_t)kp->ki_rssize * page_size_kb;
    } else {
        // Process with PID not found, shouldn't happen for self.
        rss_kb = -1;
    }

    kvm_close(kd);
    return rss_kb;

#elif defined(__OpenBSD__)
    long page_size_kb = sysconf(_SC_PAGESIZE) / 1024;
    if (page_size_kb <= 0) {
        return -1;
    }

    // OpenBSD reports per-process info via the KERN_PROC sysctl rather
    // than kvm_getprocs().
    struct kinfo_proc kp;
    pid_t pid = getpid();
    int mib[6];
    size_t len = sizeof(kp);

    mib[0] = CTL_KERN;
    mib[1] = KERN_PROC;
    mib[2] = KERN_PROC_PID;
    mib[3] = pid;
    mib[4] = sizeof(struct kinfo_proc); // size of the structure we want
    mib[5] = 1; // want 1 structure back
    if (sysctl(mib, 6, &kp, &len, NULL, 0) == -1) {
        return -1;
    }

    if (len > 0) {
        // p_vm_rssize is in pages on OpenBSD. Convert to KB.
        return (Py_ssize_t)kp.p_vm_rssize * page_size_kb;
    } else {
        // Process info not returned
        return -1;
    }
#else
    // Unsupported platform
    return -1;
#endif
}

// Decide, based on process memory growth, whether an automatic collection
// that has already passed the object-count thresholds should actually run.
// Free-threaded build only.  Returns true to run the collector now; returns
// false to defer it, in which case the young object count is folded into
// gcstate->deferred_count so the thresholds are not re-triggered immediately.
static bool
gc_should_collect_rss(GCState *gcstate)
{
    Py_ssize_t rss = get_current_rss();
    if (rss < 0) {
        // Reading RSS is not supported or failed; be conservative and
        // let the collection proceed.
        return true;
    }
    int threshold = gcstate->young.threshold;
    Py_ssize_t deferred = _Py_atomic_load_ssize_relaxed(&gcstate->deferred_count);
    if (deferred > threshold * 40) {
        // Too many new container objects since last GC, even though RSS
        // might not have increased much. This is intended to avoid resource
        // exhaustion if some objects consume resources but don't result in a
        // RSS increase. We use 40x as the factor here because older versions
        // of Python would do full collections after roughly every 70,000 new
        // container objects.
        return true;
    }
    // Require at least a 10% RSS increase since the last collection (but
    // never less than 128 KB) before running the collector.
    Py_ssize_t last_rss = gcstate->last_rss;
    Py_ssize_t rss_threshold = Py_MAX(last_rss / 10, 128);
    if ((rss - last_rss) > rss_threshold) {
        // The RSS has increased too much, do a collection.
        return true;
    }
    else {
        // The RSS has not increased enough, defer the collection and clear
        // the young object count so we don't check RSS again on the next call
        // to gc_should_collect().  The mutex serializes concurrent updates
        // of deferred_count and young.count.
        Py_BEGIN_CRITICAL_SECTION_MUT(&gcstate->mutex);
        gcstate->deferred_count += gcstate->young.count;
        gcstate->young.count = 0;
        Py_END_CRITICAL_SECTION();
        return false;
    }
}

static bool
gc_should_collect(GCState *gcstate)
{
Expand All @@ -1887,11 +2077,17 @@ gc_should_collect(GCState *gcstate)
if (count <= threshold || threshold == 0 || !gc_enabled) {
return false;
}
// Avoid quadratic behavior by scaling threshold to the number of live
// objects. A few tests rely on immediate scheduling of the GC so we ignore
// the scaled threshold if generations[1].threshold is set to zero.
return (count > gcstate->long_lived_total / 4 ||
gcstate->old[0].threshold == 0);
if (gcstate->old[0].threshold == 0) {
// A few tests rely on immediate scheduling of the GC so we ignore the
// extra conditions if generations[1].threshold is set to zero.
return true;
}
if (count < gcstate->long_lived_total / 4) {
// Avoid quadratic behavior by scaling threshold to the number of live
// objects.
return false;
}
return gc_should_collect_rss(gcstate);
}

static void
Expand Down Expand Up @@ -1940,6 +2136,7 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
}

state->gcstate->young.count = 0;
state->gcstate->deferred_count = 0;
for (int i = 1; i <= generation; ++i) {
state->gcstate->old[i-1].count = 0;
}
Expand Down Expand Up @@ -2033,6 +2230,9 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
// to be freed.
delete_garbage(state);

// Store the current RSS, possibly smaller now that we deleted garbage.
state->gcstate->last_rss = get_current_rss();

// Append objects with legacy finalizers to the "gc.garbage" list.
handle_legacy_finalizers(state);
}
Expand Down
Loading
0