bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). by ericsnowcurrently · Pull Request #12360 · python/cpython · GitHub
[go: up one dir, main page]

Skip to content
< 8000 div class="d-flex flex-column flex-md-row flex-items-start flex-md-items-center">

bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). #12360

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Move pending calls from _PyRuntimeState to PyInterpreterState.
  • Loading branch information
ericsnowcurrently committed Apr 5, 2019
commit f8e70b485855dbb709b56ea16a9686ababcefa1d
10 changes: 5 additions & 5 deletions Include/internal/pycore_ceval.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,11 @@ extern "C" {
#include "pycore_atomic.h"
#include "pythread.h"

PyAPI_FUNC(void) _Py_FinishPendingCalls(void);
struct _is; // See PyInterpreterState in cpython/pystate.h.

PyAPI_FUNC(int) _Py_AddPendingCall(struct _is*, int (*)(void *), void *);
PyAPI_FUNC(int) _Py_MakePendingCalls(struct _is*);
PyAPI_FUNC(void) _Py_FinishPendingCalls(struct _is*);

struct _pending_calls {
int finishing;
Expand Down Expand Up @@ -41,12 +45,8 @@ struct _ceval_runtime_state {
c_tracefunc. This speeds up the if statement in
PyEval_EvalFrameEx() after fast_next_opcode. */
int tracing_possible;
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
struct _pending_calls pending;
/* Request for checking signals. */
_Py_atomic_int signals_pending;
struct _gil_runtime_state gil;
Expand Down
10 changes: 10 additions & 0 deletions Include/internal/pycore_pystate.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ extern "C" {
#include "pystate.h"
#include "pythread.h"

#include "pycore_atomic.h"
#include "pycore_ceval.h"
#include "pycore_pathconfig.h"
#include "pycore_pymem.h"
Expand Down Expand Up @@ -44,6 +45,15 @@ struct _is {
/* Used in Python/sysmodule.c. */
int check_interval;

#ifdef Py_BUILD_CORE
struct _ceval {
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
struct _pending_calls pending;
} ceval;
#endif

/* Used in Modules/_threadmodule.c. */
long num_threads;
/* Support for runtime thread stack size tuning.
Expand Down
77 changes: 42 additions & 35 deletions Python/ceval.c
Original file line number Diff line number Diff line change
Expand Up @@ -98,16 +98,16 @@ static long dxp[256];
the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER() \
_Py_atomic_store_relaxed( \
&_PyRuntime.ceval.eval_breaker, \
&PyThreadState_Get()->interp->ceval.eval_breaker, \
GIL_REQUEST | \
_Py_atomic_load_relaxed(&_PyRuntime.ceval.signals_pending) | \
_Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
_PyRuntime.ceval.pending.async_exc)
_Py_atomic_load_relaxed(&PyThreadState_Get()->interp->ceval.pending.calls_to_do) | \
PyThreadState_Get()->interp->ceval.pending.async_exc)

#define SET_GIL_DROP_REQUEST() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
_Py_atomic_store_relaxed(&PyThreadState_Get()->interp->ceval.eval_breaker, 1); \
} while (0)

#define RESET_GIL_DROP_REQUEST() \
Expand All @@ -119,20 +119,20 @@ static long dxp[256];
/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
_Py_atomic_store_relaxed(&PyThreadState_Get()->interp->ceval.pending.calls_to_do, 1); \
_Py_atomic_store_relaxed(&PyThreadState_Get()->interp->ceval.eval_breaker, 1); \
} while (0)

#define UNSIGNAL_PENDING_CALLS() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
_Py_atomic_store_relaxed(&PyThreadState_Get()->interp->ceval.pending.calls_to_do, 0); \
COMPUTE_EVAL_BREAKER(); \
} while (0)

#define SIGNAL_PENDING_SIGNALS() \
do { \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.signals_pending, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
_Py_atomic_store_relaxed(&_PyRuntime.interpreters.main->ceval.eval_breaker, 1); \
} while (0)

#define UNSIGNAL_PENDING_SIGNALS() \
Expand All @@ -143,13 +143,13 @@ static long dxp[256];

#define SIGNAL_ASYNC_EXC() \
do { \
_PyRuntime.ceval.pending.async_exc = 1; \
_Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
PyThreadState_Get()->interp->ceval.pending.async_exc = 1; \
_Py_atomic_store_relaxed(&PyThreadState_Get()->interp->ceval.eval_breaker, 1); \
} while (0)

#define UNSIGNAL_ASYNC_EXC() \
do { \
_PyRuntime.ceval.pending.async_exc = 0; \
PyThreadState_Get()->interp->ceval.pending.async_exc = 0; \
COMPUTE_EVAL_BREAKER(); \
} while (0)

Expand Down Expand Up @@ -177,10 +177,7 @@ PyEval_InitThreads(void)
create_gil();
take_gil(_PyThreadState_GET());

_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
if (_PyRuntime.ceval.pending.lock == NULL) {
Py_FatalError("Can't initialize threads for pending calls");
}
// The pending calls mutex is initialized in PyInterpreterState_New().
}

void
Expand Down Expand Up @@ -256,8 +253,10 @@ PyEval_ReInitThreads(void)
recreate_gil();
take_gil(current_tstate);

_PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
if (_PyRuntime.ceval.pending.lock == NULL) {
// Only the main interpreter remains, so ignore the rest.
PyInterpreterState *interp = _PyRuntime.interpreters.main;
interp->ceval.pending.lock = PyThread_allocate_lock();
if (interp->ceval.pending.lock == NULL) {
Py_FatalError("Can't initialize threads for pending calls");
}

Expand Down Expand Up @@ -374,9 +373,9 @@ _pop_pending_call(struct _pending_calls *pending,
*/

int
Py_AddPendingCall(int (*func)(void *), void *arg)
_Py_AddPendingCall(PyInterpreterState *interp, int (*func)(void *), void *arg)
{
struct _pending_calls *pending = &_PyRuntime.ceval.pending;
struct _pending_calls *pending = &interp->ceval.pending;

PyThread_acquire_lock(pending->lock, WAIT_LOCK);
if (pending->finishing) {
Expand All @@ -399,6 +398,15 @@ Py_AddPendingCall(int (*func)(void *), void *arg)
return result;
}

/* Backward-compatibility wrapper around the private, per-interpreter
   _Py_AddPendingCall().  The public API has no interpreter argument,
   so the call is routed to the main interpreter. */
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    return _Py_AddPendingCall(_PyRuntime.interpreters.main, func, arg);
}

static int
handle_signals(void)
{
Expand Down Expand Up @@ -429,11 +437,6 @@ make_pending_calls(struct _pending_calls* pending)
{
static int busy = 0;

/* only service pending calls on main thread */
if (PyThread_get_thread_ident() != _PyRuntime.main_thread) {
return 0;
}

/* don't perform recursive pending calls */
if (busy) {
return 0;
Expand Down Expand Up @@ -474,9 +477,9 @@ make_pending_calls(struct _pending_calls* pending)
}

void
_Py_FinishPendingCalls(void)
_Py_FinishPendingCalls(PyInterpreterState *interp)
{
struct _pending_calls *pending = &_PyRuntime.ceval.pending;
struct _pending_calls *pending = &interp->ceval.pending;

assert(PyGILState_Check());

Expand All @@ -497,6 +500,14 @@ _Py_FinishPendingCalls(void)
}
}

/* Run the pending calls queued on the given interpreter.
   Precondition: the caller must hold the GIL (asserted below).
   Returns 0 on success, or the nonzero result of the first failing call
   as propagated by make_pending_calls(). */
int
_Py_MakePendingCalls(PyInterpreterState *interp)
{
assert(PyGILState_Check());

return make_pending_calls(&interp->ceval.pending);
}

/* Py_MakePendingCalls() is a simple wrapper for the sake
of backward-compatibility. */
int
Expand All @@ -511,12 +522,8 @@ Py_MakePendingCalls(void)
return res;
}

res = make_pending_calls(&_PyRuntime.ceval.pending);
if (res != 0) {
return res;
}

return 0;
PyInterpreterState *interp = _PyRuntime.interpreters.main;
return make_pending_calls(&interp->ceval.pending);
}

/* The interpreter's recursion limit */
Expand Down Expand Up @@ -638,7 +645,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
PyObject **fastlocals, **freevars;
PyObject *retval = NULL; /* Return value */
PyThreadState *tstate = _PyThreadState_GET();
_Py_atomic_int *eval_breaker = &_PyRuntime.ceval.eval_breaker;
_Py_atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;
PyCodeObject *co;

/* when tracing we set things up so that
Expand Down Expand Up @@ -1059,9 +1066,9 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
}
}
if (_Py_atomic_load_relaxed(
&_PyRuntime.ceval.pending.calls_to_do))
&tstate->interp->ceval.pending.calls_to_do))
{
if (make_pending_calls(&_PyRuntime.ceval.pending) != 0) {
if (make_pending_calls(&tstate->interp->ceval.pending) != 0) {
goto error;
}
}
Expand Down
5 changes: 4 additions & 1 deletion Python/pylifecycle.c
Original file line number Diff line number Diff line change
Expand Up @@ -1146,7 +1146,7 @@ Py_FinalizeEx(void)
interp = tstate->interp;

// Make any remaining pending calls.
_Py_FinishPendingCalls();
_Py_FinishPendingCalls(interp);

/* The interpreter is still entirely intact at this point, and the
* exit funcs may be relying on that. In particular, if some thread
Expand Down Expand Up @@ -1552,6 +1552,9 @@ Py_EndInterpreter(PyThreadState *tstate)
// Wrap up existing "threading"-module-created, non-daemon threads.
wait_for_thread_shutdown();

// Make any remaining pending calls.
_Py_FinishPendingCalls(interp);

call_py_exitfuncs(interp);

if (tstate != interp->tstate_head || tstate->next != NULL)
Expand Down
11 changes: 11 additions & 0 deletions Python/pystate.c
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,14 @@ PyInterpreterState_New(void)
memset(interp, 0, sizeof(*interp));
interp->id_refcount = -1;
interp->check_interval = 100;

interp->ceval.pending.lock = PyThread_allocate_lock();
if (interp->ceval.pending.lock == NULL) {
PyErr_SetString(PyExc_RuntimeError,
"failed to create interpreter ceval pending mutex");
return NULL;
}

interp->core_config = _PyCoreConfig_INIT;
interp->eval_frame = _PyEval_EvalFrameDefault;
#ifdef HAVE_DLOPEN
Expand Down Expand Up @@ -279,6 +287,9 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
if (interp->id_mutex != NULL) {
PyThread_free_lock(interp->id_mutex);
}
if (interp->ceval.pending.lock != NULL) {
PyThread_free_lock(interp->ceval.pending.lock);
}
PyMem_RawFree(interp);
}

Expand Down
0