gh-131757: allow lru_cache functions to execute concurrently by tom-pytel · Pull Request #131758 · python/cpython · GitHub
[navigation: up one directory | main page]

Skip to content

gh-131757: allow lru_cache functions to execute concurrently #131758

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 19 commits into from
Apr 14, 2025
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
requested changes
  • Loading branch information
tom-pytel committed Mar 26, 2025
commit 7ead4eec0aff7bc20611a47e8ca541ca1fee8489
42 changes: 34 additions & 8 deletions Modules/_functoolsmodule.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include "pycore_long.h" // _PyLong_GetZero()
#include "pycore_moduleobject.h" // _PyModule_GetState()
#include "pycore_object.h" // _PyObject_GC_TRACK
#include "pycore_pyatomic_ft_wrappers.h"
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_tuple.h" // _PyTuple_ITEMS()

Expand Down Expand Up @@ -1175,7 +1176,11 @@ uncached_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwd
{
PyObject *result;

#ifdef Py_GIL_DISABLED
_Py_atomic_add_ssize(&self->misses, 1);
#else
self->misses++;
#endif
result = PyObject_Call(self->func, args, kwds);
if (!result)
return NULL;
Expand All @@ -1197,15 +1202,23 @@ infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwd
}
int res = _PyDict_GetItemRef_KnownHash((PyDictObject *)self->cache, key, hash, &result);
if (res > 0) {
#ifdef Py_GIL_DISABLED
_Py_atomic_add_ssize(&self->hits, 1);
#else
self->hits++;
#endif
Py_DECREF(key);
return result;
}
if (res < 0) {
Py_DECREF(key);
return NULL;
}
#ifdef Py_GIL_DISABLED
_Py_atomic_add_ssize(&self->misses, 1);
#else
self->misses++;
#endif
result = PyObject_Call(self->func, args, kwds);
if (!result) {
Py_DECREF(key);
Expand Down Expand Up @@ -1281,8 +1294,8 @@ lru_cache_prepend_link(lru_cache_object *self, lru_list_elem *link)
*/

static int
bounded_lru_cache_wrapper_pre_call_lock_held(lru_cache_object *self, PyObject *args, PyObject *kwds,
PyObject **result, PyObject **key, Py_hash_t *hash)
bounded_lru_cache_get_lock_held(lru_cache_object *self, PyObject *args, PyObject *kwds,
PyObject **result, PyObject **key, Py_hash_t *hash)
{
lru_list_elem *link;

Expand All @@ -1299,7 +1312,11 @@ bounded_lru_cache_wrapper_pre_call_lock_held(lru_cache_object *self, PyObject *a
lru_cache_extract_link(link);
lru_cache_append_link(self, link);
*result = link->result;
#ifdef Py_GIL_DISABLED
_Py_atomic_add_ssize(&self->hits, 1);
#else
self->hits++;
#endif
Py_INCREF(link->result);
Py_DECREF(key_);
return 1;
Expand All @@ -1308,12 +1325,16 @@ bounded_lru_cache_wrapper_pre_call_lock_held(lru_cache_object *self, PyObject *a
Py_DECREF(key_);
return -1;
}
#ifdef Py_GIL_DISABLED
_Py_atomic_add_ssize(&self->misses, 1);
#else
self->misses++;
#endif
return 0;
}

static PyObject *
bounded_lru_cache_wrapper_post_call_lock_held(lru_cache_object *self,
bounded_lru_cache_update_lock_held(lru_cache_object *self,
PyObject *result, PyObject *key, Py_hash_t hash)
{
lru_list_elem *link;
Expand Down Expand Up @@ -1462,7 +1483,7 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds
int res;

Py_BEGIN_CRITICAL_SECTION(self);
res = bounded_lru_cache_wrapper_pre_call_lock_held(self, args, kwds, &result, &key, &hash);
res = bounded_lru_cache_get_lock_held(self, args, kwds, &result, &key, &hash);
Py_END_CRITICAL_SECTION();

if (res < 0) {
Expand All @@ -1475,7 +1496,7 @@ bounded_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds
result = PyObject_Call(self->func, args, kwds);

Py_BEGIN_CRITICAL_SECTION(self);
result = bounded_lru_cache_wrapper_post_call_lock_held(self, result, key, hash);
result = bounded_lru_cache_update_lock_held(self, result, key, hash);
Py_END_CRITICAL_SECTION();

return result;
Expand Down Expand Up @@ -1640,11 +1661,15 @@ _functools__lru_cache_wrapper_cache_info_impl(PyObject *self)
lru_cache_object *_self = (lru_cache_object *) self;
if (_self->maxsize == -1) {
return PyObject_CallFunction(_self->cache_info_type, "nnOn",
_self->hits, _self->misses, Py_None,
FT_ATOMIC_LOAD_SSIZE_RELAXED(_self->hits),
FT_ATOMIC_LOAD_SSIZE_RELAXED(_self->misses),
Py_None,
PyDict_GET_SIZE(_self->cache));
}
return PyObject_CallFunction(_self->cache_info_type, "nnnn",
_self->hits, _self->misses, _self->maxsize,
FT_ATOMIC_LOAD_SSIZE_RELAXED(_self->hits),
FT_ATOMIC_LOAD_SSIZE_RELAXED(_self->misses),
_self->maxsize,
PyDict_GET_SIZE(_self->cache));
}

Expand All @@ -1661,7 +1686,8 @@ _functools__lru_cache_wrapper_cache_clear_impl(PyObject *self)
{
lru_cache_object *_self = (lru_cache_object *) self;
lru_list_elem *list = lru_cache_unlink_list(_self);
_self->hits = _self->misses = 0;
FT_ATOMIC_STORE_SSIZE_RELAXED(_self->hits, 0);
FT_ATOMIC_STORE_SSIZE_RELAXED(_self->misses, 0);
PyDict_Clear(_self->cache);
lru_cache_clear_list(list);
Py_RETURN_NONE;
Expand Down
Loading
0