From 900dd6a205ec91deadb5b6ab6fc457da6200e29f Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 14:32:52 +0200 Subject: [PATCH 01/10] gh-120317: Lock around tokenizer calls in the tokenizer module --- Python/Python-tokenize.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index 09fad18b5b4df7..ae62c2cfe883ae 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -1,5 +1,6 @@ #include "Python.h" #include "errcode.h" +#include "internal/pycore_lock.h" // PyMutex #include "../Parser/lexer/state.h" #include "../Parser/lexer/lexer.h" #include "../Parser/tokenizer/tokenizer.h" @@ -37,6 +38,10 @@ typedef struct PyObject *last_line; Py_ssize_t last_lineno; Py_ssize_t byte_col_offset_diff; + +#ifdef Py_GIL_DISABLED + PyMutex mutex; +#endif } tokenizeriterobject; /*[clinic input] @@ -74,6 +79,10 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline, } self->done = 0; +#ifdef Py_GIL_DISABLED + self->mutex = (PyMutex) {_Py_UNLOCKED}; +#endif + self->last_line = NULL; self->byte_col_offset_diff = 0; self->last_lineno = 0; @@ -182,7 +191,14 @@ tokenizeriter_next(tokenizeriterobject *it) struct token token; _PyToken_Init(&token); +#ifdef Py_GIL_DISABLED + PyMutex_Lock(&it->mutex); +#endif int type = _PyTokenizer_Get(it->tok, &token); +#ifdef Py_GIL_DISABLED + PyMutex_Unlock(&it->mutex); +#endif + if (type == ERRORTOKEN) { if(!PyErr_Occurred()) { _tokenizer_error(it->tok); From abf568a7f1f61eb2e70eb0b76b59ee97656c045e Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 15:49:31 +0200 Subject: [PATCH 02/10] Use Py_BEGIN_CRITICAL_SECTION --- Python/Python-tokenize.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index ae62c2cfe883ae..3ecf858568eb8d 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -1,10 +1,10 @@ #include "Python.h" #include "errcode.h" -#include "internal/pycore_lock.h" // PyMutex +#include "internal/pycore_critical_section.h" // Py_BEGIN_CRITICAL_SECTION #include "../Parser/lexer/state.h" #include "../Parser/lexer/lexer.h" #include "../Parser/tokenizer/tokenizer.h" -#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset() +#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset() static struct PyModuleDef _tokenizemodule; @@ -38,10 +38,6 @@ typedef struct PyObject *last_line; Py_ssize_t last_lineno; Py_ssize_t byte_col_offset_diff; - -#ifdef Py_GIL_DISABLED - PyMutex mutex; -#endif } tokenizeriterobject; /*[clinic input] @@ -79,10 +75,6 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline, } self->done = 0; -#ifdef Py_GIL_DISABLED - self->mutex = (PyMutex) {_Py_UNLOCKED}; -#endif - self->last_line = NULL; self->byte_col_offset_diff = 0; self->last_lineno = 0; @@ -191,13 +183,10 @@ tokenizeriter_next(tokenizeriterobject *it) struct token token; _PyToken_Init(&token); -#ifdef Py_GIL_DISABLED - PyMutex_Lock(&it->mutex); -#endif - int type = _PyTokenizer_Get(it->tok, &token); -#ifdef Py_GIL_DISABLED - PyMutex_Unlock(&it->mutex); -#endif + int type; + Py_BEGIN_CRITICAL_SECTION(it); + type = _PyTokenizer_Get(it->tok, &token); + Py_END_CRITICAL_SECTION(); if (type == ERRORTOKEN) { if(!PyErr_Occurred()) { From 9dfe174935873b1f5b25044b28425e02c3ba6626 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 17:02:46 +0200 Subject: [PATCH 03/10] Add more locking around global 
tokenizer state --- Python/Python-tokenize.c | 91 +++++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 33 deletions(-) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index 3ecf858568eb8d..bbf06fbd67397c 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -176,6 +176,52 @@ _tokenizer_error(struct tok_state *tok) return result; } +static PyObject * +_get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t size) +{ + PyObject *line; + if (it->tok->lineno != it->last_lineno) { + // Line has changed since last token, so we fetch the new line and cache it + // in the iter object. + Py_XDECREF(it->last_line); + line = PyUnicode_DecodeUTF8(line_start, size, "replace"); + it->last_line = line; + it->byte_col_offset_diff = 0; + } else { + // Line hasn't changed so we reuse the cached one. + line = it->last_line; + } + return line; +} + +static void +_get_col_offsets(tokenizeriterobject *it, struct token token, const char *line_start, + PyObject *line, Py_ssize_t lineno, Py_ssize_t end_lineno, + Py_ssize_t *col_offset, Py_ssize_t *end_col_offset) +{ + Py_ssize_t byte_offset; + if (token.start != NULL && token.start >= line_start) { + byte_offset = token.start - line_start; + *col_offset = byte_offset - it->byte_col_offset_diff; + } + + if (token.end != NULL && token.end >= it->tok->line_start) { + Py_ssize_t end_byte_offset = token.end - it->tok->line_start; + if (lineno == end_lineno) { + // If the whole token is at the same line, we can just use the token.start + // buffer for figuring out the new column offset, since using line is not + // performant for very long lines. + Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset); + *end_col_offset = *col_offset + token_col_offset; + it->byte_col_offset_diff += token.end - token.start - token_col_offset; + } else { + *end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset); + it->byte_col_offset_diff += end_byte_offset - *end_col_offset; + } + } + it->last_lineno = lineno; +} + static PyObject * tokenizeriter_next(tokenizeriterobject *it) { @@ -197,7 +243,11 @@ tokenizeriter_next(tokenizeriterobject *it) } if (it->done || type == ERRORTOKEN) { PyErr_SetString(PyExc_StopIteration, "EOF"); + + Py_BEGIN_CRITICAL_SECTION(it); it->done = 1; + Py_END_CRITICAL_SECTION(); + goto exit; } PyObject *str = NULL; @@ -225,18 +275,9 @@ tokenizeriter_next(tokenizeriterobject *it) if (size >= 1 && it->tok->implicit_newline) { size -= 1; } - - if (it->tok->lineno != it->last_lineno) { - // Line has changed since last token, so we fetch the new line and cache it - // in the iter object. - Py_XDECREF(it->last_line); - line = PyUnicode_DecodeUTF8(line_start, size, "replace"); - it->last_line = line; - it->byte_col_offset_diff = 0; - } else { - // Line hasn't changed so we reuse the cached one. - line = it->last_line; - } + Py_BEGIN_CRITICAL_SECTION(it); + line = _get_current_line(it, line_start, size); + Py_END_CRITICAL_SECTION(); } if (line == NULL) { Py_DECREF(str); @@ -245,29 +286,11 @@ tokenizeriter_next(tokenizeriterobject *it) Py_ssize_t lineno = ISSTRINGLIT(type) ? 
it->tok->first_lineno : it->tok->lineno; Py_ssize_t end_lineno = it->tok->lineno; - it->last_lineno = lineno; - Py_ssize_t col_offset = -1; Py_ssize_t end_col_offset = -1; - Py_ssize_t byte_offset = -1; - if (token.start != NULL && token.start >= line_start) { - byte_offset = token.start - line_start; - col_offset = byte_offset - it->byte_col_offset_diff; - } - if (token.end != NULL && token.end >= it->tok->line_start) { - Py_ssize_t end_byte_offset = token.end - it->tok->line_start; - if (lineno == end_lineno) { - // If the whole token is at the same line, we can just use the token.start - // buffer for figuring out the new column offset, since using line is not - // performant for very long lines. - Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset); - end_col_offset = col_offset + token_col_offset; - it->byte_col_offset_diff += token.end - token.start - token_col_offset; - } else { - end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset); - it->byte_col_offset_diff += end_byte_offset - end_col_offset; - } - } + Py_BEGIN_CRITICAL_SECTION(it); + _get_col_offsets(it, token, line_start, line, lineno, end_lineno, &col_offset, &end_col_offset); + Py_END_CRITICAL_SECTION(); if (it->tok->tok_extra_tokens) { if (is_trailing_token) { @@ -307,7 +330,9 @@ tokenizeriter_next(tokenizeriterobject *it) exit: _PyToken_Free(&token); if (type == ENDMARKER) { + Py_BEGIN_CRITICAL_SECTION(it); it->done = 1; + Py_END_CRITICAL_SECTION(); } return result; } From 5fd07b63a15200838e4123dd894b4e0f8d7fa815 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 17:25:51 +0200 Subject: [PATCH 04/10] Add test --- Lib/test/test_free_threading/test_tokenize.py | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 Lib/test/test_free_threading/test_tokenize.py diff --git a/Lib/test/test_free_threading/test_tokenize.py b/Lib/test/test_free_threading/test_tokenize.py new file mode 100644 index 00000000000000..34764c76162010 --- /dev/null +++ b/Lib/test/test_free_threading/test_tokenize.py @@ -0,0 +1,53 @@ +import io +import time +import unittest +import tokenize +from functools import partial +from threading import Thread + + +class TestTokenize(unittest.TestCase): + def test_tokenizer_iter(self): + source = io.StringIO("for _ in a:\n pass") + it = tokenize._tokenize.TokenizerIter(source.readline, extra_tokens=False) + + tokens = [] + def next_token(it): + while True: + try: + r = next(it) + tokens.append(tokenize.TokenInfo._make(r)) + time.sleep(1) + except StopIteration: + return + + threads = [] + for _ in range(5): + threads.append(Thread(target=partial(next_token, it))) + + for thread in threads: + thread.start() + + for thread in threads: + thread.join() + + expected_tokens = [ + tokenize.TokenInfo(type=1, string='for', start=(1, 0), end=(1, 3), line='for _ in a:\n'), + tokenize.TokenInfo(type=1, string='_', start=(1, 4), end=(1, 5), line='for _ in a:\n'), + tokenize.TokenInfo(type=1, string='in', start=(1, 6), end=(1, 8), line='for _ in a:\n'), + tokenize.TokenInfo(type=1, string='a', start=(1, 9), end=(1, 10), line='for _ in a:\n'), + tokenize.TokenInfo(type=11, string=':', start=(1, 10), end=(1, 11), line='for _ in a:\n'), + tokenize.TokenInfo(type=4, string='', start=(1, 11), end=(1, 11), line='for _ in a:\n'), + tokenize.TokenInfo(type=5, string='', start=(2, -1), end=(2, -1), line=' pass'), + tokenize.TokenInfo(type=1, string='pass', start=(2, 2), end=(2, 6), line=' 
pass'), + tokenize.TokenInfo(type=4, string='', start=(2, 6), end=(2, 6), line=' pass'), + tokenize.TokenInfo(type=6, string='', start=(2, -1), end=(2, -1), line=' pass'), + tokenize.TokenInfo(type=0, string='', start=(2, -1), end=(2, -1), line=' pass'), + ] + + for token in tokens: + self.assertIn(token, expected_tokens) + + +if __name__ == "__main__": + unittest.main() From 814d9affecac24e7c185d1d063a4ac800d452e4d Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 18:05:23 +0200 Subject: [PATCH 05/10] Address feedback & improve test --- Lib/test/test_free_threading/test_tokenize.py | 5 +++-- Python/Python-tokenize.c | 22 +++++++++---------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Lib/test/test_free_threading/test_tokenize.py b/Lib/test/test_free_threading/test_tokenize.py index 34764c76162010..4d85afe9fe010d 100644 --- a/Lib/test/test_free_threading/test_tokenize.py +++ b/Lib/test/test_free_threading/test_tokenize.py @@ -45,8 +45,9 @@ def next_token(it): tokenize.TokenInfo(type=0, string='', start=(2, -1), end=(2, -1), line=' pass'), ] - for token in tokens: - self.assertIn(token, expected_tokens) + tokens.sort() + expected_tokens.sort() + self.assertListEqual(tokens, expected_tokens) if __name__ == "__main__": diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index bbf06fbd67397c..e336779b0f180a 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -179,6 +179,7 @@ _tokenizer_error(struct tok_state *tok) static PyObject * _get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t size) { + _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(it); PyObject *line; if (it->tok->lineno != it->last_lineno) { // Line has changed since last token, so we fetch the new line and cache it @@ -187,7 +188,8 @@ _get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t si line = PyUnicode_DecodeUTF8(line_start, size, "replace"); it->last_line = line; it->byte_col_offset_diff = 0; - } else { + } + else { // Line hasn't changed so we reuse the cached one. 
line = it->last_line; } @@ -199,7 +201,8 @@ _get_col_offsets(tokenizeriterobject *it, struct token token, const char *line_s PyObject *line, Py_ssize_t lineno, Py_ssize_t end_lineno, Py_ssize_t *col_offset, Py_ssize_t *end_col_offset) { - Py_ssize_t byte_offset; + _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(it); + Py_ssize_t byte_offset = -1; if (token.start != NULL && token.start >= line_start) { byte_offset = token.start - line_start; *col_offset = byte_offset - it->byte_col_offset_diff; @@ -214,7 +217,8 @@ _get_col_offsets(tokenizeriterobject *it, struct token token, const char *line_s Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset); *end_col_offset = *col_offset + token_col_offset; it->byte_col_offset_diff += token.end - token.start - token_col_offset; - } else { + } + else { *end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset); it->byte_col_offset_diff += end_byte_offset - *end_col_offset; } @@ -243,11 +247,7 @@ tokenizeriter_next(tokenizeriterobject *it) } if (it->done || type == ERRORTOKEN) { PyErr_SetString(PyExc_StopIteration, "EOF"); - - Py_BEGIN_CRITICAL_SECTION(it); - it->done = 1; - Py_END_CRITICAL_SECTION(); - + _Py_atomic_store_int(&it->done, 1); goto exit; } PyObject *str = NULL; @@ -275,6 +275,7 @@ tokenizeriter_next(tokenizeriterobject *it) if (size >= 1 && it->tok->implicit_newline) { size -= 1; } + Py_BEGIN_CRITICAL_SECTION(it); line = _get_current_line(it, line_start, size); Py_END_CRITICAL_SECTION(); @@ -288,6 +289,7 @@ tokenizeriter_next(tokenizeriterobject *it) Py_ssize_t end_lineno = it->tok->lineno; Py_ssize_t col_offset = -1; Py_ssize_t end_col_offset = -1; + Py_BEGIN_CRITICAL_SECTION(it); _get_col_offsets(it, token, line_start, line, lineno, end_lineno, &col_offset, &end_col_offset); Py_END_CRITICAL_SECTION(); @@ -330,9 +332,7 @@ tokenizeriter_next(tokenizeriterobject *it) exit: _PyToken_Free(&token); if (type == ENDMARKER) { - Py_BEGIN_CRITICAL_SECTION(it); - it->done = 1; - Py_END_CRITICAL_SECTION(); + _Py_atomic_store_int(&it->done, 1); } return result; } From c1093be27ba1631b4c2c0e94ea560584ac02434a Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 18:20:40 +0200 Subject: [PATCH 06/10] Guard atomic store behind Py_GIL_DISABLED --- Python/Python-tokenize.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index e336779b0f180a..095a5a6afbf3fc 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -247,7 +247,13 @@ tokenizeriter_next(tokenizeriterobject *it) } if (it->done || type == ERRORTOKEN) { PyErr_SetString(PyExc_StopIteration, "EOF"); + +#ifdef Py_GIL_DISABLED _Py_atomic_store_int(&it->done, 1); +#else + it->done = 1; +#endif + goto exit; } PyObject *str = NULL; @@ -332,7 +338,11 @@ tokenizeriter_next(tokenizeriterobject *it) exit: _PyToken_Free(&token); if (type == ENDMARKER) { +#ifdef Py_GIL_DISABLED _Py_atomic_store_int(&it->done, 1); +#else + it->done = 1; +#endif } return result; } From 117d25605f8451e229a7aabd7e60b76a315b53b2 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 10 Jun 2024 18:24:17 +0200 Subject: [PATCH 07/10] Add requires working threading decorator to test --- Lib/test/test_free_threading/test_tokenize.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Lib/test/test_free_threading/test_tokenize.py b/Lib/test/test_free_threading/test_tokenize.py index 4d85afe9fe010d..67aa6518db3124 100644 --- 
a/Lib/test/test_free_threading/test_tokenize.py +++ b/Lib/test/test_free_threading/test_tokenize.py @@ -5,7 +5,10 @@ from functools import partial from threading import Thread +from test.support import threading_helper + +@threading_helper.requires_working_threading() class TestTokenize(unittest.TestCase): def test_tokenizer_iter(self): source = io.StringIO("for _ in a:\n pass") From ca466fa0d1ebf2aa1377cbc2b79b5f75b2011557 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Tue, 11 Jun 2024 22:31:14 +0200 Subject: [PATCH 08/10] Lock around all of tokenizeriter_next Co-authored-by: Pablo Galindo --- Python/Python-tokenize.c | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index 4d240da85b9cb6..b1f8873f6ade13 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -85,14 +85,16 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline, } static int -_tokenizer_error(struct tok_state *tok) +_tokenizer_error(tokenizeriterobject *it) { + _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(it); if (PyErr_Occurred()) { return -1; } const char *msg = NULL; PyObject* errtype = PyExc_SyntaxError; + struct tok_state *tok = it->tok; switch (tok->done) { case E_TOKEN: msg = "invalid token"; @@ -182,7 +184,7 @@ static PyObject * _get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t size) { _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(it); - PyObject *line; + PyObject *line = it->last_line; if (it->tok->lineno != it->last_lineno) { // Line has changed since last token, so we fetch the new line and cache it // in the iter object. @@ -193,10 +195,6 @@ _get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t si it->byte_col_offset_diff = 0; } } - else { - // Line hasn't changed so we reuse the cached one. 
- line = it->last_line; - } return line; } @@ -235,30 +233,23 @@ static PyObject * tokenizeriter_next(tokenizeriterobject *it) { PyObject* result = NULL; - struct token token; - _PyToken_Init(&token); - int type; Py_BEGIN_CRITICAL_SECTION(it); - type = _PyTokenizer_Get(it->tok, &token); - Py_END_CRITICAL_SECTION(); + struct token token; + _PyToken_Init(&token); + + int type = _PyTokenizer_Get(it->tok, &token); if (type == ERRORTOKEN) { if(!PyErr_Occurred()) { - _tokenizer_error(it->tok); + _tokenizer_error(it); assert(PyErr_Occurred()); } goto exit; } if (it->done || type == ERRORTOKEN) { PyErr_SetString(PyExc_StopIteration, "EOF"); - -#ifdef Py_GIL_DISABLED - _Py_atomic_store_int(&it->done, 1); -#else it->done = 1; -#endif - goto exit; } PyObject *str = NULL; @@ -287,9 +278,7 @@ tokenizeriter_next(tokenizeriterobject *it) size -= 1; } - Py_BEGIN_CRITICAL_SECTION(it); line = _get_current_line(it, line_start, size); - Py_END_CRITICAL_SECTION(); } if (line == NULL) { Py_DECREF(str); @@ -300,10 +289,7 @@ tokenizeriter_next(tokenizeriterobject *it) Py_ssize_t end_lineno = it->tok->lineno; Py_ssize_t col_offset = -1; Py_ssize_t end_col_offset = -1; - - Py_BEGIN_CRITICAL_SECTION(it); _get_col_offsets(it, token, line_start, line, lineno, end_lineno, &col_offset, &end_col_offset); - Py_END_CRITICAL_SECTION(); if (it->tok->tok_extra_tokens) { if (is_trailing_token) { @@ -343,12 +329,10 @@ tokenizeriter_next(tokenizeriterobject *it) exit: _PyToken_Free(&token); if (type == ENDMARKER) { -#ifdef Py_GIL_DISABLED - _Py_atomic_store_int(&it->done, 1); -#else it->done = 1; -#endif } + + Py_END_CRITICAL_SECTION(); return result; } From 601e5393bf1552e91faa17160900b870632e220a Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Wed, 12 Jun 2024 21:02:25 +0200 Subject: [PATCH 09/10] Fix formatting --- Python/Python-tokenize.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c index 3cad6c38ad2b74..34b4445be27f62 100644 --- a/Python/Python-tokenize.c +++ b/Python/Python-tokenize.c @@ -193,7 +193,8 @@ _get_current_line(tokenizeriterobject *it, const char *line_start, Py_ssize_t si line = PyUnicode_DecodeUTF8(line_start, size, "replace"); it->last_line = line; it->byte_col_offset_diff = 0; - } else { + } + else { line = it->last_line; *line_changed = 0; } From 0ddde2fdb974da252bea212a7fcbd296b14c9017 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Mon, 1 Jul 2024 11:00:20 +0200 Subject: [PATCH 10/10] Change sleeping time --- Lib/test/test_free_threading/test_tokenize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Lib/test/test_free_threading/test_tokenize.py b/Lib/test/test_free_threading/test_tokenize.py index 67aa6518db3124..860cfec4d710f4 100644 --- a/Lib/test/test_free_threading/test_tokenize.py +++ b/Lib/test/test_free_threading/test_tokenize.py @@ -20,7 +20,7 @@ def next_token(it): try: r = next(it) tokens.append(tokenize.TokenInfo._make(r)) - time.sleep(1) + time.sleep(0.03) except StopIteration: return
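Summary of the end state of this series: tokenizeriter_next() now runs its whole body inside Py_BEGIN_CRITICAL_SECTION(it)/Py_END_CRITICAL_SECTION() (patch 08), so every call into _PyTokenizer_Get() and every access to the per-iterator caches (last_line, last_lineno, byte_col_offset_diff, done) is serialized on the iterator object. On free-threaded builds the critical section takes the object's lock; on default (GIL) builds the macros compile to nothing, so there is no overhead. The earlier, finer-grained variants (a dedicated PyMutex in patch 01, per-call critical sections and atomic stores of it->done in patches 02-06) were superseded by this single section, which also lets the helper functions assert the lock with _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED().

The sketch below shows, outside the test suite, the situation the new test exercises: several threads pulling tokens from one shared TokenizerIter. It is a minimal illustration only; tokenize._tokenize.TokenizerIter and TokenInfo._make are the same private interfaces the test uses, not a public API.

    import io
    import tokenize
    from threading import Thread

    source = io.StringIO("for _ in a:\n  pass")
    it = tokenize._tokenize.TokenizerIter(source.readline, extra_tokens=False)
    tokens = []

    def worker():
        # Each next() call runs under the iterator's critical section, so
        # concurrent callers cannot interleave inside the shared tok_state.
        while True:
            try:
                tokens.append(tokenize.TokenInfo._make(next(it)))
            except StopIteration:
                return

    threads = [Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Every token is produced exactly once, whichever thread happened to pull it.
    print(len(tokens))

Using a critical section rather than a raw PyMutex (the approach of patch 01) has the advantage that it is released automatically if the thread needs to suspend, for example around a stop-the-world garbage collection, which avoids deadlocks while still costing nothing when the GIL is enabled.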