gh-102856: Python tokenizer implementation for PEP 701 by mgmacias95 · Pull Request #104323 · python/cpython

gh-102856: Python tokenizer implementation for PEP 701 #104323

Merged · merged 20 commits on May 21, 2023
Changes from 1 commit
Some progress
pablogsal committed May 18, 2023
commit 63ef1c16284f845d1e080d1d0e357d64693da0da
6 changes: 3 additions & 3 deletions Lib/inspect.py
@@ -2187,15 +2187,16 @@ def _signature_strip_non_python_syntax(signature):
if string == ',':
current_parameter += 1

if (type == ERRORTOKEN) and (string == '$'):
# if (type == ERRORTOKEN) and (string == '$'):
if (type == OP) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue

add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
clean_signature = ''.join(text).strip()
return clean_signature, self_parameter


@@ -2215,7 +2216,6 @@ def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
module = None

if not isinstance(module, ast.Module):
breakpoint()
raise ValueError("{!r} builtin has invalid signature".format(obj))

f = module.body[0]
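For context on the Lib/inspect.py change: _signature_strip_non_python_syntax() consumes the __text_signature__ strings that Argument Clinic attaches to builtins, where '$' marks the implicit self/type parameter. With the PEP 701 tokenizer that marker now arrives as an OP token rather than an ERRORTOKEN, and the joined text needs a final strip(). A hedged illustration of the kind of input the helper handles; the output comments assume a CPython 3.12-era build:

import inspect

# dict.fromkeys is defined in C; its Argument Clinic text signature carries
# the '$' marker for the implicit first argument, which inspect strips out
# before parsing the remainder with ast.
print(dict.fromkeys.__text_signature__)   # expected: ($type, iterable, value=None, /)
print(inspect.signature(dict.fromkeys))   # expected: (iterable, value=None, /)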
25 changes: 14 additions & 11 deletions Lib/test/test_tokenize.py
@@ -82,7 +82,7 @@ def test_basic(self):
NAME 'False' (4, 11) (4, 16)
COMMENT '# NEWLINE' (4, 17) (4, 26)
NEWLINE '\\n' (4, 26) (4, 27)
DEDENT '' (5, 0) (5, 0)
DEDENT '' (4, 27) (4, 27)
""")
indent_error_file = b"""\
def k(x):
@@ -230,6 +230,10 @@ def number_token(s):
continue
self.assertEqual(number_token(lit), lit)
for lit in INVALID_UNDERSCORE_LITERALS:
try:
number_token(lit)
except SyntaxError:
continue
self.assertNotEqual(number_token(lit), lit)

def test_string(self):
@@ -728,8 +732,8 @@ def test_tabs(self):
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (3, 14) (3, 14)
DEDENT '' (3, 14) (3, 14)
""")

def test_non_ascii_identifiers(self):
@@ -941,7 +945,7 @@ async def foo():
NUMBER '1' (2, 17) (2, 18)
OP ':' (2, 18) (2, 19)
NAME 'pass' (2, 20) (2, 24)
DEDENT '' (3, 0) (3, 0)
DEDENT '' (2, 25) (2, 25)
""")

self.check_tokenize('''async def foo(async): await''', """\
@@ -989,7 +993,7 @@ async def bar(): pass
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
DEDENT '' (6, 12) (6, 12)
""")

self.check_tokenize('''\
@@ -1027,7 +1031,7 @@ async def bar(): pass
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
DEDENT '' (6, 12) (6, 12)
""")

class GenerateTokensTest(TokenizeTest):
@@ -1052,7 +1056,7 @@ def decistmt(s):
])
else:
result.append((toknum, tokval))
return untokenize(result).decode('utf-8')
return untokenize(result).decode('utf-8').strip()

class TestMisc(TestCase):

@@ -1408,9 +1412,9 @@ def test_open_error(self):

class TestTokenize(TestCase):

def test_tokenize(self):
def test_tokenizee(self):
import tokenize as tokenize_module
encoding = object()
encoding = "utf-8"
encoding_used = None
def mock_detect_encoding(readline):
return encoding, [b'first', b'second']
@@ -2643,8 +2647,7 @@ def generate_source(indents):
compile(valid, "<string>", "exec")

invalid = generate_source(MAXINDENT)
tokens = list(_generate_tokens_from_c_tokenizer(invalid))
self.assertEqual(tokens[-1].type, NEWLINE)
self.assertRaises(SyntaxError, lambda: list(_generate_tokens_from_c_tokenizer(invalid)))
self.assertRaises(
IndentationError, compile, invalid, "<string>", "exec"
)
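The last test_tokenize.py hunk changes what happens when the C tokenizer is fed source nested deeper than MAXINDENT: instead of yielding a truncated token stream ending in NEWLINE, _generate_tokens_from_c_tokenizer() is now expected to raise SyntaxError (the IndentationError for "too many levels of indentation" is a SyntaxError subclass). A rough sketch of that expectation; the generate_source() helper below is only a guess at the one used in the test, and MAXINDENT (100) comes from Parser/tokenizer.h:

from tokenize import _generate_tokens_from_c_tokenizer

MAXINDENT = 100  # indentation-stack limit defined in Parser/tokenizer.h

def generate_source(indents):
    # Build `indents` nested blocks so every line adds a new indentation level.
    lines = [("    " * level) + "if True:" for level in range(indents)]
    lines.append(("    " * indents) + "pass")
    return "\n".join(lines) + "\n"

try:
    list(_generate_tokens_from_c_tokenizer(generate_source(MAXINDENT)))
except SyntaxError as exc:
    print(type(exc).__name__, exc)   # expected: too many levels of indentation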
11 changes: 9 additions & 2 deletions Lib/tokenize.py
@@ -406,7 +406,6 @@ def open(filename):

def tokenize2(readline):
encoding, consumed = detect_encoding(readline)

rl_gen = _itertools.chain(consumed, iter(readline, b""))
if encoding is not None:
if encoding == "utf-8-sig":
@@ -417,6 +416,7 @@ def tokenize2(readline):

def _tokenize2(rl_gen, encoding):
source = b"".join(rl_gen)
token = None
for token in _generate_tokens_from_c_tokenizer(source.decode(encoding), extra_tokens=True):
# TODO: Marta -> limpiar esto
if 6 < token.type <= 54:
@@ -429,6 +429,9 @@ def _tokenize2(rl_gen, encoding):
token = token._replace(string='\n', start=(l_start, c_start), end=(l_end, c_end+1))

yield token
if token is not None:
last_line, _ = token.start
yield TokenInfo(ENDMARKER, '', (last_line + 1, 0), (last_line + 1, 0), '')


def tokenize(readline):
@@ -638,6 +641,7 @@ def _tokenize(readline, encoding):
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')

tokenize = tokenize2

def generate_tokens(readline):
"""Tokenize a source reading Python code as unicode strings.
@@ -647,7 +651,10 @@ def generate_tokens(readline):
"""
def _gen():
while True:
line = readline()
try:
line = readline()
except StopIteration:
return
if not line:
return
yield line.encode()
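On the Lib/tokenize.py side, generate_tokens() now funnels its readline callable through a small generator (_gen) that collects the source for the C tokenizer; the added try/except keeps callables that signal end-of-input with StopIteration, such as iter(lines).__next__, working as before. A hedged usage sketch:

import tokenize

# A readline callable built from an iterator raises StopIteration when the
# input is exhausted instead of returning ''; the try/except added above
# absorbs that inside the wrapper generator.
lines = iter(["x = 1\n", "y = 2\n"])
for tok in tokenize.generate_tokens(lines.__next__):
    print(tokenize.tok_name[tok.type], repr(tok.string))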
15 changes: 10 additions & 5 deletions Parser/tokenizer.c
@@ -1638,6 +1638,7 @@ token_setup(struct tok_state *tok, struct token *token, int type, const char *st
return type;
}


static int
tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
{
@@ -1652,7 +1653,6 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
blankline = 0;


const char* starting_indent = NULL;
/* Get indentation level */
if (tok->atbol) {
int col = 0;
@@ -1749,19 +1749,24 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
}
}

starting_indent = tok->start;
tok->start = tok->cur;
tok->starting_col_offset = tok->col_offset;

/* Return pending indents/dedents */
if (tok->pendin != 0) {
p_start = tok->buf;
p_end = tok->cur;
if (tok->pendin < 0) {
if (tok->tok_extra_tokens) {
p_start = tok->cur;
p_end = tok->cur;
}
tok->pendin++;
return MAKE_TOKEN(DEDENT);
}
else {
if (tok->tok_extra_tokens) {
p_start = tok->buf;
p_end = tok->cur;
}
tok->pendin--;
return MAKE_TOKEN(INDENT);
}
@@ -1883,7 +1888,7 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
tok_backup(tok, c); /* don't eat the newline or EOF */
p_start = p;
p_end = tok->cur;
tok->comment_newline = 1;
tok->comment_newline = blankline;
return MAKE_TOKEN(COMMENT);
}
}
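The tokenizer.c hunk also adjusts how comment lines are flagged: comment_newline is now set from blankline, so only a comment on an otherwise blank line marks the following newline as non-logical. In observable tokenize output, the distinction is between NL after a standalone comment and NEWLINE after a trailing comment; a small illustration of standard tokenize behaviour, not specific to this commit:

import io, tokenize

src = "# standalone comment\nx = 1  # trailing comment\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# The standalone comment is followed by NL; the trailing comment by NEWLINE.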
60 changes: 59 additions & 1 deletion Python/Python-tokenize.c
@@ -1,6 +1,8 @@
#include "Python.h"
#include "errcode.h"
#include "../Parser/tokenizer.h"
#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset()
#include "../Parser/pegen.h" // _PyPegen_byte_offset_to_character_offset()

static struct PyModuleDef _tokenizemodule;

@@ -64,12 +66,68 @@ tokenizeriter_new_impl(PyTypeObject *type, const char *source,
return (PyObject *)self;
}

static int
_tokenizer_error(struct tok_state *tok)
{
if (PyErr_Occurred()) {
return -1;
}

const char *msg = NULL;
PyObject* errtype = PyExc_SyntaxError;
switch (tok->done) {
case E_TOKEN:
msg = "invalid token";
break;
case E_EOF:
if (tok->level) {
PyErr_Format(PyExc_SyntaxError,
"parenthesis '%c' was never closed",
tok->parenstack[tok->level-1]);
} else {
PyErr_SetString(PyExc_SyntaxError, "unexpected EOF while parsing");
}
return -1;
case E_DEDENT:
PyErr_SetString(PyExc_IndentationError,
"unindent does not match any outer indentation level");
return -1;
case E_INTR:
if (!PyErr_Occurred()) {
PyErr_SetNone(PyExc_KeyboardInterrupt);
}
return -1;
case E_NOMEM:
PyErr_NoMemory();
return -1;
case E_TABSPACE:
errtype = PyExc_TabError;
msg = "inconsistent use of tabs and spaces in indentation";
break;
case E_TOODEEP:
errtype = PyExc_IndentationError;
msg = "too many levels of indentation";
break;
case E_LINECONT: {
msg = "unexpected character after line continuation character";
break;
}
default:
msg = "unknown parsing error";
}
PyErr_SetString(errtype, msg);
return -1;
}

static PyObject *
tokenizeriter_next(tokenizeriterobject *it)
{
struct token token;
int type = _PyTokenizer_Get(it->tok, &token);
if (type == ERRORTOKEN && PyErr_Occurred()) {
if (type == ERRORTOKEN) {
if(!PyErr_Occurred()) {
_tokenizer_error(it->tok);
}
return NULL;
}
if (type == ERRORTOKEN || type == ENDMARKER) {
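The new _tokenizer_error() helper translates the C tokenizer's tok->done codes into Python exceptions (SyntaxError, IndentationError, TabError, and so on), so tokenizeriter_next() can raise something meaningful instead of silently returning an ERRORTOKEN. A hedged sketch of how those errors surface when iterating from Python; the exact messages follow the switch above:

import io, tokenize

samples = {
    "unclosed bracket": "foo = (1,\n",             # E_EOF with an open '('
    "bad dedent": "if x:\n    a = 1\n  b = 2\n",   # E_DEDENT
}
for name, src in samples.items():
    try:
        list(tokenize.generate_tokens(io.StringIO(src).readline))
    except SyntaxError as exc:   # IndentationError and TabError subclass SyntaxError
        print(f"{name}: {type(exc).__name__}: {exc}")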