@@ -449,16 +449,6 @@ def _tokenize(rl_gen, encoding):
     source = b"".join(rl_gen).decode(encoding)
     token = None
     for token in _generate_tokens_from_c_tokenizer(source, extra_tokens=True):
-        # TODO: Marta -> clean this up
-        if 6 < token.type <= 54:
-            token = token._replace(type=OP)
-        if token.type in {ASYNC, AWAIT}:
-            token = token._replace(type=NAME)
-        if token.type == NEWLINE:
-            l_start, c_start = token.start
-            l_end, c_end = token.end
-            token = token._replace(string='\n', start=(l_start, c_start), end=(l_end, c_end + 1))
-
         yield token
     if token is not None:
         last_line, _ = token.start
@@ -550,8 +540,7 @@ def _generate_tokens_from_c_tokenizer(source, extra_tokens=False):
     """Tokenize a source reading Python code as unicode strings using the internal C tokenizer"""
     import _tokenize as c_tokenizer
     for info in c_tokenizer.TokenizerIter(source, extra_tokens=extra_tokens):
-        tok, type, lineno, end_lineno, col_off, end_col_off, line = info
-        yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line)
+        yield TokenInfo._make(info)
 
 
 if __name__ == "__main__":
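
Note on the second hunk: TokenInfo is a namedtuple with fields (type, string, start, end, line), and namedtuple._make builds an instance from any iterable whose elements are already in field order. The deleted lines unpacked seven flat values and reordered them, so collapsing everything to TokenInfo._make(info) relies on TokenizerIter now yielding exactly five elements in TokenInfo's own order, with start and end pre-packed as (line, column) tuples; that C-side change is implied by this diff rather than shown in it. A minimal standalone sketch of the equivalence, using an illustrative tuple rather than real tokenizer output:

    from tokenize import TokenInfo  # namedtuple with fields: type, string, start, end, line

    # Hypothetical value as the C tokenizer would now yield it: elements already
    # in TokenInfo field order, start/end pre-packed as (line, column) tuples.
    info = (1, "spam", (1, 0), (1, 4), "spam = 1\n")

    # _make consumes the iterable positionally, so no unpack-and-repack is needed.
    assert TokenInfo._make(info) == TokenInfo(1, "spam", (1, 0), (1, 4), "spam = 1\n")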