@@ -22,7 +22,7 @@ _PyPegen_interactive_exit(Parser *p)
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
 {
-    const char *data = PyUnicode_AsUTF8(line);
+    const unsigned char *data = (const unsigned char *)PyUnicode_AsUTF8(line);
 
     Py_ssize_t len = 0;
     while (col_offset < end_col_offset) {
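Note on the unsigned cast above: PyUnicode_AsUTF8() returns const char *, and where plain char is signed, UTF-8 bytes at or above 0x80 read through it become negative, which trips sign/conversion warnings in byte-level tests like the continuation-byte checks in this function. A minimal standalone sketch of the same idiom follows; it is not CPython code, and count_utf8_chars is a hypothetical helper used only for illustration:

    /* Sketch only: walk UTF-8 bytes as unsigned char so values >= 0x80
       stay non-negative.  count_utf8_chars is a made-up helper, not a
       CPython function. */
    #include <stddef.h>
    #include <stdio.h>

    static size_t
    count_utf8_chars(const char *buf, size_t nbytes)
    {
        const unsigned char *data = (const unsigned char *)buf;  /* same cast as in the diff */
        size_t count = 0;
        for (size_t i = 0; i < nbytes; i++) {
            if ((data[i] & 0xC0) != 0x80) {   /* skip UTF-8 continuation bytes */
                count++;
            }
        }
        return count;
    }

    int
    main(void)
    {
        const char *s = "caf\xC3\xA9";              /* "café", 5 bytes of UTF-8 */
        printf("%zu\n", count_utf8_chars(s, 5));    /* prints 4 */
        return 0;
    }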
@@ -47,7 +47,7 @@ _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_off
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_raw(const char *str, Py_ssize_t col_offset)
 {
-    Py_ssize_t len = strlen(str);
+    Py_ssize_t len = (Py_ssize_t)strlen(str);
     if (col_offset > len + 1) {
         col_offset = len + 1;
     }
@@ -158,7 +158,7 @@ growable_comment_array_deallocate(growable_comment_array *arr) {
 static int
 _get_keyword_or_name_type(Parser *p, struct token *new_token)
 {
-    int name_len = new_token->end_col_offset - new_token->col_offset;
+    Py_ssize_t name_len = new_token->end_col_offset - new_token->col_offset;
     assert(name_len > 0);
 
     if (name_len >= p->n_keyword_lists ||
@@ -167,7 +167,7 @@ _get_keyword_or_name_type(Parser *p, struct token *new_token)
         return NAME;
     }
     for (KeywordToken *k = p->keywords[name_len]; k != NULL && k->type != -1; k++) {
-        if (strncmp(k->str, new_token->start, name_len) == 0) {
+        if (strncmp(k->str, new_token->start, (size_t)name_len) == 0) {
             return k->type;
         }
     }
@@ -218,7 +218,7 @@ initialize_token(Parser *p, Token *parser_token, struct token *new_token, int to
 static int
 _resize_tokens_array(Parser *p) {
     int newsize = p->size * 2;
-    Token **new_tokens = PyMem_Realloc(p->tokens, newsize * sizeof(Token *));
+    Token **new_tokens = PyMem_Realloc(p->tokens, (size_t)newsize * sizeof(Token *));
     if (new_tokens == NULL) {
         PyErr_NoMemory();
         return -1;
@@ -247,12 +247,12 @@ _PyPegen_fill_token(Parser *p)
     // Record and skip '# type: ignore' comments
     while (type == TYPE_IGNORE) {
         Py_ssize_t len = new_token.end_col_offset - new_token.col_offset;
-        char *tag = PyMem_Malloc(len + 1);
+        char *tag = PyMem_Malloc((size_t)len + 1);
         if (tag == NULL) {
             PyErr_NoMemory();
             goto error;
         }
-        strncpy(tag, new_token.start, len);
+        strncpy(tag, new_token.start, (size_t)len);
         tag[len] = '\0';
         // Ownership of tag passes to the growable array
         if (!growable_comment_array_add(&p->type_ignore_comments, p->tok->lineno, tag)) {
@@ -505,7 +505,7 @@ _PyPegen_get_last_nonnwhitespace_token(Parser *p)
 PyObject *
 _PyPegen_new_identifier(Parser *p, const char *n)
 {
-    PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL);
+    PyObject *id = PyUnicode_DecodeUTF8(n, (Py_ssize_t)strlen(n), NULL);
     if (!id) {
         goto error;
     }
@@ -601,7 +601,7 @@ expr_ty _PyPegen_soft_keyword_token(Parser *p) {
     Py_ssize_t size;
     PyBytes_AsStringAndSize(t->bytes, &the_token, &size);
     for (char **keyword = p->soft_keywords; *keyword != NULL; keyword++) {
-        if (strncmp(*keyword, the_token, size) == 0) {
+        if (strncmp(*keyword, the_token, (size_t)size) == 0) {
             return _PyPegen_name_from_token(p, t);
         }
     }
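The remaining casts all address the same mismatch: lengths are tracked as signed Py_ssize_t, while strlen(), strncmp(), strncpy(), and the PyMem allocators traffic in size_t, so each implicit conversion can warn under flags such as -Wconversion or -Wsign-conversion (the exact warning set depends on the build, so treat that as an assumption). Once a length is known to be non-negative and in range, an explicit cast records that and keeps the call quiet. A minimal standalone sketch of the pattern, using ptrdiff_t as a stand-in for Py_ssize_t so it builds without Python.h:

    /* Sketch only: the signed-length / size_t cast pattern from this diff,
       outside of CPython.  ptrdiff_t stands in for Py_ssize_t. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *keyword = "while";
        const char *token = "while True:";

        /* strlen() returns size_t; storing it in a signed length can warn,
           so the value (known to fit) is cast explicitly. */
        ptrdiff_t len = (ptrdiff_t)strlen(keyword);

        /* strncmp() takes size_t; len has been checked to be non-negative,
           so casting back is safe and silences the sign-conversion warning. */
        if (len > 0 && strncmp(keyword, token, (size_t)len) == 0) {
            printf("prefix match\n");
        }
        return 0;
    }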