// This code glues the code emitters to the runtime.

#include <stdio.h>
+#include <string.h>
#include <assert.h>

#include "misc.h"

#endif

typedef enum {
-    MP_CODE_NONE,
+    MP_CODE_UNUSED,
+    MP_CODE_RESERVED,
    MP_CODE_BYTE,
    MP_CODE_NATIVE,
    MP_CODE_INLINE_ASM,
@@ -49,16 +51,16 @@ typedef struct _mp_code_t {
} mp_code_t;

STATIC machine_uint_t unique_codes_alloc = 0;
+STATIC machine_uint_t unique_codes_total = 0; // always >= unique_codes_alloc
STATIC mp_code_t *unique_codes = NULL;
-STATIC uint next_unique_code_id;

#ifdef WRITE_CODE
FILE *fp_write_code = NULL;
#endif

void mp_emit_glue_init(void) {
-    next_unique_code_id = 0;
    unique_codes_alloc = 0;
+    unique_codes_total = 0;
    unique_codes = NULL;

#ifdef WRITE_CODE
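A note on the new data model (my reading, not part of the patch): a slot now moves from MP_CODE_UNUSED to MP_CODE_RESERVED when its id is handed out, then to one of the assigned kinds, and possibly back to MP_CODE_UNUSED when the slot is recycled. unique_codes_total tracks the highest id handed out so far (plus one), while unique_codes_alloc is the number of slots actually backed by memory, so total can temporarily run ahead of alloc. A hypothetical debug helper, just to state the intended invariants:

// Hypothetical invariant check (illustration only, not in this change).
// Ids in [unique_codes_alloc, unique_codes_total) have been handed out but are
// not yet backed by table entries; every allocated slot holds a valid state.
STATIC void unique_codes_check_invariants(void) {
    assert(unique_codes_total >= unique_codes_alloc);
    for (uint i = 0; i < unique_codes_alloc; i++) {
        assert(unique_codes[i].kind == MP_CODE_UNUSED
            || unique_codes[i].kind == MP_CODE_RESERVED
            || unique_codes[i].kind == MP_CODE_BYTE
            || unique_codes[i].kind == MP_CODE_NATIVE
            || unique_codes[i].kind == MP_CODE_INLINE_ASM);
    }
}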
@@ -77,25 +79,34 @@ void mp_emit_glue_deinit(void) {
}

uint mp_emit_glue_get_unique_code_id(void) {
-    return next_unique_code_id++;
+    // look for an existing unused slot
+    for (uint i = 0; i < unique_codes_alloc; i++) {
+        if (unique_codes[i].kind == MP_CODE_UNUSED) {
+            unique_codes[i].kind = MP_CODE_RESERVED;
+            return i;
+        }
+    }
+    // no existing slot
+    // return next available id, memory will be allocated later
+    return unique_codes_total++;
}

STATIC void mp_emit_glue_alloc_unique_codes(void) {
-    if (next_unique_code_id > unique_codes_alloc) {
-        DEBUG_printf("allocate more unique codes: " UINT_FMT " -> %u\n", unique_codes_alloc, next_unique_code_id);
-        // increase size of unique_codes table
-        unique_codes = m_renew(mp_code_t, unique_codes, unique_codes_alloc, next_unique_code_id);
-        for (uint i = unique_codes_alloc; i < next_unique_code_id; i++) {
-            unique_codes[i].kind = MP_CODE_NONE;
+    if (unique_codes_total > unique_codes_alloc) {
+        DEBUG_printf("allocate more unique codes: " UINT_FMT " -> %u\n", unique_codes_alloc, unique_codes_total);
+        // increase size of unique_codes table (all new entries are already reserved)
+        unique_codes = m_renew(mp_code_t, unique_codes, unique_codes_alloc, unique_codes_total);
+        for (uint i = unique_codes_alloc; i < unique_codes_total; i++) {
+            unique_codes[i].kind = MP_CODE_RESERVED;
        }
-        unique_codes_alloc = next_unique_code_id;
+        unique_codes_alloc = unique_codes_total;
    }
}

void mp_emit_glue_assign_byte_code(uint unique_code_id, byte *code, uint len, int n_args, int n_locals, uint scope_flags, qstr *arg_names) {
    mp_emit_glue_alloc_unique_codes();

-    assert(unique_code_id < next_unique_code_id && unique_codes[unique_code_id].kind == MP_CODE_NONE);
+    assert(unique_code_id < unique_codes_alloc && unique_codes[unique_code_id].kind == MP_CODE_RESERVED);
    unique_codes[unique_code_id].kind = MP_CODE_BYTE;
    unique_codes[unique_code_id].scope_flags = scope_flags;
    unique_codes[unique_code_id].n_args = n_args;
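To make the flow above concrete, here is a rough usage sketch of the id lifecycle; it is not taken from this patch, and the buffer, argument counts and default-args value are placeholders:

// Hypothetical driver (illustration only): reserve an id, assign byte code to
// it, then build a callable. All values passed here are placeholders.
void example_emit_and_bind(byte *bc, uint bc_len) {
    // recycles an MP_CODE_UNUSED slot if one exists, else takes a fresh id
    uint id = mp_emit_glue_get_unique_code_id();
    // grows the table if needed, then fills the slot (0 args, 0 locals, no flags)
    mp_emit_glue_assign_byte_code(id, bc, bc_len, 0, 0, 0, NULL);
    // passing true lets the slot be recycled once the function object owns the code
    mp_obj_t fun = rt_make_function_from_id(id, true, mp_const_none); // def_args placeholder
    (void)fun;
}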
@@ -123,7 +134,7 @@ void mp_emit_glue_assign_byte_code(uint unique_code_id, byte *code, uint len, in
void mp_emit_glue_assign_native_code(uint unique_code_id, void *fun, uint len, int n_args) {
    mp_emit_glue_alloc_unique_codes();

-    assert(unique_code_id < next_unique_code_id && unique_codes[unique_code_id].kind == MP_CODE_NONE);
+    assert(unique_code_id < unique_codes_alloc && unique_codes[unique_code_id].kind == MP_CODE_RESERVED);
    unique_codes[unique_code_id].kind = MP_CODE_NATIVE;
    unique_codes[unique_code_id].scope_flags = 0;
    unique_codes[unique_code_id].n_args = n_args;
@@ -154,7 +165,7 @@ void mp_emit_glue_assign_native_code(uint unique_code_id, void *fun, uint len, i
void mp_emit_glue_assign_inline_asm_code(uint unique_code_id, void *fun, uint len, int n_args) {
    mp_emit_glue_alloc_unique_codes();

-    assert(unique_code_id < next_unique_code_id && unique_codes[unique_code_id].kind == MP_CODE_NONE);
+    assert(unique_code_id < unique_codes_alloc && unique_codes[unique_code_id].kind == MP_CODE_RESERVED);
    unique_codes[unique_code_id].kind = MP_CODE_INLINE_ASM;
    unique_codes[unique_code_id].scope_flags = 0;
    unique_codes[unique_code_id].n_args = n_args;
@@ -179,9 +190,9 @@ void mp_emit_glue_assign_inline_asm_code(uint unique_code_id, void *fun, uint le
#endif
}

-mp_obj_t rt_make_function_from_id(int unique_code_id, mp_obj_t def_args) {
+mp_obj_t rt_make_function_from_id(uint unique_code_id, bool free_unique_code, mp_obj_t def_args) {
    DEBUG_OP_printf("make_function_from_id %d\n", unique_code_id);
-    if (unique_code_id >= next_unique_code_id) {
+    if (unique_code_id >= unique_codes_total) {
        // illegal code id
        return mp_const_none;
    }
@@ -200,22 +211,30 @@ mp_obj_t rt_make_function_from_id(int unique_code_id, mp_obj_t def_args) {
            fun = mp_obj_new_fun_asm(c->n_args, c->u_inline_asm.fun);
            break;
        default:
+            // code id was never assigned (this should not happen)
            assert(0);
-            fun = mp_const_none;
+            return mp_const_none;
    }

    // check for generator functions and if so wrap in generator object
    if ((c->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
        fun = mp_obj_new_gen_wrap(fun);
    }

+    // in some cases we can free the unique_code slot
+    // any dynamically allocated memory is now owned by the fun object
+    if (free_unique_code) {
+        memset(c, 0, sizeof *c); // make sure all pointers are zeroed
+        c->kind = MP_CODE_UNUSED;
+    }
+
    return fun;
}

-mp_obj_t rt_make_closure_from_id(int unique_code_id, mp_obj_t closure_tuple, mp_obj_t def_args) {
+mp_obj_t rt_make_closure_from_id(uint unique_code_id, mp_obj_t closure_tuple, mp_obj_t def_args) {
    DEBUG_OP_printf("make_closure_from_id %d\n", unique_code_id);
    // make function object
-    mp_obj_t ffun = rt_make_function_from_id(unique_code_id, def_args);
+    mp_obj_t ffun = rt_make_function_from_id(unique_code_id, false, def_args);
    // wrap function in closure object
    return mp_obj_new_closure(ffun, closure_tuple);
}
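A note on the design choice (my reading of the patch, not stated in it): rt_make_closure_from_id passes false for free_unique_code, so building a closure never recycles the underlying slot. Only call sites that know the id will not be needed again should pass true, because once the slot is zeroed and marked MP_CODE_UNUSED, mp_emit_glue_get_unique_code_id may hand the same id out to a different function.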