8000 WIP: Use mp_pending_exception for scheduler pending. · jimmo/micropython@6980fee · GitHub
[go: up one dir, main page]

Skip to content

Commit 6980fee

Browse files
committed
WIP: Use mp_pending_exception for scheduler pending.
Signed-off-by: Jim Mussared <jim.mussared@gmail.com>
1 parent bb77c1d commit 6980fee

File tree

4 files changed

+32
-63
lines changed

4 files changed

+32
-63
lines changed

py/mpstate.h

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -61,11 +61,6 @@ typedef struct mp_dynamic_compiler_t {
6161
extern mp_dynamic_compiler_t mp_dynamic_compiler;
6262
#endif
6363

64-
// These are the values for sched_state
65-
#define MP_SCHED_IDLE (1)
66-
#define MP_SCHED_LOCKED (-1)
67-
#define MP_SCHED_PENDING (0) // 0 so it's a quick check in the VM
68-
6964
typedef struct _mp_sched_item_t {
7065
mp_obj_t func;
7166
mp_obj_t arg;
@@ -211,7 +206,11 @@ typedef struct _mp_state_vm_t {
211206
#endif
212207

213208
#if MICROPY_ENABLE_SCHEDULER
214-
volatile int16_t sched_state;
209+
volatile uint16_t sched_lock_depth;
210+
211+
// These index sched_queue.
212+
uint8_t sched_len;
213+
uint8_t sched_idx;
215214

216215
#if MICROPY_SCHEDULER_STATIC_NODES
217216
// These will usually point to statically allocated memory. They are not
@@ -220,10 +219,6 @@ typedef struct _mp_state_vm_t {
220219
struct _mp_sched_node_t *sched_head;
221220
struct _mp_sched_node_t *sched_tail;
222221
#endif
223-
224-
// These index sched_queue.
225-
uint8_t sched_len;
226-
uint8_t sched_idx;
227222
#endif
228223

229224
#if MICROPY_PY_THREAD_GIL

py/runtime.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -70,12 +70,9 @@ void mp_init(void) {
7070
MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
7171
#if MICROPY_ENABLE_SCHEDULER
7272
#if MICROPY_SCHEDULER_STATIC_NODES
73-
if (MP_STATE_VM(sched_head) == NULL) {
74-
// no pending callbacks to start with
75-
MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
76-
} else {
73+
if (MP_STATE_VM(sched_head) != NULL) {
7774
// pending callbacks are on the list, eg from before a soft reset
78-
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
75+
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_SENTINEL;
7976
}
8077
#endif
8178
MP_STATE_VM(sched_idx) = 0;

py/scheduler.c

Lines changed: 23 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -32,15 +32,6 @@
3232
// sources such as interrupts and UNIX signal handlers).
3333
void MICROPY_WRAP_MP_SCHED_EXCEPTION(mp_sched_exception)(mp_obj_t exc) {
3434
MP_STATE_MAIN_THREAD(mp_pending_exception) = exc;
35-
36-
#if MICROPY_ENABLE_SCHEDULER && !MICROPY_PY_THREAD
37-
// Optimisation for the case where we have scheduler but no threading.
38-
// Allows the VM to do a single check to exclude both pending exception
39-
// and queued tasks.
40-
if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
41-
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
42-
}
43-
#endif
4435
}
4536

4637
#if MICROPY_KBD_EXCEPTION
@@ -68,16 +59,20 @@ static inline bool mp_sched_empty(void) {
6859

6960
static inline void mp_sched_run_pending(void) {
7061
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
71-
if (MP_STATE_VM(sched_state) != MP_SCHED_PENDING) {
62+
if (MP_STATE_VM(sched_lock_depth)) {
7263
// Something else (e.g. hard IRQ) locked the scheduler while we
7364
// acquired the lock.
7465
MICROPY_END_ATOMIC_SECTION(atomic_state);
7566
return;
7667
}
7768

69+
if (MP_STATE_MAIN_THREAD(mp_pending_exception) == MP_OBJ_SENTINEL) {
70+
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_NULL;
71+
}
72+
7873
// Equivalent to mp_sched_lock(), but we're already in the atomic
7974
// section and know that we're pending.
80-
MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
75+
++MP_STATE_VM(sched_lock_depth);
8176

8277
#if MICROPY_SCHEDULER_STATIC_NODES
8378
// Run all pending C callbacks.
@@ -116,34 +111,24 @@ static inline void mp_sched_run_pending(void) {
116111
// tasks and also in hard interrupts or GC finalisers.
117112
void mp_sched_lock(void) {
118113
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
119-
if (MP_STATE_VM(sched_state) < 0) {
120-
// Already locked, increment lock (recursive lock).
121-
--MP_STATE_VM(sched_state);
122-
} else {
123-
// Pending or idle.
124-
MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
125-
}
114+
++MP_STATE_VM(sched_lock_depth);
126115
MICROPY_END_ATOMIC_SECTION(atomic_state);
127116
}
128117

129118
void mp_sched_unlock(void) {
130119
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
131-
assert(MP_STATE_VM(sched_state) < 0);
132-
if (++MP_STATE_VM(sched_state) == 0) {
120+
assert(MP_STATE_VM(sched_lock_depth) > 0);
121+
if (--MP_STATE_VM(sched_lock_depth) == 0) {
133122
// Scheduler became unlocked. Check if there are still tasks in the
134123
// queue and set sched_state accordingly.
135-
if (
136-
#if !MICROPY_PY_THREAD
137-
// See optimisation in mp_sched_exception.
138-
MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL ||
139-
#endif
140-
#if MICROPY_SCHEDULER_STATIC_NODES
141-
MP_STATE_VM(sched_head) != NULL ||
142-
#endif
143-
mp_sched_num_pending()) {
144-
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
145-
} else {
146-
MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
124+
if (MP_STATE_MAIN_THREAD(mp_pending_exception) == MP_OBJ_NULL) {
125+
if (
126+
#if MICROPY_SCHEDULER_STATIC_NODES
127+
MP_STATE_VM(sched_head) != NULL ||
128+
#endif
129+
mp_sched_num_pending()) {
130+
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_SENTINEL;
131+
}
147132
}
148133
}
149134
MICROPY_END_ATOMIC_SECTION(atomic_state);
@@ -153,8 +138,8 @@ bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj
153138
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
154139
bool ret;
155140
if (!mp_sched_full()) {
156-
if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
157-
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
141+
if (MP_STATE_MAIN_THREAD(mp_pending_exception) == MP_OBJ_NULL) {
142+
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_SENTINEL;
158143
}
159144
uint8_t iput = IDX_MASK(MP_STATE_VM(sched_idx) + MP_STATE_VM(sched_len)++);
160145
MP_STATE_VM(sched_queue)[iput].func = function;
@@ -174,8 +159,8 @@ bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback)
174159
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
175160
bool ret;
176161
if (node->callback == NULL) {
177-
if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
178-
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
162+
if (MP_STATE_MAIN_THREAD(mp_pending_exception) == MP_OBJ_NULL) {
163+
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_SENTINEL;
179164
}
180165
node->callback = callback;
181166
node->next = NULL;
@@ -206,7 +191,7 @@ void mp_handle_pending(bool raise_exc) {
206191
if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
207192
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
208193
mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
209-
if (obj != MP_OBJ_NULL) {
194+
if (obj != MP_OBJ_NULL && obj != MP_OBJ_SENTINEL) {
210195
MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
211196
if (raise_exc) {
212197
MICROPY_END_ATOMIC_SECTION(atomic_state);
@@ -216,7 +201,7 @@ void mp_handle_pending(bool raise_exc) {
216201
MICROPY_END_ATOMIC_SECTION(atomic_state);
217202
}
218203
#if MICROPY_ENABLE_SCHEDULER
219-
if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
204+
if (MP_STATE_MAIN_THREAD(mp_pending_exception) == MP_OBJ_SENTINEL) {
220205
mp_sched_run_pending();
221206
}
222207
#endif

py/vm.c

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1321,17 +1321,9 @@ unwind_jump:;
13211321
// we can inline the check for the common case where there is
13221322
// neither.
13231323
if (
1324-
#if MICROPY_ENABLE_SCHEDULER
1324+
MP_STATE_MAIN_THREAD(mp_pending_exception) != MP_OBJ_NULL
13251325
#if MICROPY_PY_THREAD
1326-
// Scheduler + threading: Scheduler and pending exceptions are independent, check both.
1327-
MP_STATE_VM(sched_state) == MP_SCHED_PENDING || MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
1328-
#else
1329-
// Scheduler + non-threading: Optimisation: pending exception sets sched_state, only check sched_state.
1330-
MP_STATE_VM(sched_state) == MP_SCHED_PENDING
1331-
#endif
1332-
#else
1333-
// No scheduler: Just check pending exception.
1334-
MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
1326+
|| MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
13351327
#endif
13361328
) {
13371329
MARK_EXC_IP_SELECTIVE();

0 commit comments

Comments (0)