3232// sources such as interrupts and UNIX signal handlers).
3333void MICROPY_WRAP_MP_SCHED_EXCEPTION (mp_sched_exception )(mp_obj_t exc ) {
3434 MP_STATE_MAIN_THREAD (mp_pending_exception ) = exc ;
35-
36- #if MICROPY_ENABLE_SCHEDULER && !MICROPY_PY_THREAD
37- // Optimisation for the case where we have scheduler but no threading.
38- // Allows the VM to do a single check to exclude both pending exception
39- // and queued tasks.
40- if (MP_STATE_VM (sched_state ) == MP_SCHED_IDLE ) {
41- MP_STATE_VM (sched_state ) = MP_SCHED_PENDING ;
42- }
43- #endif
4435}
4536
4637#if MICROPY_KBD_EXCEPTION
@@ -68,16 +59,20 @@ static inline bool mp_sched_empty(void) {
6859
6960static inline void mp_sched_run_pending (void ) {
7061 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
71- if (MP_STATE_VM (sched_state ) != MP_SCHED_PENDING ) {
62+ if (MP_STATE_VM (sched_lock_depth ) ) {
7263 // Something else (e.g. hard IRQ) locked the scheduler while we
7364 // acquired the lock.
7465 MICROPY_END_ATOMIC_SECTION (atomic_state );
7566 return ;
7667 }
7768
69+ if (MP_STATE_MAIN_THREAD (mp_pending_exception ) == MP_OBJ_SENTINEL ) {
70+ MP_STATE_MAIN_THREAD (mp_pending_exception ) = MP_OBJ_NULL ;
71+ }
72+
7873 // Equivalent to mp_sched_lock(), but we're already in the atomic
7974 // section and know that we're pending.
80- MP_STATE_VM (sched_state ) = MP_SCHED_LOCKED ;
75+ ++ MP_STATE_VM (sched_lock_depth ) ;
8176
8277 #if MICROPY_SCHEDULER_STATIC_NODES
8378 // Run all pending C callbacks.
@@ -116,34 +111,24 @@ static inline void mp_sched_run_pending(void) {
116111// tasks and also in hard interrupts or GC finalisers.
117112void mp_sched_lock (void ) {
118113 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
119- if (MP_STATE_VM (sched_state ) < 0 ) {
120- // Already locked, increment lock (recursive lock).
121- -- MP_STATE_VM (sched_state );
122- } else {
123- // Pending or idle.
124- MP_STATE_VM (sched_state ) = MP_SCHED_LOCKED ;
125- }
114+ ++ MP_STATE_VM (sched_lock_depth );
126115 MICROPY_END_ATOMIC_SECTION (atomic_state );
127116}
128117
129118void mp_sched_unlock (void ) {
130119 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
131- assert (MP_STATE_VM (sched_state ) < 0 );
132- if (++ MP_STATE_VM (sched_state ) == 0 ) {
120+ assert (MP_STATE_VM (sched_lock_depth ) > 0 );
121+ if (-- MP_STATE_VM (sched_lock_depth ) == 0 ) {
133122 // Scheduler became unlocked. Check if there are still tasks in the
134123 // queue and, if so, set the pending-exception sentinel so the VM
134123 // re-enters the scheduler.
135- if (
136- #if !MICROPY_PY_THREAD
137- // See optimisation in mp_sched_exception.
138- MP_STATE_THREAD (mp_pending_exception ) != MP_OBJ_NULL ||
139- #endif
140- #if MICROPY_SCHEDULER_STATIC_NODES
141- MP_STATE_VM (sched_head ) != NULL ||
142- #endif
143- mp_sched_num_pending ()) {
144- MP_STATE_VM (sched_state ) = MP_SCHED_PENDING ;
145- } else {
146- MP_STATE_VM (sched_state ) = MP_SCHED_IDLE ;
124+ if (MP_STATE_MAIN_THREAD (mp_pending_exception ) == MP_OBJ_NULL ) {
125+ if (
126+ #if MICROPY_SCHEDULER_STATIC_NODES
127+ MP_STATE_VM (sched_head ) != NULL ||
128+ #endif
129+ mp_sched_num_pending ()) {
130+ MP_STATE_MAIN_THREAD (mp_pending_exception ) = MP_OBJ_SENTINEL ;
131+ }
147132 }
148133 }
149134 MICROPY_END_ATOMIC_SECTION (atomic_state );
@@ -153,8 +138,8 @@ bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj
153138 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
154139 bool ret ;
155140 if (!mp_sched_full ()) {
156- if (MP_STATE_VM ( sched_state ) == MP_SCHED_IDLE ) {
157- MP_STATE_VM ( sched_state ) = MP_SCHED_PENDING ;
141+ if (MP_STATE_MAIN_THREAD ( mp_pending_exception ) == MP_OBJ_NULL ) {
142+ MP_STATE_MAIN_THREAD ( mp_pending_exception ) = MP_OBJ_SENTINEL ;
158143 }
159144 uint8_t iput = IDX_MASK (MP_STATE_VM (sched_idx ) + MP_STATE_VM (sched_len )++ );
160145 MP_STATE_VM (sched_queue )[iput ].func = function ;
@@ -174,8 +159,8 @@ bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback)
174159 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
175160 bool ret ;
176161 if (node -> callback == NULL ) {
177- if (MP_STATE_VM ( sched_state ) == MP_SCHED_IDLE ) {
178- MP_STATE_VM ( sched_state ) = MP_SCHED_PENDING ;
162+ if (MP_STATE_MAIN_THREAD ( mp_pending_exception ) == MP_OBJ_NULL ) {
163+ MP_STATE_MAIN_THREAD ( mp_pending_exception ) = MP_OBJ_SENTINEL ;
179164 }
180165 node -> callback = callback ;
181166 node -> next = NULL ;
@@ -206,7 +191,7 @@ void mp_handle_pending(bool raise_exc) {
206191 if (MP_STATE_THREAD (mp_pending_exception ) != MP_OBJ_NULL ) {
207192 mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION ();
208193 mp_obj_t obj = MP_STATE_THREAD (mp_pending_exception );
209- if (obj != MP_OBJ_NULL ) {
194+ if (obj != MP_OBJ_NULL && obj != MP_OBJ_SENTINEL ) {
210195 MP_STATE_THREAD (mp_pending_exception ) = MP_OBJ_NULL ;
211196 if (raise_exc ) {
212197 MICROPY_END_ATOMIC_SECTION (atomic_state );
@@ -216,7 +201,7 @@ void mp_handle_pending(bool raise_exc) {
216201 MICROPY_END_ATOMIC_SECTION (atomic_state );
217202 }
218203 #if MICROPY_ENABLE_SCHEDULER
219- if (MP_STATE_VM ( sched_state ) == MP_SCHED_PENDING ) {
204+ if (MP_STATE_MAIN_THREAD ( mp_pending_exception ) == MP_OBJ_SENTINEL ) {
220205 mp_sched_run_pending ();
221206 }
222207 #endif
0 commit comments