rp2: Fix power consumption when sleeping with a timeout. #15398


Merged · by projectgus · 2 commits · Jul 23, 2024
Changes from all commits
7 changes: 7 additions & 0 deletions ports/rp2/mphalport.c
@@ -228,6 +228,13 @@ static void soft_timer_hardware_callback(unsigned int alarm_num) {
// The timer alarm ISR needs to call here and trigger PendSV dispatch via
// a second ISR, as PendSV may be currently suspended by the other CPU.
pendsv_schedule_dispatch(PENDSV_DISPATCH_SOFT_TIMER, soft_timer_handler);

// This ISR only runs on core0, but if core1 is running Python code then it
// may be blocked in WFE so wake it up as well. Unfortunately this also sets
// the event flag on core0, so a subsequent WFE on this core will not suspend
if (core1_entry != NULL) {
__sev();
}
}

void soft_timer_init(void) {
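For context on the new comment: code that blocks in WFE normally does so inside a loop that re-checks its wake condition, so the stray event flag that __sev() leaves set on core0 costs at most one extra pass through that loop rather than a missed or premature wake. A minimal sketch of the pattern, not part of this diff (wake_condition() is a hypothetical placeholder):

    while (!wake_condition()) {
        // WFE returns immediately if the event register is already set
        // (and clears it); otherwise it suspends until the next event or interrupt.
        __wfe();
    }
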
2 changes: 1 addition & 1 deletion ports/rp2/mpthreadport.c
@@ -39,7 +39,7 @@ extern uint8_t __StackTop, __StackBottom;
void *core_state[2];

// This will be non-NULL while Python code is executing.
static void *(*core1_entry)(void *) = NULL;
core_entry_func_t core1_entry = NULL;

static void *core1_arg = NULL;
static uint32_t *core1_stack = NULL;
4 changes: 4 additions & 0 deletions ports/rp2/mpthreadport.h
@@ -32,6 +32,10 @@ typedef struct mutex mp_thread_mutex_t;

extern void *core_state[2];

typedef void *(*core_entry_func_t)(void *);

extern core_entry_func_t core1_entry;

void mp_thread_init(void);
void mp_thread_deinit(void);
void mp_thread_gc_others(void);
19 changes: 19 additions & 0 deletions ports/rp2/mutex_extra.c
@@ -33,3 +33,22 @@ void __time_critical_func(recursive_mutex_exit_and_restore_interrupts)(recursive
}
lock_internal_spin_unlock_with_notify(&mtx->core, save);
}

void __time_critical_func(recursive_mutex_nowait_enter_blocking)(recursive_mutex_nowait_t * mtx) {
while (!recursive_mutex_try_enter(&mtx->mutex, NULL)) {
tight_loop_contents();
}
}

void __time_critical_func(recursive_mutex_nowait_exit)(recursive_mutex_nowait_t * wrapper) {
recursive_mutex_t *mtx = &wrapper->mutex;
// Rest of this function is a copy of recursive_mutex_exit(), with
// lock_internal_spin_unlock_with_notify() removed.
uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
assert(lock_is_owner_id_valid(mtx->owner));
assert(mtx->enter_count);
if (!--mtx->enter_count) {
mtx->owner = LOCK_INVALID_OWNER_ID;
}
spin_unlock(mtx->core.spin_lock, save);
}
24 changes: 24 additions & 0 deletions ports/rp2/mutex_extra.h
@@ -31,4 +31,28 @@
uint32_t recursive_mutex_enter_blocking_and_disable_interrupts(recursive_mutex_t *mtx);
void recursive_mutex_exit_and_restore_interrupts(recursive_mutex_t *mtx, uint32_t save);

// Alternative version of recursive_mutex_t that doesn't issue WFE and SEV
// instructions. This means it will use more power (busy-waits), but exiting
// this mutex doesn't disrupt the calling CPU's event state in the same way a
// recursive mutex does (because recursive_mutex_exit() executes SEV each time the
// mutex is released.)
//
// Implement as a wrapper type because no longer compatible with the normal
// recursive_mutex functions.

typedef struct {
recursive_mutex_t mutex;
} recursive_mutex_nowait_t;

inline static void recursive_mutex_nowait_init(recursive_mutex_nowait_t *mtx) {
recursive_mutex_init(&mtx->mutex);
}

inline static bool recursive_mutex_nowait_try_enter(recursive_mutex_nowait_t *mtx, uint32_t *owner_out) {
return recursive_mutex_try_enter(&mtx->mutex, owner_out);
}

void recursive_mutex_nowait_enter_blocking(recursive_mutex_nowait_t *mtx);
void recursive_mutex_nowait_exit(recursive_mutex_nowait_t *mtx);

#endif // MICROPY_INCLUDED_RP2_MUTEX_EXTRA_H
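
Putting the pieces together, the wrapper keeps the recursive-ownership semantics of the SDK type but waits by spinning. A hypothetical usage sketch (the lock name and the work inside the critical section are made up for illustration):

    static recursive_mutex_nowait_t my_lock;

    void my_module_init(void) {
        recursive_mutex_nowait_init(&my_lock);
    }

    void my_critical_section(void) {
        recursive_mutex_nowait_enter_blocking(&my_lock);  // busy-waits, never issues WFE
        // ... work that must not run on both cores at once ...
        recursive_mutex_nowait_exit(&my_lock);            // releases without issuing SEV
    }
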
22 changes: 13 additions & 9 deletions ports/rp2/pendsv.c
@@ -25,8 +25,8 @@
*/

#include <assert.h>
#include "pico/mutex.h"
#include "py/mpconfig.h"
#include "mutex_extra.h"
#include "pendsv.h"
#include "RP2040.h"

@@ -35,21 +35,25 @@
#endif

static pendsv_dispatch_t pendsv_dispatch_table[PENDSV_DISPATCH_NUM_SLOTS];
static recursive_mutex_t pendsv_mutex;

// Using the nowait variant here as softtimer updates PendSV from the loop of mp_wfe_or_timeout(),
// where we don't want the CPU event bit to be set.
static recursive_mutex_nowait_t pendsv_mutex;

void pendsv_init(void) {
recursive_mutex_init(&pendsv_mutex);
recursive_mutex_nowait_init(&pendsv_mutex);
}

void pendsv_suspend(void) {
// Recursive Mutex here as either core may call pendsv_suspend() and expect
// both mutual exclusion (other core can't enter pendsv_suspend() at the
// same time), and that no PendSV handler will run.
recursive_mutex_enter_blocking(&pendsv_mutex);
recursive_mutex_nowait_enter_blocking(&pendsv_mutex);
}

void pendsv_resume(void) {
recursive_mutex_exit(&pendsv_mutex);
recursive_mutex_nowait_exit(&pendsv_mutex);

// Run pendsv if needed. Find an entry with a dispatch and call pendsv dispatch
// with it. If pendsv runs it will service all slots.
int count = PENDSV_DISPATCH_NUM_SLOTS;
@@ -63,7 +67,7 @@ void pendsv_resume(void) {

void pendsv_schedule_dispatch(size_t slot, pendsv_dispatch_t f) {
pendsv_dispatch_table[slot] = f;
if (pendsv_mutex.enter_count == 0) {
if (pendsv_mutex.mutex.enter_count == 0) {
// There is a race here where other core calls pendsv_suspend() before
// ISR can execute, but dispatch will happen later when other core
// calls pendsv_resume().
@@ -78,13 +82,13 @@ void pendsv_schedule_dispatch(size_t slot, pendsv_dispatch_t f) {
// PendSV interrupt handler to perform background processing.
void PendSV_Handler(void) {

if (!recursive_mutex_try_enter(&pendsv_mutex, NULL)) {
if (!recursive_mutex_nowait_try_enter(&pendsv_mutex, NULL)) {
// Failure here means core 1 holds pendsv_mutex. ISR will
// run again after core 1 calls pendsv_resume().
return;
}
// Core 0 should not already have locked pendsv_mutex
assert(pendsv_mutex.enter_count == 1);
assert(pendsv_mutex.mutex.enter_count == 1);

#if MICROPY_PY_NETWORK_CYW43
CYW43_STAT_INC(PENDSV_RUN_COUNT);
@@ -98,5 +102,5 @@ void PendSV_Handler(void) {
}
}

recursive_mutex_exit(&pendsv_mutex);
recursive_mutex_nowait_exit(&pendsv_mutex);
}
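
As the comments in pendsv_suspend() and pendsv_resume() spell out, either core may take the (now nowait) mutex to hold off background processing, and anything scheduled with pendsv_schedule_dispatch() while it was held is serviced as soon as the mutex is released. A hedged usage sketch of that pairing (the work inside the bracketed region is a placeholder):

    pendsv_suspend();
    // ... code that must not be interleaved with PendSV background work
    //     (soft timer handlers, network processing, etc.) ...
    pendsv_resume();  // any dispatch scheduled in the meantime runs now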