-rw-r--r--   ports/rp2/mutex_extra.c   19
-rw-r--r--   ports/rp2/mutex_extra.h   24
-rw-r--r--   ports/rp2/pendsv.c        22
3 files changed, 56 insertions, 9 deletions
diff --git a/ports/rp2/mutex_extra.c b/ports/rp2/mutex_extra.c
index 7a70a40ac..db8419170 100644
--- a/ports/rp2/mutex_extra.c
+++ b/ports/rp2/mutex_extra.c
@@ -33,3 +33,22 @@ void __time_critical_func(recursive_mutex_exit_and_restore_interrupts)(recursive
    }
    lock_internal_spin_unlock_with_notify(&mtx->core, save);
}
+
+void __time_critical_func(recursive_mutex_nowait_enter_blocking)(recursive_mutex_nowait_t *mtx) {
+    while (!recursive_mutex_try_enter(&mtx->mutex, NULL)) {
+        tight_loop_contents();
+    }
+}
+
+void __time_critical_func(recursive_mutex_nowait_exit)(recursive_mutex_nowait_t *wrapper) {
+    recursive_mutex_t *mtx = &wrapper->mutex;
+    // Rest of this function is a copy of recursive_mutex_exit(), with
+    // lock_internal_spin_unlock_with_notify() removed.
+    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
+    assert(lock_is_owner_id_valid(mtx->owner));
+    assert(mtx->enter_count);
+    if (!--mtx->enter_count) {
+        mtx->owner = LOCK_INVALID_OWNER_ID;
+    }
+    spin_unlock(mtx->core.spin_lock, save);
+}
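
For context, the notify step that recursive_mutex_nowait_exit() drops is exactly what wakes cores blocked in WFE. A minimal sketch of the two release paths, assuming the default pico-sdk lock configuration (where lock_internal_spin_unlock_with_notify() expands to a spin_unlock() followed by __sev()); the release_* function names are illustrative, not part of the patch:

#include "pico/mutex.h"
#include "hardware/sync.h"

// Normal release (what recursive_mutex_exit() effectively does): unlock,
// then SEV, which sets the event flag on both cores and wakes any WFE.
static void release_with_notify(recursive_mutex_t *mtx, uint32_t save) {
    spin_unlock(mtx->core.spin_lock, save);
    __sev();
}

// "nowait" release (as in the patch above): unlock only. Waiters must
// busy-poll, but no core's event flag is touched.
static void release_without_notify(recursive_mutex_t *mtx, uint32_t save) {
    spin_unlock(mtx->core.spin_lock, save);
}
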
diff --git a/ports/rp2/mutex_extra.h b/ports/rp2/mutex_extra.h
index 61b6b4035..5f4a2c364 100644
--- a/ports/rp2/mutex_extra.h
+++ b/ports/rp2/mutex_extra.h
@@ -31,4 +31,28 @@
uint32_t recursive_mutex_enter_blocking_and_disable_interrupts(recursive_mutex_t *mtx);
void recursive_mutex_exit_and_restore_interrupts(recursive_mutex_t *mtx, uint32_t save);
+// Alternative version of recursive_mutex_t that doesn't issue WFE and SEV
+// instructions. This means it will use more power (it busy-waits), but
+// exiting this mutex doesn't disrupt the calling CPU's event state in the
+// way a normal recursive mutex does (recursive_mutex_exit() executes SEV
+// each time the mutex is released).
+//
+// Implemented as a wrapper type because it is no longer compatible with the
+// normal recursive_mutex functions.
+
+typedef struct {
+    recursive_mutex_t mutex;
+} recursive_mutex_nowait_t;
+
+static inline void recursive_mutex_nowait_init(recursive_mutex_nowait_t *mtx) {
+    recursive_mutex_init(&mtx->mutex);
+}
+
+static inline bool recursive_mutex_nowait_try_enter(recursive_mutex_nowait_t *mtx, uint32_t *owner_out) {
+    return recursive_mutex_try_enter(&mtx->mutex, owner_out);
+}
+
+void recursive_mutex_nowait_enter_blocking(recursive_mutex_nowait_t *mtx);
+void recursive_mutex_nowait_exit(recursive_mutex_nowait_t *mtx);
+

#endif // MICROPY_INCLUDED_RP2_MUTEX_EXTRA_H
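
A quick usage sketch of the wrapper API declared above (the demo_* names are hypothetical, not part of the patch):

#include "mutex_extra.h"

static recursive_mutex_nowait_t demo_mutex;  // hypothetical example lock

void demo_init(void) {
    recursive_mutex_nowait_init(&demo_mutex);
}

void demo_work(void) {
    // Blocks by busy-waiting (tight_loop_contents()); never executes WFE.
    recursive_mutex_nowait_enter_blocking(&demo_mutex);

    // Same-core re-entry is allowed, exactly as with recursive_mutex_t.
    if (recursive_mutex_nowait_try_enter(&demo_mutex, NULL)) {
        recursive_mutex_nowait_exit(&demo_mutex);
    }

    // Release does not execute SEV, so no core's event flag is set.
    recursive_mutex_nowait_exit(&demo_mutex);
}
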
diff --git a/ports/rp2/pendsv.c b/ports/rp2/pendsv.c
index 68bb65f04..6cfe624c3 100644
--- a/ports/rp2/pendsv.c
+++ b/ports/rp2/pendsv.c
@@ -25,8 +25,8 @@
 */

#include <assert.h>

-#include "pico/mutex.h"
#include "py/mpconfig.h"
+#include "mutex_extra.h"
#include "pendsv.h"
#include "RP2040.h"
@@ -35,21 +35,25 @@
#endif

static pendsv_dispatch_t pendsv_dispatch_table[PENDSV_DISPATCH_NUM_SLOTS];
-static recursive_mutex_t pendsv_mutex;
+
+// Using the nowait variant here as softtimer updates PendSV from the loop of mp_wfe_or_timeout(),
+// where we don't want the CPU event bit to be set.
+static recursive_mutex_nowait_t pendsv_mutex;

void pendsv_init(void) {
-    recursive_mutex_init(&pendsv_mutex);
+    recursive_mutex_nowait_init(&pendsv_mutex);
}

void pendsv_suspend(void) {
    // A recursive mutex is used here as either core may call pendsv_suspend()
    // and expect both mutual exclusion (the other core can't enter
    // pendsv_suspend() at the same time) and that no PendSV handler will run.
-    recursive_mutex_enter_blocking(&pendsv_mutex);
+    recursive_mutex_nowait_enter_blocking(&pendsv_mutex);
}

void pendsv_resume(void) {
-    recursive_mutex_exit(&pendsv_mutex);
+    recursive_mutex_nowait_exit(&pendsv_mutex);
+
    // Run pendsv if needed. Find an entry with a dispatch and call pendsv
    // dispatch with it. If pendsv runs it will service all slots.
    int count = PENDSV_DISPATCH_NUM_SLOTS;
@@ -63,7 +67,7 @@ void pendsv_resume(void) {

void pendsv_schedule_dispatch(size_t slot, pendsv_dispatch_t f) {
    pendsv_dispatch_table[slot] = f;
-    if (pendsv_mutex.enter_count == 0) {
+    if (pendsv_mutex.mutex.enter_count == 0) {
        // There is a race here where other core calls pendsv_suspend() before
        // ISR can execute, but dispatch will happen later when other core
        // calls pendsv_resume().
@@ -78,13 +82,13 @@ void pendsv_schedule_dispatch(size_t slot, pendsv_dispatch_t f) {

// PendSV interrupt handler to perform background processing.
void PendSV_Handler(void) {
-    if (!recursive_mutex_try_enter(&pendsv_mutex, NULL)) {
+    if (!recursive_mutex_nowait_try_enter(&pendsv_mutex, NULL)) {
        // Failure here means core 1 holds pendsv_mutex. ISR will
        // run again after core 1 calls pendsv_resume().
        return;
    }
    // Core 0 should not already have locked pendsv_mutex
-    assert(pendsv_mutex.enter_count == 1);
+    assert(pendsv_mutex.mutex.enter_count == 1);

    #if MICROPY_PY_NETWORK_CYW43
    CYW43_STAT_INC(PENDSV_RUN_COUNT);
@@ -98,5 +102,5 @@ void PendSV_Handler(void) {
        }
    }

-    recursive_mutex_exit(&pendsv_mutex);
+    recursive_mutex_nowait_exit(&pendsv_mutex);
}
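
The motivation in the pendsv.c comment is the interaction between SEV and WFE: SEV sets the event register on both Cortex-M0+ cores, and a later WFE consumes the flag and returns immediately instead of sleeping. If pendsv_resume() executed SEV on every release (as the plain recursive mutex does), a WFE-based timeout loop like the one in mp_wfe_or_timeout() could never actually sleep. A simplified sketch of that failure mode (the loop body is illustrative, not the real mp_wfe_or_timeout() code):

#include <stdint.h>
#include "hardware/sync.h"
#include "hardware/timer.h"
#include "pendsv.h"

// Illustrative only: shows why a SEV inside pendsv_resume() would defeat
// the WFE below, turning a low-power wait into a busy-spin.
void wfe_or_timeout_sketch(uint32_t timeout_us) {
    uint32_t start = time_us_32();
    while (time_us_32() - start < timeout_us) {
        pendsv_suspend();
        // ... soft-timer housekeeping would happen here ...
        pendsv_resume();  // with a plain recursive mutex, this ran SEV

        // If the SEV above set this core's event flag, WFE returns
        // immediately and the loop never sleeps; with the nowait mutex
        // the flag stays clear and WFE actually waits for an event.
        __wfe();
    }
}
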