Diffstat (limited to 'py')
-rw-r--r--  py/mpconfig.h    5
-rw-r--r--  py/mpstate.h    10
-rw-r--r--  py/runtime.c    12
-rw-r--r--  py/runtime.h    10
-rw-r--r--  py/scheduler.c  52
5 files changed, 87 insertions, 2 deletions
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 754daa7bd..22dfbb867 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -893,6 +893,11 @@ typedef double mp_float_t;
#define MICROPY_ENABLE_SCHEDULER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
#endif
+// Whether the scheduler supports scheduling static nodes with C callbacks
+#ifndef MICROPY_SCHEDULER_STATIC_NODES
+#define MICROPY_SCHEDULER_STATIC_NODES (0)
+#endif
+
// Maximum number of entries in the scheduler
#ifndef MICROPY_SCHEDULER_DEPTH
#define MICROPY_SCHEDULER_DEPTH (4)
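The new option defaults to off. A port that wants C-level callbacks enables it alongside the scheduler itself; a minimal sketch of the relevant lines in a port's mpconfigport.h (the usual place for such overrides):

    // Sketch: enable the scheduler and static C-callback nodes for a port.
    // MICROPY_ENABLE_SCHEDULER may already be on via the ROM feature level.
    #define MICROPY_ENABLE_SCHEDULER (1)
    #define MICROPY_SCHEDULER_STATIC_NODES (1)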
diff --git a/py/mpstate.h b/py/mpstate.h
index a493b780a..ab6090e1a 100644
--- a/py/mpstate.h
+++ b/py/mpstate.h
@@ -241,6 +241,16 @@ typedef struct _mp_state_vm_t {
#if MICROPY_ENABLE_SCHEDULER
volatile int16_t sched_state;
+
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ // These will usually point to statically allocated memory. They are not
+ // traced by the GC. They are assumed to be zero'd out before mp_init() is
+ // called (usually because this struct lives in the BSS).
+ struct _mp_sched_node_t *sched_head;
+ struct _mp_sched_node_t *sched_tail;
+ #endif
+
+ // These index sched_queue.
uint8_t sched_len;
uint8_t sched_idx;
#endif
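Because the list is not traced by the GC, a node linked into it must not be GC-heap memory. In practice a node is given static storage duration, which also satisfies the zero-initialisation assumption above; a sketch (the node name is hypothetical):

    // Sketch: a node with static storage duration. It lives in the BSS, so
    // its callback field starts out NULL ("not scheduled"), and the GC can
    // neither move nor collect it.
    static mp_sched_node_t my_node;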
diff --git a/py/runtime.c b/py/runtime.c
index 2a07df664..02b866d83 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -65,7 +65,17 @@ void mp_init(void) {
// no pending exceptions to start with
MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
#if MICROPY_ENABLE_SCHEDULER
- MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ if (MP_STATE_VM(sched_head) == NULL) {
+ // no pending callbacks to start with
+ MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+ } else {
+ // pending callbacks are on the list, eg from before a soft reset
+ MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+ }
+ #else
+ MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+ #endif
MP_STATE_VM(sched_idx) = 0;
MP_STATE_VM(sched_len) = 0;
#endif
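The effect is that nodes scheduled immediately before a soft reset are not dropped. A sketch of why, in terms of a port's typical top-level loop (run_repl() is a hypothetical stand-in for the port's actual REPL entry point):

    // Sketch: mp_state_vm_t is zero'd once at hard reset; a soft reset loops
    // back into mp_init() without touching sched_head/sched_tail, so any
    // node queued just before the reset is still on the list here.
    for (;;) {
        mp_init();   // sees sched_head != NULL -> state = MP_SCHED_PENDING
        run_repl();  // hypothetical: the port's REPL / main script
        mp_deinit();
        // soft reset: fall through and re-initialise the VM
    }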
diff --git a/py/runtime.h b/py/runtime.h
index f0d41f38d..4393fbfa8 100644
--- a/py/runtime.h
+++ b/py/runtime.h
@@ -57,6 +57,15 @@ typedef struct _mp_arg_t {
mp_arg_val_t defval;
} mp_arg_t;
+struct _mp_sched_node_t;
+
+typedef void (*mp_sched_callback_t)(struct _mp_sched_node_t *);
+
+typedef struct _mp_sched_node_t {
+ mp_sched_callback_t callback;
+ struct _mp_sched_node_t *next;
+} mp_sched_node_t;
+
// Tables mapping operator enums to qstrs, defined in objtype.c
extern const byte mp_unary_op_method_name[];
extern const byte mp_binary_op_method_name[];
@@ -74,6 +83,7 @@ void mp_sched_lock(void);
void mp_sched_unlock(void);
#define mp_sched_num_pending() (MP_STATE_VM(sched_len))
bool mp_sched_schedule(mp_obj_t function, mp_obj_t arg);
+bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback);
#endif
// extra printing method specifically for mp_obj_t's which are integral type
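The callback receives only the node pointer, so any driver state must be reachable from the node itself. One common pattern is to embed the node as the first member of a larger state struct and cast back; a sketch (my_uart_t and its fields are hypothetical):

    // Sketch: embed the node in a hypothetical driver struct so the
    // callback can recover its context from the node pointer alone.
    typedef struct _my_uart_t {
        mp_sched_node_t sched_node; // first member, so the cast below is valid
        volatile uint16_t rx_pending;
    } my_uart_t;

    static void my_uart_process(mp_sched_node_t *node) {
        my_uart_t *self = (my_uart_t *)node;
        // ... consume self->rx_pending bytes outside of IRQ context ...
        (void)self;
    }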
diff --git a/py/scheduler.c b/py/scheduler.c
index bd0bbf207..3966da297 100644
--- a/py/scheduler.c
+++ b/py/scheduler.c
@@ -90,6 +90,24 @@ void mp_handle_pending(bool raise_exc) {
// or by the VM's inlined version of that function.
void mp_handle_pending_tail(mp_uint_t atomic_state) {
MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
+
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ // Run all pending C callbacks.
+ while (MP_STATE_VM(sched_head) != NULL) {
+ mp_sched_node_t *node = MP_STATE_VM(sched_head);
+ MP_STATE_VM(sched_head) = node->next;
+ if (MP_STATE_VM(sched_head) == NULL) {
+ MP_STATE_VM(sched_tail) = NULL;
+ }
+ mp_sched_callback_t callback = node->callback;
+ node->callback = NULL;
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ callback(node);
+ atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ }
+ #endif
+
+ // Run at most one pending Python callback.
if (!mp_sched_empty()) {
mp_sched_item_t item = MP_STATE_VM(sched_queue)[MP_STATE_VM(sched_idx)];
MP_STATE_VM(sched_idx) = IDX_MASK(MP_STATE_VM(sched_idx) + 1);
@@ -99,6 +117,7 @@ void mp_handle_pending_tail(mp_uint_t atomic_state) {
} else {
MICROPY_END_ATOMIC_SECTION(atomic_state);
}
+
mp_sched_unlock();
}
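Two details of this loop are worth noting: every pending C node is drained in one pass (whereas at most one Python callback runs per pass, below), and node->callback is reset before the callback is invoked, so an IRQ arriving while the callback is still executing can re-queue the same node without the event being lost. A sketch of the resulting timeline, assuming a hypothetical IRQ source:

    // Sketch of the timeline for one node n with callback cb:
    //   IRQ:  mp_sched_schedule_node(&n, cb)  -> queued, state = PENDING
    //   VM:   drain loop: n.callback = NULL; cb(&n) starts (IRQs enabled)
    //   IRQ:  mp_sched_schedule_node(&n, cb)  -> n is free again: re-queued
    //   VM:   cb(&n) returns; sched_head != NULL, so cb(&n) runs once more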
@@ -117,7 +136,11 @@ void mp_sched_unlock(void) {
assert(MP_STATE_VM(sched_state) < 0);
if (++MP_STATE_VM(sched_state) == 0) {
// vm became unlocked
- if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL || mp_sched_num_pending()) {
+ if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ || MP_STATE_VM(sched_head) != NULL
+ #endif
+ || mp_sched_num_pending()) {
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
} else {
MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
@@ -146,6 +169,33 @@ bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj
return ret;
}
+#if MICROPY_SCHEDULER_STATIC_NODES
+bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback) {
+ mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ bool ret;
+ if (node->callback == NULL) {
+ if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
+ MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+ }
+ node->callback = callback;
+ node->next = NULL;
+ if (MP_STATE_VM(sched_tail) == NULL) {
+ MP_STATE_VM(sched_head) = node;
+ } else {
+ MP_STATE_VM(sched_tail)->next = node;
+ }
+ MP_STATE_VM(sched_tail) = node;
+ MICROPY_SCHED_HOOK_SCHEDULED;
+ ret = true;
+ } else {
+ // already scheduled
+ ret = false;
+ }
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ return ret;
+}
+#endif
+
#else // MICROPY_ENABLE_SCHEDULER
// A variant of this is inlined in the VM at the pending exception check
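Putting it together, the intended call site for mp_sched_schedule_node() is hard-IRQ context: the function body runs entirely inside an atomic section, and a node that is already pending is detected via its non-NULL callback and rejected rather than queued twice. A usage sketch, reusing the hypothetical my_uart_t driver struct from the earlier sketch:

    // Sketch: scheduling work from a hard IRQ handler.
    static my_uart_t uart0;

    void uart0_irq_handler(void) {
        uart0.rx_pending++; // e.g. one byte landed in the FIFO
        // Safe in IRQ context: the body runs inside an atomic section.
        if (!mp_sched_schedule_node(&uart0.sched_node, my_uart_process)) {
            // Already pending: this event will be handled by the callback
            // run that is queued, so bursts of IRQs coalesce into one run.
        }
    }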