author    Jim Mussared <jim.mussared@gmail.com>    2022-07-01 11:22:24 +1000
committer Damien George <damien@micropython.org>   2022-07-12 15:54:33 +1000
commit    8db99f11a7da84e29c2e9b37e9f224ab424d73e1 (patch)
tree      a9393543c5aa3bdb0e2285ee0af2cef24a5e3709 /py/vm.c
parent    1329155b969386623200feff776e87336bf8e2e7 (diff)
py/scheduler: De-inline and fix race with pending exception / scheduler.
The optimisation that allows a single check in the VM for either a pending
exception or non-empty scheduler queue doesn't work when threading is enabled,
as one thread can clear the sched_state if it has no pending exception, meaning
the thread with the pending exception will never see it.

This removes that optimisation for threaded builds.

Also fixes a race in non-scheduler builds where get-and-clear of the pending
exception is not protected by the atomic section.

Also removes the bulk of the inlining of pending exceptions and scheduler
handling from the VM. This just costs code size and complexity at no
performance benefit.

Signed-off-by: Jim Mussared <jim.mussared@gmail.com>
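To make the threading race concrete, below is a minimal standalone model of
the failure mode, in plain C with pthreads. It is not MicroPython source: the
variables and thread bodies are hypothetical simplifications of the VM's
shared sched_state flag and per-thread mp_pending_exception, and the losing
interleaving is forced with joins rather than left to chance.

    /* Standalone model of the lost-exception race (hypothetical
     * simplification, not MicroPython code). */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static volatile int shared_pending = 1;    /* models MP_STATE_VM(sched_state) */
    static volatile bool b_pending_exc = true; /* models B's mp_pending_exception */

    static void *thread_a(void *arg) {
        (void)arg;
        /* A has no pending exception of its own: it services the
         * scheduler queue and clears the shared flag. */
        if (shared_pending) {
            /* ... run scheduled callbacks ... */
            shared_pending = 0;
        }
        return NULL;
    }

    static void *thread_b(void *arg) {
        (void)arg;
        /* B's fast path only re-checks the shared flag, so once A has
         * cleared it, B never inspects its own pending exception. */
        if (shared_pending && b_pending_exc) {
            b_pending_exc = false;
            printf("B raised its pending exception\n");
        }
        return NULL;
    }

    int main(void) {
        pthread_t a, b;
        pthread_create(&a, NULL, thread_a, NULL);
        pthread_join(a, NULL); /* force the losing interleaving */
        pthread_create(&b, NULL, thread_b, NULL);
        pthread_join(b, NULL);
        if (b_pending_exc) {
            printf("lost: B's exception was never raised\n");
        }
        return 0;
    }

Built with cc -pthread (file name up to you), the final message shows thread
B's exception being dropped because the shared flag it relied on was already
consumed by thread A, which is exactly why threaded builds must check both
conditions.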
Diffstat (limited to 'py/vm.c')
-rw-r--r--  py/vm.c | 48
1 file changed, 22 insertions(+), 26 deletions(-)
diff --git a/py/vm.c b/py/vm.c
index 5a624e91f..ce1e29318 100644
--- a/py/vm.c
+++ b/py/vm.c
@@ -1295,41 +1295,37 @@ yield:
#endif
pending_exception_check:
+ // We've just done a branch, use this as a convenient point to
+ // run periodic code/checks and/or bounce the GIL, i.e.
+ // not _every_ instruction but on average a branch should
+ // occur every few instructions.
MICROPY_VM_HOOK_LOOP
+ // Check for pending exceptions or scheduled tasks to run.
+ // Note: it's safe to just call mp_handle_pending(true), but
+ // we can inline the check for the common case where there is
+ // neither.
+ if (
#if MICROPY_ENABLE_SCHEDULER
- // This is an inlined variant of mp_handle_pending
- if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
- mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
- // Re-check state is still pending now that we're in the atomic section.
- if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
- MARK_EXC_IP_SELECTIVE();
- mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
- if (obj != MP_OBJ_NULL) {
- MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
- if (!mp_sched_num_pending()) {
- MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
- }
- MICROPY_END_ATOMIC_SECTION(atomic_state);
- RAISE(obj);
- }
- mp_handle_pending_tail(atomic_state);
- } else {
- MICROPY_END_ATOMIC_SECTION(atomic_state);
- }
- }
+ #if MICROPY_PY_THREAD
+ // Scheduler + threading: Scheduler and pending exceptions are independent, check both.
+ MP_STATE_VM(sched_state) == MP_SCHED_PENDING || MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
#else
- // This is an inlined variant of mp_handle_pending
- if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
+ // Scheduler + non-threading: Optimisation: pending exception sets sched_state, only check sched_state.
+ MP_STATE_VM(sched_state) == MP_SCHED_PENDING
+ #endif
+ #else
+ // No scheduler: Just check pending exception.
+ MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
+ #endif
+ ) {
MARK_EXC_IP_SELECTIVE();
- mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
- MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
- RAISE(obj);
+ mp_handle_pending(true);
}
- #endif
#if MICROPY_PY_THREAD_GIL
#if MICROPY_PY_THREAD_GIL_VM_DIVISOR
+ // Don't bounce the GIL too frequently (default every 32 branches).
if (--gil_divisor == 0)
#endif
{
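For reference, after this change the VM's slow path is just
mp_handle_pending(true). The following is a sketch of that consolidated
handler, reconstructed from the inlined logic removed above; it is not a
verbatim copy of py/scheduler.c, the raise_exc gating and the use of
nlr_raise() in place of the VM-only RAISE() macro are assumptions about the
de-inlined form, and only the scheduler-enabled variant is shown. It builds
only inside the MicroPython tree.

    #include "py/runtime.h"

    /* Sketch of the consolidated pending-exception / scheduler handler
     * (reconstruction, not upstream source). */
    void mp_handle_pending(bool raise_exc) {
        if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
            mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
            /* Re-check now that we hold the atomic section: another
             * thread may have serviced the queue in the meantime. */
            if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
                mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
                if (obj != MP_OBJ_NULL) {
                    /* Get-and-clear happens inside the atomic section,
                     * fixing the non-scheduler race described above. */
                    MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
                    if (!mp_sched_num_pending()) {
                        MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
                    }
                    if (raise_exc) {
                        MICROPY_END_ATOMIC_SECTION(atomic_state);
                        nlr_raise(obj);
                    }
                }
                mp_handle_pending_tail(atomic_state);
            } else {
                MICROPY_END_ATOMIC_SECTION(atomic_state);
            }
        }
    }

The key property is that the pending exception is fetched and cleared while
still inside the atomic section, so another thread can never observe a
half-updated sched_state / pending-exception pair.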