author     Robert Love <rml@tech9.net>                         2002-06-13 20:51:48 -0700
committer  Dave Kleikamp <shaggy@kleikamp.austin.ibm.com>      2002-06-13 20:51:48 -0700
commit     4d0b85ea4610b51beeb97cd0cc43edb1be42d063 (patch)
tree       94ee171a0673171fd474555a2d1b9a4529b72a41
parent     d257b778c1c21ebb99c400e0ffa9f3af215da18a (diff)
[PATCH] kernel preemption bits (2/2)
In both preempt_schedule in sched.c and resume_kernel in entry.S, it is
possible to return with need_resched set and thus a pending preemption
but not service that preemption for some time. Consider:

  - return from schedule() to preempt_schedule
  - interrupt occurs, sets need_resched
  - we cannot preempt since preempt_count = PREEMPT_ACTIVE
  - back in preempt_schedule, set preempt_count = 0

Now we again can preempt, but we will not. Instead we return and
continue executing. On the next interrupt, we will redo the whole
fiasco, which is a waste since we could have reentered schedule while
we were there. Worse, if we acquire a lock before the next interrupt we
can potentially delay the pending reschedule a very long time. This is
not acceptable. The solution is to check for and loop on need_resched
in resume_kernel and preempt_schedule, like schedule itself does.
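To make the window concrete, here is a rough C sketch of the pre-patch
preempt_schedule() flow, with comments marking where the pending
preemption gets stranded. This is illustrative only, not the kernel
source; the real before/after code is in the diff below.

/* Illustrative sketch of the PRE-patch flow; not the kernel source. */
asmlinkage void preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	if (unlikely(ti->preempt_count))
		return;

	ti->preempt_count = PREEMPT_ACTIVE;
	schedule();
	/*
	 * An interrupt landing here can set TIF_NEED_RESCHED, but it
	 * cannot preempt us: preempt_count is still PREEMPT_ACTIVE.
	 */
	ti->preempt_count = 0;
	/*
	 * Preemption is legal again, yet the old code returned without
	 * rechecking the flag, so the reschedule stays pending until
	 * the next interrupt (or much longer, if a lock raises
	 * preempt_count first).
	 */
}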
-rw-r--r--  arch/i386/kernel/entry.S  4
-rw-r--r--  kernel/sched.c            6
2 files changed, 9 insertions, 1 deletion
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 6a42ae06ccbe..09d303454378 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -211,6 +211,7 @@ ENTRY(resume_userspace)
ENTRY(resume_kernel)
	cmpl $0,TI_PRE_COUNT(%ebx)	# non-zero preempt_count ?
	jnz restore_all
+need_resched:
	movl TI_FLAGS(%ebx), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
@@ -220,7 +221,8 @@ ENTRY(resume_kernel)
	sti
	call schedule
	movl $0,TI_PRE_COUNT(%ebx)
-	jmp restore_all
+	cli
+	jmp need_resched
#endif

	# system call handler stub
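For readers who do not speak i386 assembly, the patched resume_kernel
path is roughly equivalent to the C sketch below. The function name,
local_irq_enable/local_irq_disable, and the returns standing in for the
jumps to restore_all are illustrative stand-ins, not the real code.

/* Rough C rendering of the patched resume_kernel loop above;
 * the names here are illustrative stand-ins for the assembly. */
void resume_kernel_sketch(struct thread_info *ti)
{
	if (ti->preempt_count)			/* non-zero preempt_count? */
		return;				/* i.e. jmp restore_all */
need_resched:
	if (!(ti->flags & _TIF_NEED_RESCHED))	/* need_resched set? */
		return;				/* i.e. jmp restore_all */
	ti->preempt_count = PREEMPT_ACTIVE;
	local_irq_enable();			/* sti */
	schedule();
	ti->preempt_count = 0;
	local_irq_disable();			/* cli */
	goto need_resched;			/* recheck until the flag is clear */
}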
diff --git a/kernel/sched.c b/kernel/sched.c
index 1254ad6df71f..7dc3cd10f3b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -893,9 +893,15 @@ asmlinkage void preempt_schedule(void)
	if (unlikely(ti->preempt_count))
		return;

+need_resched:
	ti->preempt_count = PREEMPT_ACTIVE;
	schedule();
	ti->preempt_count = 0;
+
+	/* we can miss a preemption opportunity between schedule and now */
+	barrier();
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+		goto need_resched;
}
#endif /* CONFIG_PREEMPT */