diff options
| -rw-r--r-- | arch/i386/kernel/entry.S | 4 |
| -rw-r--r-- | kernel/sched.c | 6 |
2 files changed, 9 insertions, 1 deletion
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 6a42ae06ccbe..09d303454378 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -211,6 +211,7 @@ ENTRY(resume_userspace) ENTRY(resume_kernel) cmpl $0,TI_PRE_COUNT(%ebx) # non-zero preempt_count ? jnz restore_all +need_resched: movl TI_FLAGS(%ebx), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl jz restore_all @@ -220,7 +221,8 @@ ENTRY(resume_kernel) sti call schedule movl $0,TI_PRE_COUNT(%ebx) - jmp restore_all + cli + jmp need_resched #endif # system call handler stub diff --git a/kernel/sched.c b/kernel/sched.c index 1254ad6df71f..7dc3cd10f3b8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -893,9 +893,15 @@ asmlinkage void preempt_schedule(void) if (unlikely(ti->preempt_count)) return; +need_resched: ti->preempt_count = PREEMPT_ACTIVE; schedule(); ti->preempt_count = 0; + + /* we can miss a preemption opportunity between schedule and now */ + barrier(); + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) + goto need_resched; } #endif /* CONFIG_PREEMPT */ |
