| author | Ingo Molnar <mingo@elte.hu> | 2002-07-23 20:04:01 -0700 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2002-07-23 20:04:01 -0700 |
| commit | a6efb709806ceffb0331c22328267d7f327c6518 | |
| tree | d7b8229a573e88b8b7eefa4bcb10f057917f2b31 | /kernel |
| parent | 1da3174ffe3d0f29d21209a74e510723c9655031 | |
[PATCH] irqlock patch 2.5.27-H6
- init thread needs to have preempt_count of 1 until sched_init().
  (William Lee Irwin III; see the first sketch after this list.)
- clean up the irq-mask macros. (Linus)
- add barrier() to irq_enter() and irq_exit(). (based on Oleg Nesterov's
  comment; the irq_enter()/irq_exit() shape is sketched after this list.)
- move the irqs-off check into preempt_schedule() and remove
CONFIG_DEBUG_IRQ_SCHEDULE.
- remove spin_unlock_no_resched() and comment the affected places more
  aggressively. (its former expansion is sketched after this list.)
- slab.c needs to spin_unlock_no_resched(), instead of spin_unlock(). (It
also has to check for preemption in the right spot.) This should fix
the memory corruption.
- irq_exit() needs to run softirqs if interrupts not active - in the
previous patch it ran them when preempt_count() was 0, which is
incorrect.
- spinlock macros are updated to enable preemption after enabling
  interrupts (unlock ordering sketched after this list). Besides
  avoiding false positive warnings, this also
- fork.c has to call scheduler_tick() with preemption disabled -
otherwise scheduler_tick()'s spin_unlock can preempt!
- irqs_disabled() macro introduced. (a possible i386 expansion is
  sketched after this list.)
- [ all other local_irq_enable() or sti instances conditional on
CONFIG_DEBUG_IRQ_SCHEDULE are to fix false positive warnings. ]
- fix buggy in_softirq(). Fortunately the bug made the test broader,
  which didn't result in algorithmic breakage, just suboptimal
  performance. (the count layout involved is sketched after this list.)
- move do_softirq() processing into irq_exit() => this also fixes the
softirq processing bugs present in apic.c IRQ handlers that did not
test for softirqs after irq_exit().
- simplify local_bh_enable(). (also sketched after this list.)
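
The preempt_count bootstrap in the first item can be pictured as below. This is a hedged sketch, not the verbatim 2.5.27 code: the INIT_THREAD_INFO shape and the exact spot in sched_init() are assumptions; the point is only that the boot thread runs with preemption counted as disabled until the scheduler data structures exist.

```c
/* Sketch: boot-time preempt_count handling (assumed code shape). */

/* The init thread starts with preempt_count == 1, so nothing can
 * preempt it before the runqueues are set up. */
#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.preempt_count	= 1,	/* disabled until sched_init() */	\
}

void __init sched_init(void)
{
	/* ... runqueue and init-task setup elided ... */

	/*
	 * Scheduler state exists now; drop the artificial count so
	 * the boot thread becomes preemptible like everyone else.
	 */
	current_thread_info()->preempt_count = 0;
}
```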
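The barrier() item, the irq_exit()-runs-softirqs item, and the do_softirq()-moved-into-irq_exit() item combine into roughly the following shape. The real macros are per-architecture, and these bodies are an assumed sketch rather than the patch's literal code; HARDIRQ_OFFSET, in_interrupt(), softirq_pending() and do_softirq() are the usual hardirq/softirq helpers.

```c
/* Sketch of the hardirq entry/exit scheme (assumed macro bodies). */
#define irq_enter()						\
	do {							\
		preempt_count() += HARDIRQ_OFFSET;		\
		/* no code motion into the irq section: */	\
		barrier();					\
	} while (0)

#define irq_exit()						\
	do {							\
		/* no code motion out of the irq section: */	\
		barrier();					\
		preempt_count() -= HARDIRQ_OFFSET;		\
		/* run softirqs when leaving the last hardirq, */ \
		/* but only if interrupts are usable: */	\
		if (!in_interrupt() && !irqs_disabled() &&	\
		    softirq_pending(smp_processor_id()))	\
			do_softirq();				\
	} while (0)
```

Running do_softirq() here is what fixes the apic.c handlers: they no longer need their own softirq check after irq_exit().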
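What spin_unlock_no_resched() stood for, and what the sys_sched_yield() hunk below now open-codes, is "drop the lock and the preempt count, but skip the resched check, because the caller is about to call schedule() itself". A sketch using the primitives visible in the sched.c hunk (the helper name is made up for illustration):

```c
/* Sketch: the expansion formerly hidden behind spin_unlock_no_resched(). */
static inline void unlock_no_resched(spinlock_t *lock)	/* illustrative name */
{
	_raw_spin_unlock(lock);		/* release the lock itself */
	preempt_enable_no_resched();	/* drop the count, but skip the
					 * preemption check: the caller
					 * schedules on its own */
}
```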
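The unlock-ordering item means the irq-enabling unlock variants now re-enable interrupts before re-enabling preemption, so the preemption check can never run with interrupts disabled (which would trip the new warning in preempt_schedule()). An assumed expansion:

```c
/* Sketch: assumed spin_unlock_irq() ordering after this patch. */
#define spin_unlock_irq(lock)					\
	do {							\
		_raw_spin_unlock(lock);				\
		local_irq_enable();	/* irqs on first... */	\
		preempt_enable();	/* ...then the resched check */ \
	} while (0)
```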
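One plausible i386 expansion of the new irqs_disabled() macro (an assumption; the real definition is per-architecture, and bit 9 of EFLAGS is the IF flag):

```c
/* Sketch: i386-style irqs_disabled() (assumed shape). */
#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	!(flags & (1 << 9));	/* IF clear => interrupts off */ \
})
```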
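The in_softirq() fix and the simplified local_bh_enable() both lean on preempt_count being split into separate bit fields. The exact offsets below are assumptions; what matters is that in_softirq() now tests only the softirq bits instead of the whole interrupt count, so the old, broader test was merely pessimistic rather than wrong.

```c
/* Sketch: assumed preempt_count layout and the helpers built on it. */
#define PREEMPT_MASK	0x000000ff	/* preempt_disable() nesting */
#define SOFTIRQ_MASK	0x0000ff00	/* softirq nesting */
#define HARDIRQ_MASK	0x00ff0000	/* hardirq nesting */
#define SOFTIRQ_OFFSET	0x00000100
#define HARDIRQ_OFFSET	0x00010000

#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define irq_count()	(preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))

#define in_softirq()	(softirq_count())	/* the narrowed test */
#define in_interrupt()	(irq_count())

/* Simplified local_bh_enable(): drop the softirq count, then run any
 * pending softirqs if we are not nested inside another irq context. */
#define local_bh_enable()					\
	do {							\
		preempt_count() -= SOFTIRQ_OFFSET;		\
		if (!in_interrupt() &&				\
		    softirq_pending(smp_processor_id()))	\
			do_softirq();				\
	} while (0)
```

irq_count() and HARDIRQ_OFFSET from this layout are what the scheduler_tick() hunk below switches to.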
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/fork.c | 12 |
| -rw-r--r-- | kernel/sched.c | 22 |
2 files changed, 24 insertions, 10 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 99ac5e5799d3..8fbe794a5ece 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -740,10 +740,10 @@ struct task_struct *do_fork(unsigned long clone_flags,
 	 * total amount of pending timeslices in the system doesnt change,
 	 * resulting in more scheduling fairness.
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	p->time_slice = (current->time_slice + 1) >> 1;
 	current->time_slice >>= 1;
+	p->sleep_timestamp = jiffies;
 	if (!current->time_slice) {
 		/*
 		 * This case is rare, it happens when the parent has only
@@ -751,10 +751,12 @@ struct task_struct *do_fork(unsigned long clone_flags,
 		 * runqueue lock is not a problem.
 		 */
 		current->time_slice = 1;
+		preempt_disable();
 		scheduler_tick(0, 0);
-	}
-	p->sleep_timestamp = jiffies;
-	local_irq_restore(flags);
+		local_irq_restore(flags);
+		preempt_enable();
+	} else
+		local_irq_restore(flags);
 
 	/*
 	 * Ok, add it to the run-queues and make it
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a6ce9d15d8a..ccceab9745db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -728,7 +728,7 @@ void scheduler_tick(int user_tick, int system)
 	if (p == rq->idle) {
 		/* note: this timer irq context must be accounted for as well */
-		if (preempt_count() >= 2*IRQ_OFFSET)
+		if (irq_count() >= 2*HARDIRQ_OFFSET)
 			kstat.per_cpu_system[cpu] += system;
 #if CONFIG_SMP
 		idle_tick();
@@ -902,6 +902,12 @@ asmlinkage void preempt_schedule(void)
 	if (unlikely(ti->preempt_count))
 		return;
 
+	if (unlikely(irqs_disabled())) {
+		preempt_disable();
+		printk("bad: schedule() with irqs disabled!\n");
+		show_stack(NULL);
+		preempt_enable_no_resched();
+	}
 need_resched:
 	ti->preempt_count = PREEMPT_ACTIVE;
@@ -1020,7 +1026,7 @@ void wait_for_completion(struct completion *x)
 	wait_queue_t wait; \
 	init_waitqueue_entry(&wait, current);
 
-#define SLEEP_ON_HEAD \
+#define SLEEP_ON_HEAD				\
 	spin_lock_irqsave(&q->lock,flags); \
 	__add_wait_queue(q, &wait); \
 	spin_unlock(&q->lock);
@@ -1467,7 +1473,12 @@ asmlinkage long sys_sched_yield(void)
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock_no_resched(&rq->lock);
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt:
+	 */
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
 
 	schedule();
 
@@ -1680,8 +1691,7 @@ void __init init_idle(task_t *idle, int cpu)
 	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;
 
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	double_rq_lock(idle_rq, rq);
 
 	idle_rq->curr = idle_rq->idle = idle;
@@ -1697,6 +1707,8 @@ void __init init_idle(task_t *idle, int cpu)
 	/* Set the preempt count _outside_ the spinlocks! */
 #if CONFIG_PREEMPT
 	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#else
+	idle->thread_info->preempt_count = 0;
 #endif
 }
