summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2002-10-10 20:05:46 -0700
committerAnton Blanchard <anton@samba.org>2002-10-10 20:05:46 -0700
commit31989cba5d3acc6dfad2becd96eda6f1fb37383e (patch)
treee3a1122f219bc9003f6c47012919fdc9f7869323 /kernel
parentc0c7e4767e1041e8546efa420af5ab444aa8f632 (diff)
[PATCH] sched-2.5.41-A0
This fixes the scheduler's migration code to not disable preemption. It also fixes the bug that was hidden by the broken disable-preempt change: the migration thread did not kick idle CPUs if a task was migrated to them, which caused a hung boot when ksoftirqds were started. It was pure luck it worked until now; it was broken pretty much from day 1 on.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c22
1 file changed, 9 insertions, 13 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 62e81a6f285d..0464ac0649b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1953,7 +1953,6 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
BUG();
#endif
- preempt_disable();
rq = task_rq_lock(p, &flags);
p->cpus_allowed = new_mask;
/*
@@ -1962,7 +1961,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
*/
if (new_mask & (1UL << task_cpu(p))) {
task_rq_unlock(rq, &flags);
- goto out;
+ return;
}
/*
* If the task is not on a runqueue (and not running), then
@@ -1971,17 +1970,16 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
if (!p->array && !task_running(rq, p)) {
set_task_cpu(p, __ffs(p->cpus_allowed));
task_rq_unlock(rq, &flags);
- goto out;
+ return;
}
init_completion(&req.done);
req.task = p;
list_add(&req.list, &rq->migration_queue);
task_rq_unlock(rq, &flags);
+
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
-out:
- preempt_enable();
}
/*
@@ -1999,16 +1997,12 @@ static int migration_thread(void * data)
sigfillset(&current->blocked);
set_fs(KERNEL_DS);
- set_cpus_allowed(current, 1UL << cpu);
-
/*
- * Migration can happen without a migration thread on the
- * target CPU because here we remove the thread from the
- * runqueue and the helper thread then moves this thread
- * to the target CPU - we'll wake up there.
+	 * Either we are running on the right CPU, or there's a
+	 * migration thread on the target CPU, guaranteed.
*/
- if (smp_processor_id() != cpu)
- printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
+ set_cpus_allowed(current, 1UL << cpu);
+
ret = setscheduler(0, SCHED_FIFO, &param);
rq = this_rq();
@@ -2055,6 +2049,8 @@ repeat:
if (p->array) {
deactivate_task(p, rq_src);
activate_task(p, rq_dest);
+ if (p->prio < rq_dest->curr->prio)
+ resched_task(rq_dest->curr);
}
}
double_rq_unlock(rq_src, rq_dest);