author     Rusty Russell <rusty@rustcorp.com.au>    2004-08-26 20:30:32 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2004-08-26 20:30:32 -0700
commit     a5212682cc2c0a2e3fb1ab50160861d5cca04fc2 (patch)
tree       ea9ae134f24a7ca1ec9c9db8c6814cd02892f429 /kernel
parent     e50873c0fe87b08a467e16f6c24371d7e4a81b8c (diff)
[PATCH] Fix CPU Hotplug: neaten migrate_all_tasks.
A followup patch wants to do forced migration, so separate that part of the code out of migrate_all_tasks().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 68
1 file changed, 35 insertions, 33 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 0b415e7b429d..732344a7be1b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3957,50 +3957,52 @@ wait_to_die:
}
#ifdef CONFIG_HOTPLUG_CPU
+/* Figure out where task on dead CPU should go, use force if necessary. */
+static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
+{
+	int dest_cpu;
+	cpumask_t mask;
+
+	/* On same node? */
+	mask = node_to_cpumask(cpu_to_node(dead_cpu));
+	cpus_and(mask, mask, tsk->cpus_allowed);
+	dest_cpu = any_online_cpu(mask);
+
+	/* On any allowed CPU? */
+	if (dest_cpu == NR_CPUS)
+		dest_cpu = any_online_cpu(tsk->cpus_allowed);
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu == NR_CPUS) {
+		cpus_setall(tsk->cpus_allowed);
+		dest_cpu = any_online_cpu(tsk->cpus_allowed);
+
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (tsk->mm && printk_ratelimit())
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       tsk->pid, tsk->comm, dead_cpu);
+	}
+	__migrate_task(tsk, dead_cpu, dest_cpu);
+}
+
/* migrate_all_tasks - function to migrate all tasks from the dead cpu. */
static void migrate_all_tasks(int src_cpu)
{
	struct task_struct *tsk, *t;
-	int dest_cpu;
-	unsigned int node;
	write_lock_irq(&tasklist_lock);
-	/* watch out for per node tasks, let's stay on this node */
-	node = cpu_to_node(src_cpu);
-
	do_each_thread(t, tsk) {
-		cpumask_t mask;
		if (tsk == current)
			continue;
-		if (task_cpu(tsk) != src_cpu)
-			continue;
-
-		/* Figure out where this task should go (attempting to
-		 * keep it on-node), and check if it can be migrated
-		 * as-is. NOTE that kernel threads bound to more than
-		 * one online cpu will be migrated. */
-		mask = node_to_cpumask(node);
-		cpus_and(mask, mask, tsk->cpus_allowed);
-		dest_cpu = any_online_cpu(mask);
-		if (dest_cpu == NR_CPUS)
-			dest_cpu = any_online_cpu(tsk->cpus_allowed);
-		if (dest_cpu == NR_CPUS) {
-			cpus_setall(tsk->cpus_allowed);
-			dest_cpu = any_online_cpu(tsk->cpus_allowed);
-
-			/*
-			 * Don't tell them about moving exiting tasks
-			 * or kernel threads (both mm NULL), since
-			 * they never leave kernel.
-			 */
-			if (tsk->mm && printk_ratelimit())
-				printk(KERN_INFO "process %d (%s) no "
-				       "longer affine to cpu%d\n",
-				       tsk->pid, tsk->comm, src_cpu);
-		}
-		__migrate_task(tsk, src_cpu, dest_cpu);
+		if (task_cpu(tsk) == src_cpu)
+			move_task_off_dead_cpu(src_cpu, tsk);
	} while_each_thread(t, tsk);
	write_unlock_irq(&tasklist_lock);
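
For context, the extracted helper is what a later forced-migration pass can now call directly: it tries the dead CPU's node first, then any CPU in the task's allowed mask, and finally forces the affinity open. A minimal sketch of such a caller follows; the function name migrate_dead() and its role in the followup patch are assumptions for illustration only, not part of this commit.

	/* Hypothetical followup caller (illustrative only, not in this patch):
	 * push a task still stuck on a dead CPU somewhere sensible by reusing
	 * move_task_off_dead_cpu(), which picks same-node -> any-allowed ->
	 * any-online as a last resort. */
	static void migrate_dead(unsigned int dead_cpu, struct task_struct *tsk)
	{
		/* Caller is assumed to hold the appropriate locks, as
		 * migrate_all_tasks() does with tasklist_lock. */
		move_task_off_dead_cpu(dead_cpu, tsk);
	}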