summary | refs | log | tree | commit | diff
path: root/kernel/sched.c
diff options
context:
space:
mode:
author: Rusty Russell <rusty@rustcorp.com.au> 2002-07-26 01:28:07 -0700
committer: Linus Torvalds <torvalds@penguin.transmeta.com> 2002-07-26 01:28:07 -0700
commitc5e062079a7090891ea5cd1b23a7eab52b156b2a (patch)
treeb3ed0870f22e6375a39dada4d8d37ea2a94fcfa9 /kernel/sched.c
parente1eec525be1b708894650a4573d6d7f61e96c4fa (diff)
[PATCH] Hot-plug CPU Boot Changes
This patch alters the boot sequence to "plug in" each CPU, one at a time. You need the patch for each architecture, as well. The interface used to be "smp_boot_cpus()", "smp_commence()", and each arch implemented the "maxcpus" boot arg itself. With this patch, it is: smp_prepare_cpus(maxcpus): probe for cpus and set up cpu_possible(cpu). __cpu_up(cpu): called *after* initcalls, for each cpu where cpu_possible(cpu) is true. smp_cpus_done(maxcpus): called after every cpu has been brought up
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c58
1 file changed, 28 insertions, 30 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a54ee5a0356c..67ac32a24c1f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -27,6 +27,8 @@
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
@@ -1777,9 +1779,11 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
migration_req_t req;
runqueue_t *rq;
+#if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
new_mask &= cpu_online_map;
if (!new_mask)
BUG();
+#endif
preempt_disable();
rq = task_rq_lock(p, &flags);
@@ -1812,8 +1816,6 @@ out:
preempt_enable();
}
-static __initdata int master_migration_thread;
-
static int migration_thread(void * bind_cpu)
{
int cpu = (int) (long) bind_cpu;
@@ -1825,15 +1827,7 @@ static int migration_thread(void * bind_cpu)
sigfillset(&current->blocked);
set_fs(KERNEL_DS);
- /*
- * The first migration thread is started on the boot CPU, it
- * migrates the other migration threads to their destination CPUs.
- */
- if (cpu != master_migration_thread) {
- while (!cpu_rq(master_migration_thread)->migration_thread)
- yield();
- set_cpus_allowed(current, 1UL << cpu);
- }
+ set_cpus_allowed(current, 1UL << cpu);
printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
ret = setscheduler(0, SCHED_FIFO, &param);
@@ -1890,29 +1884,33 @@ repeat:
}
}
-void __init migration_init(void)
+static int migration_call(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
- int cpu;
-
- master_migration_thread = smp_processor_id();
- current->cpus_allowed = 1UL << master_migration_thread;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
- if (kernel_thread(migration_thread, (void *) (long) cpu,
- CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
- BUG();
+ switch (action) {
+ case CPU_ONLINE:
+ printk("Starting migration thread for cpu %li\n",
+ (long)hcpu);
+ kernel_thread(migration_thread, hcpu,
+ CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+ break;
}
- current->cpus_allowed = -1L;
+ return NOTIFY_OK;
+}
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (!cpu_online(cpu))
- continue;
- while (!cpu_rq(cpu)->migration_thread)
- schedule_timeout(2);
- }
+static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };
+
+int __init migration_init(void)
+{
+ /* Start one for boot CPU. */
+ migration_call(&migration_notifier, CPU_ONLINE,
+ (void *)smp_processor_id());
+ register_cpu_notifier(&migration_notifier);
+ return 0;
}
+
+__initcall(migration_init);
#endif
extern void init_timervecs(void);