summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@penguin.transmeta.com>	2002-10-30 00:25:56 -0800
committer	Linus Torvalds <torvalds@penguin.transmeta.com>	2002-10-30 00:25:56 -0800
commit	dc85a09d313235fd1dab3adeeb2f79142475b49e (patch)
tree	41750fef3d2517ae01a889b5110dae1fd626c5b4 /kernel
parent	4c664ca51867c1d26d4a294db435584faad200e4 (diff)
parent	a0e7d495df35797364092fedff52ec488ec702eb (diff)
Merge master.kernel.org:/home/davem/BK/net-2.5
into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c      | 20
-rw-r--r--	kernel/fork.c     |  2
-rw-r--r--	kernel/ksyms.c    |  2
-rw-r--r--	kernel/rcupdate.c | 43
-rw-r--r--	kernel/softirq.c  | 37
-rw-r--r--	kernel/timer.c    | 93
6 files changed, 156 insertions(+), 41 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a155998dbe3e..4c0ada2b99ae 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -29,26 +29,38 @@ void unregister_cpu_notifier(struct notifier_block *nb)
int __devinit cpu_up(unsigned int cpu)
{
int ret;
+ void *hcpu = (void *)(long)cpu;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = down_interruptible(&cpucontrol)) != 0)
return ret;
if (cpu_online(cpu)) {
ret = -EINVAL;
goto out;
}
+ ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+ if (ret == NOTIFY_BAD) {
+ printk("%s: attempt to bring up CPU %u failed\n",
+ __FUNCTION__, cpu);
+ ret = -EINVAL;
+ goto out_notify;
+ }
/* Arch-specific enabling code. */
ret = __cpu_up(cpu);
- if (ret != 0) goto out;
+ if (ret != 0)
+ goto out_notify;
if (!cpu_online(cpu))
BUG();
/* Now call notifier in preparation. */
printk("CPU %u IS NOW UP!\n", cpu);
- notifier_call_chain(&cpu_chain, CPU_ONLINE, (void *)(long)cpu);
+ notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
- out:
+out_notify:
+ if (ret != 0)
+ notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+out:
up(&cpucontrol);
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 2f5f00301182..4a33d682dfaa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -769,8 +769,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->start_time = jiffies;
p->security = NULL;
- INIT_LIST_HEAD(&p->local_pages);
-
retval = -ENOMEM;
if (security_ops->task_alloc_security(p))
goto bad_fork_cleanup;
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 7ecffcd552d1..6628b24ba827 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -253,7 +253,9 @@ EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(is_subdir);
EXPORT_SYMBOL(get_unused_fd);
EXPORT_SYMBOL(vfs_read);
+EXPORT_SYMBOL(vfs_readv);
EXPORT_SYMBOL(vfs_write);
+EXPORT_SYMBOL(vfs_writev);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 1a149dff7832..91483119714c 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -41,13 +41,14 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
#include <linux/rcupdate.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
.maxbatch = 1, .rcu_cpu_mask = 0 };
-struct rcu_data rcu_data[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
@@ -198,6 +199,33 @@ void rcu_check_callbacks(int cpu, int user)
tasklet_schedule(&RCU_tasklet(cpu));
}
+static void __devinit rcu_online_cpu(int cpu)
+{
+ memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
+ tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
+ INIT_LIST_HEAD(&RCU_nxtlist(cpu));
+ INIT_LIST_HEAD(&RCU_curlist(cpu));
+}
+
+static int __devinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ rcu_online_cpu(cpu);
+ break;
+ /* Space reserved for CPU_OFFLINE :) */
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
/*
* Initializes rcu mechanism. Assumed to be called early.
* That is before local timer(SMP) or jiffie timer (uniproc) is setup.
@@ -206,16 +234,13 @@ void rcu_check_callbacks(int cpu, int user)
*/
void __init rcu_init(void)
{
- int i;
-
- memset(&rcu_data[0], 0, sizeof(rcu_data));
- for (i = 0; i < NR_CPUS; i++) {
- tasklet_init(&RCU_tasklet(i), rcu_process_callbacks, 0UL);
- INIT_LIST_HEAD(&RCU_nxtlist(i));
- INIT_LIST_HEAD(&RCU_curlist(i));
- }
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
}
+
/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(void *completion)
{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 45e8712f9f70..59fb7acb0ee1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -12,6 +12,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/notifier.h>
/*
- No shared variables, all the data are CPU local.
@@ -260,10 +261,39 @@ void tasklet_kill(struct tasklet_struct *t)
clear_bit(TASKLET_STATE_SCHED, &t->state);
}
+
+static void tasklet_init_cpu(int cpu)
+{
+ per_cpu(tasklet_vec, cpu).list = NULL;
+ per_cpu(tasklet_hi_vec, cpu).list = NULL;
+}
+
+static int tasklet_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ switch(action) {
+ case CPU_UP_PREPARE:
+ tasklet_init_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block tasklet_nb = {
+ .notifier_call = tasklet_cpu_notify,
+ .next = NULL,
+};
+
void __init softirq_init()
{
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+ tasklet_cpu_notify(&tasklet_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&tasklet_nb);
}
static int ksoftirqd(void * __bind_cpu)
@@ -316,12 +346,13 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
while (!ksoftirqd_task(hotcpu))
yield();
- return NOTIFY_OK;
}
- return NOTIFY_BAD;
+ return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = { &cpu_callback, NULL, 0 };
+static struct notifier_block __devinitdata cpu_nfb = {
+ .notifier_call = cpu_callback
+};
__init int spawn_ksoftirqd(void)
{
diff --git a/kernel/timer.c b/kernel/timer.c
index 2d30f7fd0ecb..ebb2b6c627cc 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -24,6 +24,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
@@ -62,7 +63,8 @@ struct tvec_t_base_s {
typedef struct tvec_t_base_s tvec_base_t;
-static tvec_base_t tvec_bases[NR_CPUS] __cacheline_aligned;
+/* Fake initialization */
+static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED };
/* Fake initialization needed to avoid compiler breakage */
static DEFINE_PER_CPU(struct tasklet_struct, timer_tasklet) = { NULL };
@@ -122,7 +124,7 @@ static inline void internal_add_timer(tvec_base_t *base, timer_t *timer)
void add_timer(timer_t *timer)
{
int cpu = get_cpu();
- tvec_base_t *base = tvec_bases + cpu;
+ tvec_base_t *base = &per_cpu(tvec_bases, cpu);
unsigned long flags;
BUG_ON(timer_pending(timer) || !timer->function);
@@ -135,6 +137,26 @@ void add_timer(timer_t *timer)
}
/***
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+ tvec_base_t *base = &per_cpu(tvec_bases, cpu);
+ unsigned long flags;
+
+ BUG_ON(timer_pending(timer) || !timer->function);
+
+ spin_lock_irqsave(&base->lock, flags);
+ internal_add_timer(base, timer);
+ timer->base = base;
+ spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/***
* mod_timer - modify a timer's timeout
* @timer: the timer to be modified
*
@@ -169,7 +191,7 @@ int mod_timer(timer_t *timer, unsigned long expires)
return 1;
local_irq_save(flags);
- new_base = tvec_bases + smp_processor_id();
+ new_base = &per_cpu(tvec_bases, smp_processor_id());
repeat:
old_base = timer->base;
@@ -265,15 +287,17 @@ repeat:
*/
int del_timer_sync(timer_t *timer)
{
- tvec_base_t *base = tvec_bases;
+ tvec_base_t *base;
int i, ret = 0;
del_again:
ret += del_timer(timer);
- for (i = 0; i < NR_CPUS; i++, base++) {
+ for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
+
+ base = &per_cpu(tvec_bases, i);
if (base->running_timer == timer) {
while (base->running_timer == timer) {
cpu_relax();
@@ -711,7 +735,7 @@ unsigned long last_time_offset;
*/
static void run_timer_tasklet(unsigned long data)
{
- tvec_base_t *base = tvec_bases + smp_processor_id();
+ tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id());
if ((long)(jiffies - base->timer_jiffies) >= 0)
__run_timers(base);
@@ -1066,23 +1090,46 @@ out:
return 0;
}
-void __init init_timers(void)
+static void __devinit init_timers_cpu(int cpu)
{
- int i, j;
-
- for (i = 0; i < NR_CPUS; i++) {
- tvec_base_t *base;
-
- base = tvec_bases + i;
- spin_lock_init(&base->lock);
- for (j = 0; j < TVN_SIZE; j++) {
- INIT_LIST_HEAD(base->tv5.vec + j);
- INIT_LIST_HEAD(base->tv4.vec + j);
- INIT_LIST_HEAD(base->tv3.vec + j);
- INIT_LIST_HEAD(base->tv2.vec + j);
- }
- for (j = 0; j < TVR_SIZE; j++)
- INIT_LIST_HEAD(base->tv1.vec + j);
- tasklet_init(&per_cpu(timer_tasklet, i), run_timer_tasklet, 0);
+ int j;
+ tvec_base_t *base;
+
+ base = &per_cpu(tvec_bases, cpu);
+ spin_lock_init(&base->lock);
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
+ INIT_LIST_HEAD(base->tv4.vec + j);
+ INIT_LIST_HEAD(base->tv3.vec + j);
+ INIT_LIST_HEAD(base->tv2.vec + j);
}
+ for (j = 0; j < TVR_SIZE; j++)
+ INIT_LIST_HEAD(base->tv1.vec + j);
+ tasklet_init(&per_cpu(timer_tasklet, cpu), run_timer_tasklet, 0UL);
+}
+
+static int __devinit timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ switch(action) {
+ case CPU_UP_PREPARE:
+ init_timers_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata timers_nb = {
+ .notifier_call = timer_cpu_notify,
+};
+
+
+void __init init_timers(void)
+{
+ timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&timers_nb);
}