author    Greg Kroah-Hartman <greg@kroah.com>  2003-07-03 03:52:16 -0700
committer Greg Kroah-Hartman <greg@kroah.com>  2003-07-03 03:52:16 -0700
commit    8394c855cb9420633d37906fb6e2bcfed13c310f (patch)
tree      ef7996402cea6e212dba748c317b00168b241a79 /kernel
parent    7e2fa9927e8b1601ae947f87a50fdd5860a9599d (diff)
parent    d23caa21ece9e3f2d0270047b0b7f0b6887e51fb (diff)
Merge kroah.com:/home/linux/BK/bleed-2.5
into kroah.com:/home/linux/BK/pci-2.5
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c     | 12
-rw-r--r--  kernel/fork.c     |  2
-rw-r--r--  kernel/ksyms.c    |  3
-rw-r--r--  kernel/softirq.c  | 30
4 files changed, 29 insertions, 18 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 367854d246ef..2f090213e481 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -651,6 +651,8 @@ static void exit_notify(struct task_struct *tsk)
if (tsk->exit_signal != -1) {
int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
do_notify_parent(tsk, signal);
+ } else if (tsk->ptrace) {
+ do_notify_parent(tsk, SIGCHLD);
}
tsk->state = TASK_ZOMBIE;
@@ -715,7 +717,7 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
exit_notify(tsk);
- if (tsk->exit_signal == -1)
+ if (tsk->exit_signal == -1 && tsk->ptrace == 0)
release_task(tsk);
schedule();
@@ -859,7 +861,7 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
BUG_ON(state != TASK_DEAD);
return 0;
}
- if (unlikely(p->exit_signal == -1))
+ if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
/*
* This can only happen in a race with a ptraced thread
* dying on another processor.
@@ -889,8 +891,12 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
/* Double-check with lock held. */
if (p->real_parent != p->parent) {
__ptrace_unlink(p);
- do_notify_parent(p, p->exit_signal);
p->state = TASK_ZOMBIE;
+ /* If this is a detached thread, this is where it goes away. */
+ if (p->exit_signal == -1)
+ release_task (p);
+ else
+ do_notify_parent(p, p->exit_signal);
p = NULL;
}
write_unlock_irq(&tasklist_lock);
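
Taken together, the exit.c hunks above let a detached thread (exit_signal == -1) that is being ptraced be reported to, and reaped by, its tracer: exit_notify() now sends SIGCHLD to the tracer, do_exit() skips the immediate release_task() while tsk->ptrace is set, and wait_task_zombie() performs the deferred release_task() itself once the thread has been unlinked from the tracer. A standalone sketch of the notification decision (illustration only; the stand-in structure merely mirrors the exit_signal and ptrace fields used in the hunks):

/* Standalone sketch (not kernel code): the notification decision the
 * exit.c hunks implement, modelled on a stand-in task structure. */
#include <stdio.h>

struct task {
	int exit_signal;	/* -1 means "detached": no one wait()s on it */
	int ptrace;		/* nonzero while a tracer is attached */
	int parent_is_real;	/* tsk->parent == tsk->real_parent */
};

static const char *who_is_notified(const struct task *t)
{
	if (t->exit_signal != -1)
		return t->parent_is_real ? "parent, with exit_signal"
					 : "tracer, with SIGCHLD";
	if (t->ptrace)
		return "tracer, with SIGCHLD";	/* the case this patch adds */
	return "nobody: task self-reaps in do_exit()";
}

int main(void)
{
	const struct task detached_traced = { -1, 1, 0 };
	const struct task detached_plain  = { -1, 0, 1 };

	printf("detached + traced : %s\n", who_is_notified(&detached_traced));
	printf("detached, untraced: %s\n", who_is_notified(&detached_plain));
	return 0;
}

The do_exit() change is the other half of the same story: a detached but traced task is no longer released on the spot, so the zombie stays around for the tracer to collect.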
diff --git a/kernel/fork.c b/kernel/fork.c
index 2abbc9c2da23..c17e05614c88 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -286,7 +286,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
continue;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
- if (!vm_enough_memory(len))
+ if (security_vm_enough_memory(len))
goto fail_nomem;
charge += len;
}
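
The fork.c hunk swaps the open-coded vm_enough_memory() check in dup_mmap() for the LSM hook security_vm_enough_memory(). The sense of the test flips because the two helpers use opposite return conventions: the old function returned nonzero when the accounting allowed the allocation, while the security hook returns 0 on success and nonzero on failure. A small standalone sketch of why both spellings take the fail_nomem path in the same cases (the helper bodies below are placeholders, not the real implementations):

/* Sketch with assumed return conventions (illustration only). */
#include <stdio.h>

/* old helper: nonzero == "enough memory, go ahead" */
static int vm_enough_memory(unsigned long pages)
{
	return pages <= 1024;
}

/* LSM-style hook: 0 == success, nonzero (e.g. -ENOMEM) == failure */
static int security_vm_enough_memory(unsigned long pages)
{
	return vm_enough_memory(pages) ? 0 : -12 /* -ENOMEM */;
}

int main(void)
{
	unsigned long len = 4096;	/* deliberately more than the limit */

	if (!vm_enough_memory(len))		/* old test */
		printf("old check: fail_nomem\n");
	if (security_vm_enough_memory(len))	/* new test, same outcome */
		printf("new check: fail_nomem\n");
	return 0;
}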
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 8544287c9045..66ea4b6b4d84 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -462,6 +462,7 @@ EXPORT_SYMBOL(preempt_schedule);
#endif
EXPORT_SYMBOL(schedule_timeout);
EXPORT_SYMBOL(yield);
+EXPORT_SYMBOL(io_schedule);
EXPORT_SYMBOL(__cond_resched);
EXPORT_SYMBOL(set_user_nice);
EXPORT_SYMBOL(task_nice);
@@ -586,7 +587,7 @@ EXPORT_SYMBOL(tasklet_kill);
EXPORT_SYMBOL(do_softirq);
EXPORT_SYMBOL(raise_softirq);
EXPORT_SYMBOL(open_softirq);
-EXPORT_SYMBOL(cpu_raise_softirq);
+EXPORT_SYMBOL(raise_softirq_irqoff);
EXPORT_SYMBOL(__tasklet_schedule);
EXPORT_SYMBOL(__tasklet_hi_schedule);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 20bf233a14c3..96294a3d673f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/percpu.h>
#include <linux/cpu.h>
/*
@@ -41,15 +42,18 @@ EXPORT_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
* to the pending events, so lets the scheduler to balance
* the softirq load for us.
*/
-static inline void wakeup_softirqd(unsigned cpu)
+static inline void wakeup_softirqd(void)
{
- struct task_struct * tsk = ksoftirqd_task(cpu);
+ /* Interrupts are disabled: no need to stop preemption */
+ struct task_struct *tsk = __get_cpu_var(ksoftirqd);
if (tsk && tsk->state != TASK_RUNNING)
wake_up_process(tsk);
@@ -96,7 +100,7 @@ restart:
goto restart;
}
if (pending)
- wakeup_softirqd(smp_processor_id());
+ wakeup_softirqd();
__local_bh_enable();
}
@@ -117,9 +121,9 @@ EXPORT_SYMBOL(local_bh_enable);
/*
* This function must run with irqs disabled!
*/
-inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+inline void raise_softirq_irqoff(unsigned int nr)
{
- __cpu_raise_softirq(cpu, nr);
+ __raise_softirq_irqoff(nr);
/*
* If we're in an interrupt or softirq, we're done
@@ -131,7 +135,7 @@ inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
* schedule the softirq soon.
*/
if (!in_interrupt())
- wakeup_softirqd(cpu);
+ wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
@@ -139,7 +143,7 @@ void raise_softirq(unsigned int nr)
unsigned long flags;
local_irq_save(flags);
- cpu_raise_softirq(smp_processor_id(), nr);
+ raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
@@ -168,7 +172,7 @@ void __tasklet_schedule(struct tasklet_struct *t)
local_irq_save(flags);
t->next = __get_cpu_var(tasklet_vec).list;
__get_cpu_var(tasklet_vec).list = t;
- cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
+ raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -179,7 +183,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
local_irq_save(flags);
t->next = __get_cpu_var(tasklet_hi_vec).list;
__get_cpu_var(tasklet_hi_vec).list = t;
- cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
+ raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -211,7 +215,7 @@ static void tasklet_action(struct softirq_action *a)
local_irq_disable();
t->next = __get_cpu_var(tasklet_vec).list;
__get_cpu_var(tasklet_vec).list = t;
- __cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
+ __raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_enable();
}
}
@@ -244,7 +248,7 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
t->next = __get_cpu_var(tasklet_hi_vec).list;
__get_cpu_var(tasklet_hi_vec).list = t;
- __cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
+ __raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
}
@@ -325,7 +329,7 @@ static int ksoftirqd(void * __bind_cpu)
__set_current_state(TASK_INTERRUPTIBLE);
mb();
- local_ksoftirqd_task() = current;
+ __get_cpu_var(ksoftirqd) = current;
for (;;) {
if (!local_softirq_pending())
@@ -354,7 +358,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
return NOTIFY_BAD;
}
- while (!ksoftirqd_task(hotcpu))
+ while (!per_cpu(ksoftirqd, hotcpu))
yield();
}
return NOTIFY_OK;
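
The softirq.c hunks make two related simplifications: the per-CPU ksoftirqd task pointer moves from the old irq_stat-based macros (ksoftirqd_task(cpu), local_ksoftirqd_task()) to a DEFINE_PER_CPU variable accessed with __get_cpu_var() and per_cpu(), and cpu_raise_softirq(cpu, nr) becomes raise_softirq_irqoff(nr), dropping the explicit CPU argument because every caller already runs on the local CPU with interrupts disabled (the ksyms.c hunk renames the export to match). A standalone sketch of the resulting calling convention, with "interrupts" simulated by a flag rather than the kernel's local_irq_save()/local_irq_restore():

/* Standalone sketch (not kernel code): how raise_softirq() wraps
 * raise_softirq_irqoff() after this patch.  The pending mask and the
 * interrupt state are simulated; the kernel versions operate on real
 * per-CPU state with interrupts genuinely disabled. */
#include <assert.h>
#include <stdio.h>

static int irqs_off;		/* stand-in for "interrupts disabled" */
static unsigned long pending;	/* stand-in for the local CPU's softirq mask */

static void raise_softirq_irqoff(unsigned int nr)
{
	assert(irqs_off);	/* contract: caller has interrupts off */
	pending |= 1UL << nr;	/* mark softirq nr pending on this CPU */
}

static void raise_softirq(unsigned int nr)
{
	int flags = irqs_off;	/* local_irq_save(flags) */
	irqs_off = 1;
	raise_softirq_irqoff(nr);
	irqs_off = flags;	/* local_irq_restore(flags) */
}

int main(void)
{
	raise_softirq(3);
	printf("pending mask: %#lx\n", pending);
	return 0;
}

Because interrupts are off in every caller, wakeup_softirqd() can read the local ksoftirqd pointer with __get_cpu_var(ksoftirqd) without extra preemption protection, which is exactly what the new comment in that hunk points out.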