Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c      26
-rw-r--r--  kernel/exit.c      17
-rw-r--r--  kernel/fork.c       3
-rw-r--r--  kernel/ksyms.c      3
-rw-r--r--  kernel/sched.c     25
-rw-r--r--  kernel/signal.c    61
-rw-r--r--  kernel/softirq.c   30
-rw-r--r--  kernel/suspend.c    8
-rw-r--r--  kernel/sysctl.c     4
-rw-r--r--  kernel/timer.c     16
-rw-r--r--  kernel/user.c       1
11 files changed, 134 insertions(+), 60 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index e63095525ac2..0009dfb25dcb 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -394,17 +394,25 @@ static void do_acct_process(long exitcode, struct file *file)
/*
* acct_process - now just a wrapper around do_acct_process
*/
-int acct_process(long exitcode)
+void acct_process(long exitcode)
{
struct file *file = NULL;
+
+ /*
+ * accelerate the common fastpath:
+ */
+ if (!acct_globals.file)
+ return;
+
spin_lock(&acct_globals.lock);
- if (acct_globals.file) {
- file = acct_globals.file;
- get_file(file);
+ file = acct_globals.file;
+ if (unlikely(!file)) {
spin_unlock(&acct_globals.lock);
- do_acct_process(exitcode, file);
- fput(file);
- } else
- spin_unlock(&acct_globals.lock);
- return 0;
+ return;
+ }
+ get_file(file);
+ spin_unlock(&acct_globals.lock);
+
+ do_acct_process(exitcode, file);
+ fput(file);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 367854d246ef..7792bb1268ff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -230,6 +230,7 @@ void reparent_to_init(void)
/* signals? */
security_task_reparent_to_init(current);
memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
+ atomic_inc(&(INIT_USER->__count));
switch_uid(INIT_USER);
write_unlock_irq(&tasklist_lock);
@@ -442,7 +443,7 @@ static inline void __exit_mm(struct task_struct * tsk)
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
- enter_lazy_tlb(mm, current, smp_processor_id());
+ enter_lazy_tlb(mm, current);
task_unlock(tsk);
mmput(mm);
}
@@ -651,6 +652,8 @@ static void exit_notify(struct task_struct *tsk)
if (tsk->exit_signal != -1) {
int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
do_notify_parent(tsk, signal);
+ } else if (tsk->ptrace) {
+ do_notify_parent(tsk, SIGCHLD);
}
tsk->state = TASK_ZOMBIE;
@@ -680,6 +683,8 @@ NORET_TYPE void do_exit(long code)
panic("Attempted to kill the idle task!");
if (unlikely(tsk->pid == 1))
panic("Attempted to kill init!");
+ if (tsk->io_context)
+ exit_io_context();
tsk->flags |= PF_EXITING;
del_timer_sync(&tsk->real_timer);
@@ -715,7 +720,7 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
exit_notify(tsk);
- if (tsk->exit_signal == -1)
+ if (tsk->exit_signal == -1 && tsk->ptrace == 0)
release_task(tsk);
schedule();
@@ -859,7 +864,7 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
BUG_ON(state != TASK_DEAD);
return 0;
}
- if (unlikely(p->exit_signal == -1))
+ if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
/*
* This can only happen in a race with a ptraced thread
* dying on another processor.
@@ -889,8 +894,12 @@ static int wait_task_zombie(task_t *p, unsigned int *stat_addr, struct rusage *r
/* Double-check with lock held. */
if (p->real_parent != p->parent) {
__ptrace_unlink(p);
- do_notify_parent(p, p->exit_signal);
p->state = TASK_ZOMBIE;
+ /* If this is a detached thread, this is where it goes away. */
+ if (p->exit_signal == -1)
+ release_task (p);
+ else
+ do_notify_parent(p, p->exit_signal);
p = NULL;
}
write_unlock_irq(&tasklist_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index 2abbc9c2da23..96ce3385cc75 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -286,7 +286,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
continue;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
- if (!vm_enough_memory(len))
+ if (security_vm_enough_memory(len))
goto fail_nomem;
charge += len;
}
@@ -864,6 +864,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->lock_depth = -1; /* -1 = no lock */
p->start_time = get_jiffies_64();
p->security = NULL;
+ p->io_context = NULL;
retval = -ENOMEM;
if ((retval = security_task_alloc(p)))
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 8544287c9045..66ea4b6b4d84 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -462,6 +462,7 @@ EXPORT_SYMBOL(preempt_schedule);
#endif
EXPORT_SYMBOL(schedule_timeout);
EXPORT_SYMBOL(yield);
+EXPORT_SYMBOL(io_schedule);
EXPORT_SYMBOL(__cond_resched);
EXPORT_SYMBOL(set_user_nice);
EXPORT_SYMBOL(task_nice);
@@ -586,7 +587,7 @@ EXPORT_SYMBOL(tasklet_kill);
EXPORT_SYMBOL(do_softirq);
EXPORT_SYMBOL(raise_softirq);
EXPORT_SYMBOL(open_softirq);
-EXPORT_SYMBOL(cpu_raise_softirq);
+EXPORT_SYMBOL(raise_softirq_irqoff);
EXPORT_SYMBOL(__tasklet_schedule);
EXPORT_SYMBOL(__tasklet_hi_schedule);
diff --git a/kernel/sched.c b/kernel/sched.c
index bb552059577d..556c5cdbb9c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -508,8 +508,8 @@ repeat_lock_task:
}
#ifdef CONFIG_SMP
else
- if (unlikely(kick) && task_running(rq, p) && (p->thread_info->cpu != smp_processor_id()))
- smp_send_reschedule(p->thread_info->cpu);
+ if (unlikely(kick) && task_running(rq, p) && (task_cpu(p) != smp_processor_id()))
+ smp_send_reschedule(task_cpu(p));
#endif
p->state = TASK_RUNNING;
}
@@ -646,9 +646,9 @@ static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next
if (unlikely(!mm)) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
- enter_lazy_tlb(oldmm, next, smp_processor_id());
+ enter_lazy_tlb(oldmm, next);
} else
- switch_mm(oldmm, mm, next, smp_processor_id());
+ switch_mm(oldmm, mm, next);
if (unlikely(!prev->mm)) {
prev->active_mm = NULL;
@@ -1175,6 +1175,7 @@ DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
void scheduler_tick(int user_ticks, int sys_ticks)
{
int cpu = smp_processor_id();
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
runqueue_t *rq = this_rq();
task_t *p = current;
@@ -1184,19 +1185,19 @@ void scheduler_tick(int user_ticks, int sys_ticks)
if (p == rq->idle) {
/* note: this timer irq context must be accounted for as well */
if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
- kstat_cpu(cpu).cpustat.system += sys_ticks;
+ cpustat->system += sys_ticks;
else if (atomic_read(&rq->nr_iowait) > 0)
- kstat_cpu(cpu).cpustat.iowait += sys_ticks;
+ cpustat->iowait += sys_ticks;
else
- kstat_cpu(cpu).cpustat.idle += sys_ticks;
+ cpustat->idle += sys_ticks;
rebalance_tick(rq, 1);
return;
}
if (TASK_NICE(p) > 0)
- kstat_cpu(cpu).cpustat.nice += user_ticks;
+ cpustat->nice += user_ticks;
else
- kstat_cpu(cpu).cpustat.user += user_ticks;
- kstat_cpu(cpu).cpustat.system += sys_ticks;
+ cpustat->user += user_ticks;
+ cpustat->system += sys_ticks;
/* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
@@ -1332,7 +1333,7 @@ pick_next_task:
switch_tasks:
prefetch(next);
clear_tsk_need_resched(prev);
- RCU_qsctr(prev->thread_info->cpu)++;
+ RCU_qsctr(task_cpu(prev))++;
if (likely(prev != next)) {
rq->nr_switches++;
@@ -2527,7 +2528,7 @@ void __init sched_init(void)
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
- enter_lazy_tlb(&init_mm, current, smp_processor_id());
+ enter_lazy_tlb(&init_mm, current);
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
diff --git a/kernel/signal.c b/kernel/signal.c
index 78c4dfa0073c..7ac72191b30b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -579,8 +579,8 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
/*
* Bad permissions for sending the signal
*/
-static inline int check_kill_permission(int sig, struct siginfo *info,
- struct task_struct *t)
+static int check_kill_permission(int sig, struct siginfo *info,
+ struct task_struct *t)
{
int error = -EINVAL;
if (sig < 0 || sig > _NSIG)
@@ -797,10 +797,11 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
int ret;
spin_lock_irqsave(&t->sighand->siglock, flags);
- if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
+ if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
- sigdelset(&t->blocked, sig);
- recalc_sigpending_tsk(t);
+ sigdelset(&t->blocked, sig);
+ recalc_sigpending_tsk(t);
+ }
ret = specific_send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -2081,12 +2082,58 @@ sys_kill(int pid, int sig)
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
- info.si_pid = current->pid;
+ info.si_pid = current->tgid;
info.si_uid = current->uid;
return kill_something_info(sig, &info, pid);
}
+/**
+ * sys_tgkill - send signal to one specific thread
+ * @tgid: the thread group ID of the thread
+ * @pid: the PID of the thread
+ * @sig: signal to be sent
+ *
+ * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * exists but no longer belongs to the target process. This method
+ * solves the problem of threads exiting and their PIDs being reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+ struct siginfo info;
+ int error;
+ struct task_struct *p;
+
+ /* This is only valid for single tasks */
+ if (pid <= 0 || tgid <= 0)
+ return -EINVAL;
+
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_TKILL;
+ info.si_pid = current->tgid;
+ info.si_uid = current->uid;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ error = -ESRCH;
+ if (p && (p->tgid == tgid)) {
+ error = check_kill_permission(sig, &info, p);
+ /*
+ * The null signal is a permissions and process existence
+ * probe. No signal is actually delivered.
+ */
+ if (!error && sig && p->sighand) {
+ spin_lock_irq(&p->sighand->siglock);
+ handle_stop_signal(sig, p);
+ error = specific_send_sig_info(sig, &info, p);
+ spin_unlock_irq(&p->sighand->siglock);
+ }
+ }
+ read_unlock(&tasklist_lock);
+ return error;
+}
+
/*
* Send a signal to only one task, even if it's a CLONE_THREAD task.
*/
@@ -2104,7 +2151,7 @@ sys_tkill(int pid, int sig)
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
- info.si_pid = current->pid;
+ info.si_pid = current->tgid;
info.si_uid = current->uid;
read_lock(&tasklist_lock);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 20bf233a14c3..96294a3d673f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/percpu.h>
#include <linux/cpu.h>
/*
@@ -41,15 +42,18 @@ EXPORT_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
* to the pending events, so let the scheduler balance
* the softirq load for us.
*/
-static inline void wakeup_softirqd(unsigned cpu)
+static inline void wakeup_softirqd(void)
{
- struct task_struct * tsk = ksoftirqd_task(cpu);
+ /* Interrupts are disabled: no need to stop preemption */
+ struct task_struct *tsk = __get_cpu_var(ksoftirqd);
if (tsk && tsk->state != TASK_RUNNING)
wake_up_process(tsk);
@@ -96,7 +100,7 @@ restart:
goto restart;
}
if (pending)
- wakeup_softirqd(smp_processor_id());
+ wakeup_softirqd();
__local_bh_enable();
}
@@ -117,9 +121,9 @@ EXPORT_SYMBOL(local_bh_enable);
/*
* This function must run with irqs disabled!
*/
-inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+inline void raise_softirq_irqoff(unsigned int nr)
{
- __cpu_raise_softirq(cpu, nr);
+ __raise_softirq_irqoff(nr);
/*
* If we're in an interrupt or softirq, we're done
@@ -131,7 +135,7 @@ inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
* schedule the softirq soon.
*/
if (!in_interrupt())
- wakeup_softirqd(cpu);
+ wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
@@ -139,7 +143,7 @@ void raise_softirq(unsigned int nr)
unsigned long flags;
local_irq_save(flags);
- cpu_raise_softirq(smp_processor_id(), nr);
+ raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
@@ -168,7 +172,7 @@ void __tasklet_schedule(struct tasklet_struct *t)
local_irq_save(flags);
t->next = __get_cpu_var(tasklet_vec).list;
__get_cpu_var(tasklet_vec).list = t;
- cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
+ raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -179,7 +183,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
local_irq_save(flags);
t->next = __get_cpu_var(tasklet_hi_vec).list;
__get_cpu_var(tasklet_hi_vec).list = t;
- cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
+ raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -211,7 +215,7 @@ static void tasklet_action(struct softirq_action *a)
local_irq_disable();
t->next = __get_cpu_var(tasklet_vec).list;
__get_cpu_var(tasklet_vec).list = t;
- __cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
+ __raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_enable();
}
}
@@ -244,7 +248,7 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
t->next = __get_cpu_var(tasklet_hi_vec).list;
__get_cpu_var(tasklet_hi_vec).list = t;
- __cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
+ __raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
}
@@ -325,7 +329,7 @@ static int ksoftirqd(void * __bind_cpu)
__set_current_state(TASK_INTERRUPTIBLE);
mb();
- local_ksoftirqd_task() = current;
+ __get_cpu_var(ksoftirqd) = current;
for (;;) {
if (!local_softirq_pending())
@@ -354,7 +358,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
return NOTIFY_BAD;
}
- while (!ksoftirqd_task(hotcpu))
+ while (!per_cpu(ksoftirqd, hotcpu))
yield();
}
return NOTIFY_OK;
diff --git a/kernel/suspend.c b/kernel/suspend.c
index 843cbaf6d433..1033ee31df68 100644
--- a/kernel/suspend.c
+++ b/kernel/suspend.c
@@ -1203,12 +1203,12 @@ static int read_suspend_image(const char * specialfile, int noresume)
void software_resume(void)
{
-#ifdef CONFIG_SMP
- printk(KERN_WARNING "Software Suspend has a malfunctioning SMP support. Disabled :(\n");
-#else
+ if (num_online_cpus() > 1) {
+ printk(KERN_WARNING "Software Suspend has malfunctioning SMP support. Disabled :(\n");
+ return;
+ }
/* We enable the possibility of machine suspend */
software_suspend_enabled = 1;
-#endif
if (!resume_status)
return;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7f0cc00cfa4d..edebad7ddec4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,7 +130,7 @@ extern ctl_table random_table[];
static ssize_t proc_readsys(struct file *, char __user *, size_t, loff_t *);
static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *);
-static int proc_sys_permission(struct inode *, int);
+static int proc_sys_permission(struct inode *, int, struct nameidata *);
struct file_operations proc_sys_file_operations = {
.read = proc_readsys,
@@ -1177,7 +1177,7 @@ static ssize_t proc_writesys(struct file * file, const char __user * buf,
return do_rw_proc(1, file, (char __user *) buf, count, ppos);
}
-static int proc_sys_permission(struct inode *inode, int op)
+static int proc_sys_permission(struct inode *inode, int op, struct nameidata *nd)
{
return test_perm(inode->i_mode, op);
}
diff --git a/kernel/timer.c b/kernel/timer.c
index 7bce7a7cb2c2..3995425e44a5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -126,13 +126,17 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
* or you set a timer to go off in the past
*/
vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
- } else if (idx <= 0xffffffffUL) {
- int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
- vec = base->tv5.vec + i;
} else {
- /* Can only get here on architectures with 64-bit jiffies */
- INIT_LIST_HEAD(&timer->entry);
- return;
+ int i;
+ /* If the timeout is larger than 0xffffffff on 64-bit
+ * architectures then we use the maximum timeout:
+ */
+ if (idx > 0xffffffffUL) {
+ idx = 0xffffffffUL;
+ expires = idx + base->timer_jiffies;
+ }
+ i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ vec = base->tv5.vec + i;
}
/*
* Timers are FIFO:
diff --git a/kernel/user.c b/kernel/user.c
index 592680d8cc68..86bd412b85da 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -126,7 +126,6 @@ void switch_uid(struct user_struct *new_user)
* we should be checking for it. -DaveM
*/
old_user = current->user;
- atomic_inc(&new_user->__count);
atomic_inc(&new_user->processes);
atomic_dec(&old_user->processes);
current->user = new_user;