summary | refs | log | tree | commit | diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c    2
-rw-r--r--  kernel/kmod.c   10
-rw-r--r--  kernel/ksyms.c   4
-rw-r--r--  kernel/signal.c  5
-rw-r--r--  kernel/sys.c    13
-rw-r--r--  kernel/time.c   15
-rw-r--r--  kernel/user.c   17
7 files changed, 43 insertions, 23 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 729e93bff8e4..de34ed9091f5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -249,7 +249,7 @@ void reparent_to_init(void)
/* signals? */
security_task_reparent_to_init(current);
memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
- current->user = INIT_USER;
+ switch_uid(INIT_USER);
write_unlock_irq(&tasklist_lock);
}
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 1930367d3736..257634f94652 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -121,15 +121,7 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
if (curtask->files->fd[i]) close(i);
}
- /* Drop the "current user" thing */
- {
- struct user_struct *user = curtask->user;
- curtask->user = INIT_USER;
- atomic_inc(&INIT_USER->__count);
- atomic_inc(&INIT_USER->processes);
- atomic_dec(&user->processes);
- free_uid(user);
- }
+ switch_uid(INIT_USER);
/* Give kmod all effective privileges.. */
curtask->euid = curtask->fsuid = 0;
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 4d7bfe2accde..f0503df9fe3d 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -208,6 +208,7 @@ EXPORT_SYMBOL(close_bdev_excl);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(ll_rw_block);
+EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(unlock_buffer);
EXPORT_SYMBOL(__wait_on_buffer);
@@ -489,6 +490,9 @@ EXPORT_SYMBOL(xtime);
EXPORT_SYMBOL(xtime_lock);
EXPORT_SYMBOL(do_gettimeofday);
EXPORT_SYMBOL(do_settimeofday);
+#if (BITS_PER_LONG < 64)
+EXPORT_SYMBOL(get_jiffies_64);
+#endif
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
EXPORT_SYMBOL(__might_sleep);
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 6d4a18e6fb21..2ea021f7af8d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -620,7 +620,6 @@ static void handle_stop_signal(int sig, struct task_struct *p)
t = p;
do {
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- wake_up_state(t, TASK_STOPPED);
/*
* If there is a handler for SIGCONT, we must make
@@ -632,9 +631,13 @@ static void handle_stop_signal(int sig, struct task_struct *p)
* flag set, the thread will pause and acquire the
* siglock that we hold now and until we've queued
* the pending signal.
+ *
+ * Wake up the stopped thread _after_ setting
+ * TIF_SIGPENDING
*/
if (!sigismember(&t->blocked, SIGCONT))
set_tsk_thread_flag(t, TIF_SIGPENDING);
+ wake_up_state(t, TASK_STOPPED);
t = next_thread(t);
} while (t != p);
diff --git a/kernel/sys.c b/kernel/sys.c
index 9404304eba74..dffb67035c78 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -561,19 +561,12 @@ asmlinkage long sys_setgid(gid_t gid)
static int set_user(uid_t new_ruid, int dumpclear)
{
- struct user_struct *new_user, *old_user;
+ struct user_struct *new_user;
- /* What if a process setreuid()'s and this brings the
- * new uid over his NPROC rlimit? We can check this now
- * cheaply with the new uid cache, so if it matters
- * we should be checking for it. -DaveM
- */
new_user = alloc_uid(new_ruid);
if (!new_user)
return -EAGAIN;
- old_user = current->user;
- atomic_dec(&old_user->processes);
- atomic_inc(&new_user->processes);
+ switch_uid(new_user);
if(dumpclear)
{
@@ -581,8 +574,6 @@ static int set_user(uid_t new_ruid, int dumpclear)
wmb();
}
current->uid = new_ruid;
- current->user = new_user;
- free_uid(old_user);
return 0;
}
diff --git a/kernel/time.c b/kernel/time.c
index c8c8a10eae1f..4ecc0a3b2ac1 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -27,7 +27,6 @@
#include <linux/timex.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
-
#include <asm/uaccess.h>
/*
@@ -416,3 +415,17 @@ struct timespec current_kernel_time(void)
return now;
}
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void)
+{
+ unsigned long seq;
+ u64 ret;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ ret = jiffies_64;
+ } while (read_seqretry(&xtime_lock, seq));
+ return ret;
+}
+#endif
diff --git a/kernel/user.c b/kernel/user.c
index 0704b2aad9c5..592680d8cc68 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -116,6 +116,23 @@ struct user_struct * alloc_uid(uid_t uid)
return up;
}
+void switch_uid(struct user_struct *new_user)
+{
+ struct user_struct *old_user;
+
+ /* What if a process setreuid()'s and this brings the
+ * new uid over his NPROC rlimit? We can check this now
+ * cheaply with the new uid cache, so if it matters
+ * we should be checking for it. -DaveM
+ */
+ old_user = current->user;
+ atomic_inc(&new_user->__count);
+ atomic_inc(&new_user->processes);
+ atomic_dec(&old_user->processes);
+ current->user = new_user;
+ free_uid(old_user);
+}
+
static int __init uid_cache_init(void)
{