author     David S. Miller <davem@nuts.ninka.net>  2003-05-19 02:48:39 -0700
committer  David S. Miller <davem@nuts.ninka.net>  2003-05-19 02:48:39 -0700
commit     3b1d5e1d06c7d8c6d24f565ef2a7d1cdfdf9a908 (patch)
tree       56d1004bfa4303f36da74c5327ad3132db83c5a6 /kernel
parent     dcee68e74537c6ca9011628560b3edb4d9e1b39d (diff)
parent     d7b7a72d6835c797d8c9828eb9d2cb64af204136 (diff)
Merge nuts.ninka.net:/home/davem/src/BK/network-2.5
into nuts.ninka.net:/home/davem/src/BK/net-2.5
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c   | 35
-rw-r--r--  kernel/ksyms.c  |  3
-rw-r--r--  kernel/sched.c  | 71
-rw-r--r--  kernel/signal.c | 14
4 files changed, 47 insertions, 76 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 2d74b5cf05e2..a509e6da132f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -752,12 +752,12 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static struct task_struct *copy_process(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr)
+struct task_struct *copy_process(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+ int __user *parent_tidptr,
+ int __user *child_tidptr)
{
int retval;
struct task_struct *p = NULL;
@@ -1067,15 +1067,16 @@ static inline int fork_traceflag (unsigned clone_flags)
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
-struct task_struct *do_fork(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr)
+long do_fork(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+ int __user *parent_tidptr,
+ int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
+ long pid;
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
@@ -1084,6 +1085,12 @@ struct task_struct *do_fork(unsigned long clone_flags,
}
p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
+ /*
+ * Do this prior waking up the new thread - the thread pointer
+ * might get invalid after that point, if the thread exits quickly.
+ */
+ pid = IS_ERR(p) ? PTR_ERR(p) : p->pid;
+
if (!IS_ERR(p)) {
struct completion vfork;
@@ -1104,7 +1111,7 @@ struct task_struct *do_fork(unsigned long clone_flags,
++total_forks;
if (unlikely (trace)) {
- current->ptrace_message = (unsigned long) p->pid;
+ current->ptrace_message = pid;
ptrace_notify ((trace << 8) | SIGTRAP);
}
@@ -1119,7 +1126,7 @@ struct task_struct *do_fork(unsigned long clone_flags,
*/
set_need_resched();
}
- return p;
+ return pid;
}
/* SLAB cache for signal_struct structures (tsk->signal) */
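With this change do_fork() hands back a pid (or a PTR_ERR-encoded errno) instead of a task_struct pointer, so callers never dereference a child that may already have exited. A minimal caller sketch, assuming an architecture that passes the user stack pointer in explicitly; the entry-point name and argument plumbing are illustrative, not part of this patch:

/* Sketch only: real architectures extract stack_start from pt_regs. */
asmlinkage long example_sys_fork(unsigned long user_sp, struct pt_regs *regs)
{
	/* SIGCHLD as the lone clone flag: an ordinary fork() */
	return do_fork(SIGCHLD, user_sp, regs, 0, NULL, NULL);
}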
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index cd8f9810ddc9..eed4ac9b8f51 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -223,6 +223,7 @@ EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(generic_commit_write);
+EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_file_read);
@@ -556,6 +557,8 @@ EXPORT_SYMBOL(buffer_insert_list);
EXPORT_SYMBOL(make_bad_inode);
EXPORT_SYMBOL(is_bad_inode);
EXPORT_SYMBOL(__inode_dir_notify);
+EXPORT_SYMBOL(generic_osync_inode);
+EXPORT_SYMBOL(remove_suid);
#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b43c81a49ed..f9dc0a968a61 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -340,10 +340,9 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
* Update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.)
*/
-static inline int activate_task(task_t *p, runqueue_t *rq)
+static inline void activate_task(task_t *p, runqueue_t *rq)
{
long sleep_time = jiffies - p->last_run - 1;
- int requeue_waker = 0;
if (sleep_time > 0) {
int sleep_avg;
@@ -372,8 +371,6 @@ static inline int activate_task(task_t *p, runqueue_t *rq)
}
}
__activate_task(p, rq);
-
- return requeue_waker;
}
/*
@@ -454,27 +451,12 @@ repeat:
}
#endif
-/*
- * kick_if_running - kick the remote CPU if the task is running currently.
- *
- * This code is used by the signal code to signal tasks
- * which are in user-mode, as quickly as possible.
- *
- * (Note that we do this lockless - if the task does anything
- * while the message is in flight then it will notice the
- * sigpending condition anyway.)
- */
-void kick_if_running(task_t * p)
-{
- if ((task_running(task_rq(p), p)) && (task_cpu(p) != smp_processor_id()))
- resched_task(p);
-}
-
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
* @state: the mask of task states that can be woken
* @sync: do a synchronous wakeup?
+ * @kick: kick the CPU if the task is already running?
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
@@ -484,10 +466,10 @@ void kick_if_running(task_t * p)
*
* returns failure only if the task is already active.
*/
-static int try_to_wake_up(task_t * p, unsigned int state, int sync)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
{
- int success = 0, requeue_waker = 0;
unsigned long flags;
+ int success = 0;
long old_state;
runqueue_t *rq;
@@ -513,42 +495,34 @@ repeat_lock_task:
if (sync)
__activate_task(p, rq);
else {
- requeue_waker = activate_task(p, rq);
+ activate_task(p, rq);
if (p->prio < rq->curr->prio)
resched_task(rq->curr);
}
success = 1;
- }
+ } else
+ if (unlikely(kick) && task_running(rq, p))
+ resched_task(rq->curr);
p->state = TASK_RUNNING;
}
task_rq_unlock(rq, &flags);
- /*
- * We have to do this outside the other spinlock, the two
- * runqueues might be different:
- */
- if (requeue_waker) {
- prio_array_t *array;
-
- rq = task_rq_lock(current, &flags);
- array = current->array;
- dequeue_task(current, array);
- current->prio = effective_prio(current);
- enqueue_task(current, array);
- task_rq_unlock(rq, &flags);
- }
-
return success;
}
int wake_up_process(task_t * p)
{
- return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+ return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 0);
+}
+
+int wake_up_process_kick(task_t * p)
+{
+ return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 1);
}
int wake_up_state(task_t *p, unsigned int state)
{
- return try_to_wake_up(p, state, 0);
+ return try_to_wake_up(p, state, 0, 0);
}
/*
@@ -1206,7 +1180,7 @@ void scheduler_tick(int user_ticks, int sys_ticks)
/* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
set_tsk_need_resched(p);
- return;
+ goto out;
}
spin_lock(&rq->lock);
/*
@@ -1233,7 +1207,7 @@ void scheduler_tick(int user_ticks, int sys_ticks)
dequeue_task(p, rq->active);
enqueue_task(p, rq->active);
}
- goto out;
+ goto out_unlock;
}
if (!--p->time_slice) {
dequeue_task(p, rq->active);
@@ -1249,8 +1223,9 @@ void scheduler_tick(int user_ticks, int sys_ticks)
} else
enqueue_task(p, rq->active);
}
-out:
+out_unlock:
spin_unlock(&rq->lock);
+out:
rebalance_tick(rq, 0);
}
@@ -1389,7 +1364,7 @@ need_resched:
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
task_t *p = curr->task;
- return try_to_wake_up(p, mode, sync);
+ return try_to_wake_up(p, mode, sync, 0);
}
/*
@@ -1440,8 +1415,6 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
__wake_up_common(q, mode, 1, 0);
}
-#ifdef CONFIG_SMP
-
/**
* __wake_up - sync- wake up threads blocked on a waitqueue.
* @q: the waitqueue
@@ -1452,6 +1425,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
* away soon, so while the target thread will be woken up, it will not
* be migrated to another CPU - ie. the two threads are 'synchronized'
* with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
*/
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
@@ -1468,8 +1443,6 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
spin_unlock_irqrestore(&q->lock, flags);
}
-#endif
-
void complete(struct completion *x)
{
unsigned long flags;
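The __wake_up_sync() comment above describes when a synchronous wakeup pays off: the waker knows it will schedule away shortly, so the woken task is not immediately preempted onto or migrated toward the waker's CPU. A minimal caller sketch, with the wait-queue name and flag assumed for illustration rather than taken from this patch:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_data_ready;

/* The waker blocks soon after handing off, so the sync variant
 * (which resolves to __wake_up_sync()) avoids bouncing the woken
 * task between CPUs or preempting the waker needlessly. */
static void example_hand_off(void)
{
	example_data_ready = 1;
	wake_up_interruptible_sync(&example_waitq);
}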
diff --git a/kernel/signal.c b/kernel/signal.c
index 0baa17479014..d15a55ec9e5e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -521,18 +521,6 @@ inline void signal_wake_up(struct task_struct *t, int resume)
set_tsk_thread_flag(t,TIF_SIGPENDING);
/*
- * If the task is running on a different CPU
- * force a reschedule on the other CPU to make
- * it notice the new signal quickly.
- *
- * The code below is a tad loose and might occasionally
- * kick the wrong CPU if we catch the process in the
- * process of changing - but no harm is done by that
- * other than doing an extra (lightweight) IPI interrupt.
- */
- if (t->state == TASK_RUNNING)
- kick_if_running(t);
- /*
* If resume is set, we want to wake it up in the TASK_STOPPED case.
* We don't check for TASK_STOPPED because there is a race with it
* executing another processor and just now entering stopped state.
@@ -543,7 +531,7 @@ inline void signal_wake_up(struct task_struct *t, int resume)
if (resume)
mask |= TASK_STOPPED;
if (t->state & mask) {
- wake_up_process(t);
+ wake_up_process_kick(t);
return;
}
}