-rw-r--r--  include/linux/sched.h |  1
-rw-r--r--  kernel/sched.c        | 56
-rw-r--r--  kernel/signal.c       | 16
3 files changed, 38 insertions, 35 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5ff8e452d49..16864532fcd9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -516,6 +516,7 @@ extern unsigned long itimer_ticks;
 extern unsigned long itimer_next;
 extern void do_timer(struct pt_regs *);
 
+extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
 extern void FASTCALL(sched_exit(task_t * p));
diff --git a/kernel/sched.c b/kernel/sched.c
index 68bd00e54a81..3e967ec6814f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -438,6 +438,7 @@ void kick_if_running(task_t * p)
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
  * @sync: do a synchronous wakeup?
  *
  * Put it on the run-queue if it's not already there. The "current"
@@ -448,7 +449,7 @@ void kick_if_running(task_t * p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, int sync)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
 	unsigned long flags;
 	int success = 0;
@@ -458,28 +459,30 @@ static int try_to_wake_up(task_t * p, int sync)
 repeat_lock_task:
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;
-	if (!p->array) {
-		/*
-		 * Fast-migrate the task if it's not running or runnable
-		 * currently. Do not violate hard affinity.
-		 */
-		if (unlikely(sync && !task_running(rq, p) &&
-			(task_cpu(p) != smp_processor_id()) &&
-			(p->cpus_allowed & (1UL << smp_processor_id())))) {
-
-			set_task_cpu(p, smp_processor_id());
-			task_rq_unlock(rq, &flags);
-			goto repeat_lock_task;
+	if (old_state & state) {
+		if (!p->array) {
+			/*
+			 * Fast-migrate the task if it's not running or runnable
+			 * currently. Do not violate hard affinity.
+			 */
+			if (unlikely(sync && !task_running(rq, p) &&
+				(task_cpu(p) != smp_processor_id()) &&
+				(p->cpus_allowed & (1UL << smp_processor_id())))) {
+
+				set_task_cpu(p, smp_processor_id());
+				task_rq_unlock(rq, &flags);
+				goto repeat_lock_task;
+			}
+			if (old_state == TASK_UNINTERRUPTIBLE)
+				rq->nr_uninterruptible--;
+			activate_task(p, rq);
+
+			if (p->prio < rq->curr->prio)
+				resched_task(rq->curr);
+			success = 1;
 		}
-		if (old_state == TASK_UNINTERRUPTIBLE)
-			rq->nr_uninterruptible--;
-		activate_task(p, rq);
-
-		if (p->prio < rq->curr->prio)
-			resched_task(rq->curr);
-		success = 1;
+		p->state = TASK_RUNNING;
 	}
-	p->state = TASK_RUNNING;
 	task_rq_unlock(rq, &flags);
 
 	return success;
@@ -487,7 +490,12 @@ repeat_lock_task:
 
 int wake_up_process(task_t * p)
 {
-	return try_to_wake_up(p, 0);
+	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+}
+
+int wake_up_state(task_t *p, unsigned int state)
+{
+	return try_to_wake_up(p, state, 0);
 }
 
 /*
@@ -1263,7 +1271,7 @@ need_resched:
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
 {
 	task_t *p = curr->task;
-	return ((p->state & mode) && try_to_wake_up(p, sync));
+	return try_to_wake_up(p, mode, sync);
 }
 
 /*
@@ -2418,7 +2426,7 @@ void __init sched_init(void)
 	rq->curr = current;
 	rq->idle = current;
 	set_task_cpu(current, smp_processor_id());
-	wake_up_process(current);
+	wake_up_forked_process(current);
 
 	init_timers();
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b4dfe6d8a54..63c16ac1a9fa 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -620,13 +620,9 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 		t = p;
 		do {
 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
+			wake_up_state(t, TASK_STOPPED);
+
 			/*
-			 * This wakeup is only need if in TASK_STOPPED,
-			 * but there can be SMP races with testing for that.
-			 * In the normal SIGCONT case, all will be stopped.
-			 * A spuriously sent SIGCONT will interrupt all running
-			 * threads to check signals even if it's ignored.
-			 *
 			 * If there is a handler for SIGCONT, we must make
 			 * sure that no thread returns to user mode before
 			 * we post the signal, in case it was the only
@@ -637,11 +633,9 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * siglock that we hold now and until we've queued
 			 * the pending signal.
 			 */
-			if (!(t->flags & PF_EXITING)) {
-				if (!sigismember(&t->blocked, SIGCONT))
-					set_tsk_thread_flag(t, TIF_SIGPENDING);
-				wake_up_process(t);
-			}
+			if (!sigismember(&t->blocked, SIGCONT))
+				set_tsk_thread_flag(t, TIF_SIGPENDING);
+
 			t = next_thread(t);
 		} while (t != p);
 	}
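Note on the new API: try_to_wake_up() now tests the caller-supplied state mask with the runqueue lock held, so a wakeup conditional on task state is atomic. The old pattern of reading p->state and then calling wake_up_process() could race with a concurrent state change, which is the SMP race the deleted handle_stop_signal() comment described, and it is also why default_wake_function() can drop its open-coded (p->state & mode) test. Below is a minimal caller-side sketch against the patched tree; resume_if_stopped() is a hypothetical helper, not part of this patch:

#include <linux/sched.h>

/*
 * Wake 't' only if it is currently in TASK_STOPPED. The state check
 * happens inside try_to_wake_up() under the runqueue lock, so there
 * is no window between "is it stopped?" and the wakeup itself.
 * Returns 1 if the task was placed on the runqueue; 0 if its state
 * did not match the mask or it was already running/runnable.
 */
static int resume_if_stopped(task_t *t)
{
	return wake_up_state(t, TASK_STOPPED);
}

wake_up_process() keeps its old behavior by passing the union of all wakeable states, so existing callers are unaffected.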
