author     Ingo Molnar <mingo@elte.hu>    2002-06-11 06:02:35 +0200
committer  Ingo Molnar <mingo@elte.hu>    2002-06-11 06:02:35 +0200
commit     0ed4dd24ff471eb7542378359f2852e2bf12bb99 (patch)
tree       4e97f27cf64061e9bfe523c3d1810bc8a7c23c1e  /kernel/sched.c
parent     2b75b53521e41a4b62ccafb7d2e2b978d3ba7eda  (diff)
- rq-lock optimization in the preemption case, from Robert Love, plus some more cleanups.
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  42
1 files changed, 28 insertions, 14 deletions
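
For reference, here is the resulting task_rq_lock() reassembled from the first hunk below (an editorial illustration of the new pattern, not part of the patch itself): instead of disabling preemption explicitly and then taking the lock with spin_lock_irqsave(), interrupts are disabled up front with local_irq_save(). With local interrupts off the current task cannot be preempted away from this CPU, so the runqueue lookup plus the retry loop remain safe and the preempt_disable()/preempt_enable() pair can be dropped.

static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
{
        struct runqueue *rq;

repeat_lock_task:
        /* irqs off: no preemption on this CPU while we look up the rq */
        local_irq_save(*flags);
        rq = task_rq(p);
        spin_lock(&rq->lock);
        /*
         * p may have been migrated to another runqueue between the
         * lookup and taking the lock; if so, drop the lock (which also
         * restores irqs) and retry.
         */
        if (unlikely(rq != task_rq(p))) {
                spin_unlock_irqrestore(&rq->lock, *flags);
                goto repeat_lock_task;
        }
        return rq;
}

The same idea drives the new rq_lock()/rq_unlock() helpers further down: local_irq_disable() before this_rq() keeps the "which CPU am I on" answer stable without touching the preempt count.
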
diff --git a/kernel/sched.c b/kernel/sched.c
index c6fd5cb821b6..856ca71682d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -152,17 +152,21 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define rt_task(p) ((p)->prio < MAX_RT_PRIO)
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
{
struct runqueue *rq;
repeat_lock_task:
- preempt_disable();
+ local_irq_save(*flags);
rq = task_rq(p);
- spin_lock_irqsave(&rq->lock, *flags);
+ spin_lock(&rq->lock);
if (unlikely(rq != task_rq(p))) {
spin_unlock_irqrestore(&rq->lock, *flags);
- preempt_enable();
goto repeat_lock_task;
}
return rq;
@@ -171,7 +175,23 @@ repeat_lock_task:
static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
{
spin_unlock_irqrestore(&rq->lock, *flags);
- preempt_enable();
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+ local_irq_disable();
+ rq = this_rq();
+ spin_lock(&rq->lock);
+ return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+ spin_unlock(&rq->lock);
+ local_irq_enable();
}
/*
@@ -364,9 +384,7 @@ void wake_up_forked_process(task_t * p)
{
runqueue_t *rq;
- preempt_disable();
- rq = this_rq();
- spin_lock_irq(&rq->lock);
+ rq = rq_lock(rq);
p->state = TASK_RUNNING;
if (!rt_task(p)) {
@@ -382,8 +400,7 @@ void wake_up_forked_process(task_t * p)
p->thread_info->cpu = smp_processor_id();
activate_task(p, rq);
- spin_unlock_irq(&rq->lock);
- preempt_enable();
+ rq_unlock(rq);
}
/*
@@ -1367,8 +1384,7 @@ asmlinkage long sys_sched_yield(void)
runqueue_t *rq;
prio_array_t *array;
- preempt_disable();
- rq = this_rq();
+ rq = rq_lock(rq);
/*
* Decrease the yielding task's priority by one, to avoid
@@ -1378,7 +1394,6 @@ asmlinkage long sys_sched_yield(void)
* If priority is already MAX_PRIO-1 then we still
* roundrobin the task within the runlist.
*/
- spin_lock_irq(&rq->lock);
array = current->array;
/*
* If the task has reached maximum priority (or is a RT task)
@@ -1395,8 +1410,7 @@ asmlinkage long sys_sched_yield(void)
list_add_tail(&current->run_list, array->queue + current->prio);
__set_bit(current->prio, array->bitmap);
}
- spin_unlock(&rq->lock);
- preempt_enable_no_resched();
+ spin_unlock_no_resched(&rq->lock);
schedule();
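
In the sys_sched_yield() tail above, the explicit spin_unlock() + preempt_enable_no_resched() sequence is replaced by a single spin_unlock_no_resched(): since schedule() is called on the very next line, there is no point in running the "do we need to reschedule?" check while dropping the lock. As a rough sketch (an approximation of the CONFIG_PREEMPT spinlock definitions of this era, not something added by this patch), the helper amounts to:

#ifdef CONFIG_PREEMPT
# define spin_unlock_no_resched(lock)                           \
do {                                                            \
        _raw_spin_unlock(lock);         /* drop the lock */     \
        preempt_enable_no_resched();    /* no resched check */  \
} while (0)
#else
# define spin_unlock_no_resched(lock)   spin_unlock(lock)
#endif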