author	Robert Love <rml@tech9.net>	2002-05-28 05:03:31 -0700
committer	Linus Torvalds <torvalds@home.transmeta.com>	2002-05-28 05:03:31 -0700
commit	01bc15eda4218694be18921cd8699bcd9a8427df (patch)
tree	c28463b5e29fc1a758cd1185148de0ab2c465538	/kernel/sched.c
parent	5ff8f2bb405a6018e26560c16606d17234a6397e (diff)
[PATCH] O(1) count_active_tasks
This is William Irwin's algorithmically O(1) version of count_active_tasks (which is currently O(n) for n total tasks on the system).

I like it a lot: we become O(1) because now we count uninterruptible tasks, so we can return (nr_uninterruptible + nr_running). It does not introduce any overhead or hurt the case for small n, so I have no complaints.

This copy has a small optimization over the original posting, but is otherwise the same thing wli posted earlier. I have tested to make sure this returns accurate results and that the kernel profile improves.
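This diff only maintains the per-runqueue counter; the O(1) payoff comes in the load-average sampler, which no longer has to walk the whole task list. Below is a minimal sketch of the intended caller, assuming count_active_tasks() in kernel/timer.c is rebuilt on top of the two helpers (that file is outside this kernel/sched.c diff, so the exact shape is an assumption; FIXED_1 is the fixed-point scale the loadavg code already uses):

/*
 * Sketch only -- the kernel/timer.c side is not part of this diff.
 * count_active_tasks() feeds calc_load(); with the new counter it
 * reduces to two O(1) per-CPU sums instead of an O(n) walk over
 * every task in the system.
 */
static unsigned long count_active_tasks(void)
{
	return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

The counter is declared signed long, likely because a task can be deactivated on one runqueue and, if migrated while it sleeps, woken on another, so an individual runqueue's count can go negative; only the sum across all CPUs is meaningful.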
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	17
1 file changed, 17 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a433a34f6470..07caec2ae996 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -137,6 +137,7 @@ struct runqueue {
 	spinlock_t lock;
 	spinlock_t frozen;
 	unsigned long nr_running, nr_switches, expired_timestamp;
+	signed long nr_uninterruptible;
 	task_t *curr, *idle;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_nr_running[NR_CPUS];
@@ -244,6 +245,8 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
 static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible++;
 	dequeue_task(p, p->array);
 	p->array = NULL;
 }
@@ -323,11 +326,15 @@ static int try_to_wake_up(task_t * p)
 {
 	unsigned long flags;
 	int success = 0;
+	long old_state;
 	runqueue_t *rq;
 
 	rq = task_rq_lock(p, &flags);
+	old_state = p->state;
 	p->state = TASK_RUNNING;
 	if (!p->array) {
+		if (old_state == TASK_UNINTERRUPTIBLE)
+			rq->nr_uninterruptible--;
 		activate_task(p, rq);
 		if (p->prio < rq->curr->prio)
 			resched_task(rq->curr);
@@ -433,6 +440,16 @@ unsigned long nr_running(void)
 	return sum;
 }
 
+unsigned long nr_uninterruptible(void)
+{
+	unsigned long i, sum = 0;
+
+	for (i = 0; i < smp_num_cpus; i++)
+		sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible;
+
+	return sum;
+}
+
 unsigned long nr_context_switches(void)
 {
 	unsigned long i, sum = 0;