author	Andrew Morton <akpm@digeo.com>	2002-10-31 04:10:09 -0800
committer	Linus Torvalds <torvalds@home.transmeta.com>	2002-10-31 04:10:09 -0800
commit	fd3e620537030db24fd8bc2135b94816dd08c347 (patch)
tree	1798f75cce190cd952faf78c56376a70c82aaebd /kernel
parent	8f2215c6a1d2120850959e48dd9933fc7b4e5547 (diff)
[PATCH] make kernel_stat use per-cpu infrastructure
Patch from Ravikiran G Thirumalai <kiran@in.ibm.com>:

1. Break the disk stats out of kernel_stat and move them to blkdev.h
2. Group the cpu stats in kernel_stat and make them per-cpu (using the per_cpu infrastructure) instead of an NR_CPUS-sized array
3. Remove EXPORT_SYMBOL(kstat) from ksyms.c, since no module uses kstat
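For context, the conversion relies on per-cpu declarations and accessors in include/linux/kernel_stat.h, which are not part of this diff. The following is only a sketch of what those declarations presumably look like, reconstructed from the names used in the hunks below (kstat_cpu(), cpustat.user/nice/system/idle/iowait); the exact types and any additional fields in the real header may differ.

/*
 * Sketch (not part of this patch): assumed per-cpu kernel_stat declarations.
 */
struct cpu_usage_stat {
	unsigned int user;
	unsigned int nice;
	unsigned int system;
	unsigned int idle;
	unsigned int iowait;
};

struct kernel_stat {
	struct cpu_usage_stat cpustat;
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

/* Per-cpu accessor replacing the old kstat.per_cpu_*[cpu] arrays. */
#define kstat_cpu(cpu)	per_cpu(kstat, cpu)

With this layout, a reader of the statistics (the /proc/stat code, for instance) would sum the per-cpu copies along these lines (illustrative only, not part of this patch):

	unsigned int user = 0, system = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		user   += kstat_cpu(i).cpustat.user;
		system += kstat_cpu(i).cpustat.system;
	}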
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/ksyms.c	1
-rw-r--r--	kernel/sched.c	47
-rw-r--r--	kernel/timer.c	1
3 files changed, 41 insertions, 8 deletions
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 6628b24ba827..e040a6a29ed9 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -493,7 +493,6 @@ EXPORT_SYMBOL(__might_sleep);
EXPORT_SYMBOL(loops_per_jiffy);
#endif
-EXPORT_SYMBOL(kstat);
/* misc */
EXPORT_SYMBOL(panic);
diff --git a/kernel/sched.c b/kernel/sched.c
index 20d2854c0bc6..aa62ce0df85f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -839,6 +839,8 @@ static inline void idle_tick(runqueue_t *rq)
#endif
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+
/*
* We place interactive tasks back into the active array, if possible.
*
@@ -872,21 +874,21 @@ void scheduler_tick(int user_ticks, int sys_ticks)
	if (p == rq->idle) {
		/* note: this timer irq context must be accounted for as well */
		if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
-			kstat.per_cpu_system[cpu] += sys_ticks;
+			kstat_cpu(cpu).cpustat.system += sys_ticks;
		else if (atomic_read(&nr_iowait_tasks) > 0)
-			kstat.per_cpu_iowait[cpu] += sys_ticks;
+			kstat_cpu(cpu).cpustat.iowait += sys_ticks;
		else
-			kstat.per_cpu_idle[cpu] += sys_ticks;
+			kstat_cpu(cpu).cpustat.idle += sys_ticks;
#if CONFIG_SMP
		idle_tick(rq);
#endif
		return;
	}
	if (TASK_NICE(p) > 0)
-		kstat.per_cpu_nice[cpu] += user_ticks;
+		kstat_cpu(cpu).cpustat.nice += user_ticks;
	else
-		kstat.per_cpu_user[cpu] += user_ticks;
-	kstat.per_cpu_system[cpu] += sys_ticks;
+		kstat_cpu(cpu).cpustat.user += user_ticks;
+	kstat_cpu(cpu).cpustat.system += sys_ticks;
	/* Task might have expired already, but not scheduled off yet */
	if (p->array != rq->active) {
@@ -2112,11 +2114,44 @@ __init int migration_init(void)
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
#endif
+static void kstat_init_cpu(int cpu)
+{
+	/* Add any initialisation to kstat here */
+	/* Useful when cpu offlining logic is added.. */
+}
+
+static int __devinit kstat_cpu_notify(struct notifier_block *self,
+					unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	switch(action) {
+	case CPU_UP_PREPARE:
+		kstat_init_cpu(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata kstat_nb = {
+	.notifier_call	= kstat_cpu_notify,
+	.next		= NULL,
+};
+
+__init static void init_kstat(void) {
+	kstat_cpu_notify(&kstat_nb, (unsigned long)CPU_UP_PREPARE,
+			(void *)(long)smp_processor_id());
+	register_cpu_notifier(&kstat_nb);
+}
+
void __init sched_init(void)
{
	runqueue_t *rq;
	int i, j, k;
+	/* Init the kstat counters */
+	init_kstat();
	for (i = 0; i < NR_CPUS; i++) {
		prio_array_t *array;
diff --git a/kernel/timer.c b/kernel/timer.c
index ebb2b6c627cc..3c19cec3570d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -409,7 +409,6 @@ struct timespec xtime __attribute__ ((aligned (16)));
/* Don't completely fail for HZ > 500. */
int tickadj = 500/HZ ? : 1; /* microsecs */
-struct kernel_stat kstat;
/*
* phase-lock loop variables