| author | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-05 00:12:58 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@athlon.transmeta.com> | 2002-02-05 00:12:58 -0800 |
| commit | 908920b1d370e7a5c301d14cfce10c310be19be3 | |
| tree | 25606bf2c5215cc0e25c9647612390b88622b279 /kernel/timer.c | |
| parent | d01b7e92c0020f89b4bb33fe61c0dffab7078b42 | |
v2.5.1.9 -> v2.5.1.10
- Kai Germaschewski: ISDN updates
- Al Viro: start moving buffer cache indexing to "struct block_device *"
- Greg KH: USB update
- Russell King: fix up some ARM merge issues
- Ingo Molnar: scalable scheduler
Diffstat (limited to 'kernel/timer.c')

| -rw-r--r-- | kernel/timer.c | 94 |
|---|---|---|

1 file changed, 91 insertions, 3 deletions
```diff
diff --git a/kernel/timer.c b/kernel/timer.c
index c56ec37ea817..ce3945cda799 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -25,6 +25,8 @@
 #include <asm/uaccess.h>
 
+struct kernel_stat kstat;
+
 /*
  * Timekeeping variables
  */
@@ -584,13 +586,16 @@ void update_process_times(int user_tick)
 	update_one_process(p, user_tick, system, cpu);
 	if (p->pid) {
 		expire_task(p);
-		if (p->nice > 0)
+		if (p->__nice > 0)
 			kstat.per_cpu_nice[cpu] += user_tick;
 		else
 			kstat.per_cpu_user[cpu] += user_tick;
 		kstat.per_cpu_system[cpu] += system;
-	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
-		kstat.per_cpu_system[cpu] += system;
+	} else {
+		idle_tick();
+		if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+			kstat.per_cpu_system[cpu] += system;
+	}
 }
 
 /*
@@ -791,6 +796,89 @@ asmlinkage long sys_getegid(void)
 
 #endif
 
+static void process_timeout(unsigned long __data)
+{
+	wake_up_process((task_t *)__data);
+}
+
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+signed long schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire;
+
+	switch (timeout)
+	{
+	case MAX_SCHEDULE_TIMEOUT:
+		/*
+		 * These two special cases are useful to be comfortable
+		 * in the caller. Nothing more. We could take
+		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
+		 * but I' d like to return a valid offset (>=0) to allow
+		 * the caller to do everything it want with the retval.
+		 */
+		schedule();
+		goto out;
+	default:
+		/*
+		 * Another bit of PARANOID. Note that the retval will be
+		 * 0 since no piece of kernel is supposed to do a check
+		 * for a negative retval of schedule_timeout() (since it
+		 * should never happens anyway). You just have the printk()
+		 * that will tell you if something is gone wrong and where.
+		 */
+		if (timeout < 0)
+		{
+			printk(KERN_ERR "schedule_timeout: wrong timeout "
+				"value %lx from %p\n", timeout,
+				__builtin_return_address(0));
+			current->state = TASK_RUNNING;
+			goto out;
+		}
+	}
+
+	expire = timeout + jiffies;
+
+	init_timer(&timer);
+	timer.expires = expire;
+	timer.data = (unsigned long) current;
+	timer.function = process_timeout;
+
+	add_timer(&timer);
+	schedule();
+	del_timer_sync(&timer);
+
+	timeout = expire - jiffies;
+
+ out:
+	return timeout < 0 ? 0 : timeout;
+}
+
 /* Thread ID - the internal kernel "pid" */
 asmlinkage long sys_gettid(void)
 {
```
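The kernel-doc above spells out the calling convention for schedule_timeout(): the caller must set the task state before calling, and the state is TASK_RUNNING again on return. Below is a minimal usage sketch, not part of the commit; the helper name example_sleep_one_second() is invented for illustration, and it assumes the 2.5-era APIs (set_current_state(), HZ) referenced in the comment above.

```c
#include <linux/sched.h>

/* Hypothetical helper, for illustration only: sleep for up to one
 * second, waking early if a signal arrives. */
static signed long example_sleep_one_second(void)
{
	signed long remaining;

	/* The task state must be set first; otherwise schedule_timeout()
	 * returns immediately (see the kernel-doc above). */
	set_current_state(TASK_INTERRUPTIBLE);

	/* Sleep for HZ jiffies (one second). With TASK_INTERRUPTIBLE,
	 * a signal may end the sleep early, in which case the jiffies
	 * still remaining are returned; 0 means the timer expired. */
	remaining = schedule_timeout(HZ);

	/* The state is guaranteed to be TASK_RUNNING again here. */
	return remaining;
}
```

A caller that must sleep the full interval regardless of signals would set TASK_UNINTERRUPTIBLE instead, in which case the return value is guaranteed to be 0.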
