summaryrefslogtreecommitdiff
path: root/kernel/time.c
diff options
context:
space:
mode:
authorStephen Hemminger <shemminger@osdl.org>2003-02-04 23:25:27 -0800
committerLinus Torvalds <torvalds@penguin.transmeta.com>2003-02-04 23:25:27 -0800
commitbb59cfa4c9113214f91fa0ce744fd92fe2745039 (patch)
tree392951d646a403765ff56681fa3a6f5ae99815ed /kernel/time.c
parent62672619d8b2203538f37c05ca167b9a8b3f94d4 (diff)
[PATCH] seqlock for xtime
Add "seqlock" infrastructure for doing low-overhead optimistic reader locks (writer increments a sequence number, reader verifies that no writers came in during the critical region, and lots of careful memory barriers to take care of business). Make xtime/get_jiffies_64() use this new locking.
Diffstat (limited to 'kernel/time.c')
-rw-r--r--kernel/time.c31
1 file changed, 16 insertions, 15 deletions
diff --git a/kernel/time.c b/kernel/time.c
index ead1e3c7fb29..c8c8a10eae1f 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -36,9 +36,6 @@
*/
struct timezone sys_tz;
-/* The xtime_lock is not only serializing the xtime read/writes but it's also
- serializing all accesses to the global NTP variables now. */
-extern rwlock_t xtime_lock;
extern unsigned long last_time_offset;
#if !defined(__alpha__) && !defined(__ia64__)
@@ -80,7 +77,7 @@ asmlinkage long sys_stime(int * tptr)
return -EPERM;
if (get_user(value, tptr))
return -EFAULT;
- write_lock_irq(&xtime_lock);
+ write_seqlock_irq(&xtime_lock);
xtime.tv_sec = value;
xtime.tv_nsec = 0;
last_time_offset = 0;
@@ -88,7 +85,7 @@ asmlinkage long sys_stime(int * tptr)
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
- write_unlock_irq(&xtime_lock);
+ write_sequnlock_irq(&xtime_lock);
return 0;
}
@@ -96,13 +93,13 @@ asmlinkage long sys_stime(int * tptr)
asmlinkage long sys_gettimeofday(struct timeval *tv, struct timezone *tz)
{
- if (tv) {
+ if (likely(tv != NULL)) {
struct timeval ktv;
do_gettimeofday(&ktv);
if (copy_to_user(tv, &ktv, sizeof(ktv)))
return -EFAULT;
}
- if (tz) {
+ if (unlikely(tz != NULL)) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
@@ -127,10 +124,10 @@ asmlinkage long sys_gettimeofday(struct timeval *tv, struct timezone *tz)
*/
inline static void warp_clock(void)
{
- write_lock_irq(&xtime_lock);
+ write_seqlock_irq(&xtime_lock);
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
last_time_offset = 0;
- write_unlock_irq(&xtime_lock);
+ write_sequnlock_irq(&xtime_lock);
}
/*
@@ -235,7 +232,7 @@ int do_adjtimex(struct timex *txc)
txc->tick > 1100000/USER_HZ)
return -EINVAL;
- write_lock_irq(&xtime_lock);
+ write_seqlock_irq(&xtime_lock);
result = time_state; /* mostly `TIME_OK' */
/* Save for later - semantics of adjtime is to return old value */
@@ -386,7 +383,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
txc->errcnt = pps_errcnt;
txc->stbcnt = pps_stbcnt;
last_time_offset = 0;
- write_unlock_irq(&xtime_lock);
+ write_sequnlock_irq(&xtime_lock);
do_gettimeofday(&txc->time);
return(result);
}
@@ -409,9 +406,13 @@ asmlinkage long sys_adjtimex(struct timex *txc_p)
struct timespec current_kernel_time(void)
{
struct timespec now;
- unsigned long flags;
- read_lock_irqsave(&xtime_lock,flags);
- now = xtime;
- read_unlock_irqrestore(&xtime_lock,flags);
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ now = xtime;
+ } while (read_seqretry(&xtime_lock, seq));
+
return now;
}