diff options
| author | Linus Torvalds <torvalds@penguin.transmeta.com> | 2003-02-04 23:30:12 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2003-02-04 23:30:12 -0800 |
| commit | b82507b1dd06bf8e080281625840575a3b12d834 (patch) | |
| tree | e33593b86302b67b5d87e5126af48d574cd25c9b /include/linux | |
| parent | 263cc27fa141984aed0e6de7474fc813449d02a3 (diff) | |
| parent | bb59cfa4c9113214f91fa0ce744fd92fe2745039 (diff) | |
Merge http://linux-acpi.bkbits.net/linux-acpi
into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/jiffies.h | 13 | ||||
| -rw-r--r-- | include/linux/seqlock.h | 123 | ||||
| -rw-r--r-- | include/linux/time.h | 3 |
3 files changed, 133 insertions, 6 deletions
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index c81b51bab1e3..0a60a4f52077 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/spinlock.h> +#include <linux/seqlock.h> #include <asm/system.h> #include <asm/param.h> /* for HZ */ @@ -17,13 +18,15 @@ extern unsigned long volatile jiffies; static inline u64 get_jiffies_64(void) { #if BITS_PER_LONG < 64 - extern rwlock_t xtime_lock; - unsigned long flags; + extern seqlock_t xtime_lock; + unsigned long seq; u64 tmp; - read_lock_irqsave(&xtime_lock, flags); - tmp = jiffies_64; - read_unlock_irqrestore(&xtime_lock, flags); + do { + seq = read_seqbegin(&xtime_lock); + tmp = jiffies_64; + } while (read_seqretry(&xtime_lock, seq)); + return tmp; #else return (u64)jiffies; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h new file mode 100644 index 000000000000..2660cf7634c1 --- /dev/null +++ b/include/linux/seqlock.h @@ -0,0 +1,123 @@ +#ifndef __LINUX_SEQLOCK_H +#define __LINUX_SEQLOCK_H +/* + * Reader/writer consistent mechanism without starving writers. This type of + * lock is for data where the reader wants a consistent set of information + * and is willing to retry if the information changes. Readers never + * block but they may have to retry if a writer is in + * progress. Writers do not wait for readers. + * + * This is not as cache friendly as brlock. Also, this will not work + * for data that contains pointers, because any writer could + * invalidate a pointer that a reader was following. + * + * Expected reader usage: + * do { + * seq = read_seqbegin(&foo); + * ... + * } while (read_seqretry(&foo, seq)); + * + * + * On non-SMP the spin locks disappear but the writer still needs + * to increment the sequence variables because an interrupt routine could + * change the state of the data. 
+ * + * Based on x86_64 vsyscall gettimeofday + * by Keith Owens and Andrea Arcangeli + */ + +#include <linux/config.h> +#include <linux/spinlock.h> +#include <linux/preempt.h> + +typedef struct { + unsigned sequence; + spinlock_t lock; +} seqlock_t; + +/* + * These macros triggered gcc-3.x compile-time problems. We think these are + * OK now. Be cautious. + */ +#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } +#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) + + +/* Lock out other writers and update the count. + * Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. + */ +static inline void write_seqlock(seqlock_t *sl) +{ + spin_lock(&sl->lock); + ++sl->sequence; + smp_wmb(); +} + +static inline void write_sequnlock(seqlock_t *sl) +{ + smp_wmb(); + sl->sequence++; + spin_unlock(&sl->lock); +} + +static inline int write_tryseqlock(seqlock_t *sl) +{ + int ret = spin_trylock(&sl->lock); + + if (ret) { + ++sl->sequence; + smp_wmb(); + } + return ret; +} + +/* Start of read calculation -- fetch last complete writer token */ +static inline unsigned read_seqbegin(const seqlock_t *sl) +{ + unsigned ret = sl->sequence; + smp_rmb(); + return ret; +} + +/* Test if reader processed invalid data. + * If the initial value is odd, + * then writer had already started when section was entered + * If sequence value changed + * then writer changed data while in section + * + * Using xor saves one conditional branch. + */ +static inline int read_seqretry(const seqlock_t *sl, unsigned iv) +{ + smp_rmb(); + return (iv & 1) | (sl->sequence ^ iv); +} + +/* + * Possible sw/hw IRQ protected versions of the interfaces. 
+ */ +#define write_seqlock_irqsave(lock, flags) \ + do { local_irq_save(flags); write_seqlock(lock); } while (0) +#define write_seqlock_irq(lock) \ + do { local_irq_disable(); write_seqlock(lock); } while (0) +#define write_seqlock_bh(lock) \ + do { local_bh_disable(); write_seqlock(lock); } while (0) + +#define write_sequnlock_irqrestore(lock, flags) \ + do { write_sequnlock(lock); local_irq_restore(flags); } while(0) +#define write_sequnlock_irq(lock) \ + do { write_sequnlock(lock); local_irq_enable(); } while(0) +#define write_sequnlock_bh(lock) \ + do { write_sequnlock(lock); local_bh_enable(); } while(0) + +#define read_seqbegin_irqsave(lock, flags) \ + ({ local_irq_save(flags); read_seqbegin(lock); }) + +#define read_seqretry_irqrestore(lock, iv, flags) \ + ({int ret = read_seqretry(lock, iv); \ + local_irq_restore(flags); \ + ret; \ + }) + +#endif /* __LINUX_SEQLOCK_H */ diff --git a/include/linux/time.h b/include/linux/time.h index 52d60ec2b364..7355ae1f78ca 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -25,6 +25,7 @@ struct timezone { #ifdef __KERNEL__ #include <linux/spinlock.h> +#include <linux/seqlock.h> /* * Change timeval to jiffies, trying to avoid the @@ -120,7 +121,7 @@ mktime (unsigned int year, unsigned int mon, } extern struct timespec xtime; -extern rwlock_t xtime_lock; +extern seqlock_t xtime_lock; static inline unsigned long get_seconds(void) { |
