author     Benjamin LaHaise <bcrl@toomuch.toronto.redhat.com>	2002-08-21 10:32:55 -0400
committer  Benjamin LaHaise <bcrl@toomuch.toronto.redhat.com>	2002-08-21 10:32:55 -0400
commit     555dbefdfe6bfa613ebf59852ba82872dcc44233 (patch)
tree       2ccbed4f787cccc6d93d41e0dca9bfda426b93a1 /include/linux
parent     ea5097be4e814a2a9457e60653052306295941e8 (diff)
parent     d17e9bb6daa227e88f596fd0918e3af20e423261 (diff)
Merge toomuch.toronto.redhat.com:/md0/linux-2.5
into toomuch.toronto.redhat.com:/md0/net-aio/bk/aio-2.5
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/fs.h           |   4
-rw-r--r--  include/linux/preempt.h      |  18
-rw-r--r--  include/linux/romfs_fs_sb.h  |  10
-rw-r--r--  include/linux/smp_lock.h     |  54
-rw-r--r--  include/linux/spinlock.h     | 272
-rw-r--r--  include/linux/ufs_fs.h       |  10
6 files changed, 262 insertions, 106 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ec0f6edac31b..f773053fdbc5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -624,8 +624,6 @@ extern void __kill_fasync(struct fasync_struct *, int, int);
#include <linux/ext3_fs_sb.h>
#include <linux/hpfs_fs_sb.h>
-#include <linux/ufs_fs_sb.h>
-#include <linux/romfs_fs_sb.h>
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
@@ -670,8 +668,6 @@ struct super_block {
union {
struct ext3_sb_info ext3_sb;
struct hpfs_sb_info hpfs_sb;
- struct ufs_sb_info ufs_sb;
- struct romfs_sb_info romfs_sb;
void *generic_sbp;
} u;
/*
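
The fs.h hunk above removes the ufs and romfs members from the super_block union, leaving such filesystems to hang their private data off u.generic_sbp. A minimal sketch of that pattern follows; the example_sb_info type and example_fill_super function are hypothetical and not part of this patch.

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical filesystem-private superblock data, no longer in the union. */
struct example_sb_info {
	unsigned long s_maxsize;
};

static int example_fill_super(struct super_block *sb)
{
	struct example_sb_info *sbi;

	sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	memset(sbi, 0, sizeof(*sbi));
	sb->u.generic_sbp = sbi;	/* private data via the generic pointer */
	return 0;
}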
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 3864d46eadba..b4ff1a7c881c 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -1,9 +1,14 @@
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
+
#include <linux/config.h>
-#define preempt_count() (current_thread_info()->preempt_count)
+#define preempt_count() (current_thread_info()->preempt_count)
#define inc_preempt_count() \
do { \
@@ -31,17 +36,16 @@ do { \
barrier(); \
} while (0)
-#define preempt_enable() \
+#define preempt_check_resched() \
do { \
- preempt_enable_no_resched(); \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
-#define preempt_check_resched() \
+#define preempt_enable() \
do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
+ preempt_enable_no_resched(); \
+ preempt_check_resched(); \
} while (0)
#define inc_preempt_count_non_preempt() do { } while (0)
@@ -50,7 +54,7 @@ do { \
#else
#define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do {} while(0)
+#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
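
For context, a hedged usage sketch of the macros touched above: preempt_disable()/preempt_enable() bracket a section that must not migrate off the current CPU, and with this patch preempt_enable() expands to preempt_enable_no_resched() followed by preempt_check_resched(). The per_cpu_counter variable and bump_counter function are made up for illustration.

#include <linux/preempt.h>

static int per_cpu_counter;		/* hypothetical per-CPU data */

static void bump_counter(void)
{
	preempt_disable();		/* raise preempt_count: no preemption here */
	per_cpu_counter++;		/* safe from concurrent preemption on this CPU */
	preempt_enable();		/* enable_no_resched + check_resched */
}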
diff --git a/include/linux/romfs_fs_sb.h b/include/linux/romfs_fs_sb.h
deleted file mode 100644
index 02da2280a6df..000000000000
--- a/include/linux/romfs_fs_sb.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ROMFS_FS_SB
-#define __ROMFS_FS_SB
-
-/* romfs superblock in-core data */
-
-struct romfs_sb_info {
- unsigned long s_maxsize;
-};
-
-#endif
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index cfb23f363e61..40f5358fc856 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -13,7 +13,59 @@
#else
-#include <asm/smplock.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked() (current->lock_depth >= 0)
+
+#define get_kernel_lock() spin_lock(&kernel_flag)
+#define put_kernel_lock() spin_unlock(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ put_kernel_lock(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ get_kernel_lock(); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void lock_kernel(void)
+{
+ int depth = current->lock_depth+1;
+ if (!depth)
+ get_kernel_lock();
+ current->lock_depth = depth;
+}
+
+static __inline__ void unlock_kernel(void)
+{
+ if (current->lock_depth < 0)
+ BUG();
+ if (--current->lock_depth < 0)
+ put_kernel_lock();
+}
#endif /* CONFIG_SMP */
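
A short sketch of the recursion that lock_depth permits (inner_op and outer_op are illustrative names only): lock_kernel() only takes the kernel_flag spinlock when the depth goes from -1 to 0, so nested calls in the same task just bump a counter.

#include <linux/smp_lock.h>

static void inner_op(void)
{
	lock_kernel();		/* depth 0 -> 1: counter bump only, lock already held */
	/* ... BKL-protected work ... */
	unlock_kernel();	/* depth 1 -> 0: still held by outer_op() */
}

static void outer_op(void)
{
	lock_kernel();		/* depth -1 -> 0: actually takes kernel_flag */
	inner_op();
	unlock_kernel();	/* depth 0 -> -1: releases kernel_flag */
}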
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 749d3054b2dc..6de41e91171f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,52 +1,23 @@
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
+/*
+ * include/linux/spinlock.h - generic locking declarations
+ */
+
#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
+#include <linux/stringify.h>
#include <asm/system.h>
/*
- * These are the generic versions of the spinlocks and read-write
- * locks..
+ * Must define these before including other files, inline functions need them
*/
-#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
-#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
-
-#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
-#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
-
-#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
-#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
-
-#define spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock) do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
-
-#define read_unlock_irqrestore(lock, flags) do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define read_unlock_irq(lock) do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
-
-#define write_unlock_irqrestore(lock, flags) do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define write_unlock_irq(lock) do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
-#define spin_trylock_bh(lock) ({ int __r; local_bh_disable();\
- __r = spin_trylock(lock); \
- if (!__r) local_bh_enable(); \
- __r; })
-
-/* Must define these before including other files, inline functions need them */
-
-#include <linux/stringify.h>
-
#define LOCK_SECTION_NAME \
".text.lock." __stringify(KBUILD_BASENAME)
@@ -60,11 +31,17 @@
#define LOCK_SECTION_END \
".previous\n\t"
+/*
+ * If CONFIG_SMP is set, pull in the _raw_* definitions
+ */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
-#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
- defined (e.g. by including asm/spinlock.h */
+/*
+ * !CONFIG_SMP and spin_lock_init not previously defined
+ * (e.g. by including include/asm/spinlock.h)
+ */
+#elif !defined(spin_lock_init)
#ifndef CONFIG_PREEMPT
# define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
@@ -72,55 +49,42 @@
#endif
/*
- * Your basic spinlocks, allowing only a single CPU anywhere
- *
- * Most gcc versions have a nasty bug with empty initializers.
+ * gcc versions before ~2.95 have a nasty bug with empty initializers.
*/
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+ typedef struct { } rwlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+ #define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+ typedef struct { int gcc_is_buggy; } rwlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+ #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
+/*
+ * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ */
#define spin_lock_init(lock) do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_spin_lock(lock) (void)(lock)
#define spin_is_locked(lock) ((void)(lock), 0)
#define _raw_spin_trylock(lock) ((void)(lock), 1)
#define spin_unlock_wait(lock) do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * Most gcc versions have a nasty bug with empty initializers.
- */
-#if (__GNUC__ > 2)
- typedef struct { } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { }
-#else
- typedef struct { int gcc_is_buggy; } rwlock_t;
- #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#endif
-
#define rwlock_init(lock) do { } while(0)
-#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_read_lock(lock) (void)(lock)
#define _raw_read_unlock(lock) do { } while(0)
-#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_write_lock(lock) (void)(lock)
#define _raw_write_unlock(lock) do { } while(0)
#endif /* !SMP */
-#ifdef CONFIG_PREEMPT
-
+/*
+ * Define the various spin_lock and rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
#define spin_lock(lock) \
do { \
preempt_disable(); \
@@ -129,31 +93,175 @@ do { \
#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
1 : ({preempt_enable(); 0;});})
+
#define spin_unlock(lock) \
do { \
_raw_spin_unlock(lock); \
preempt_enable(); \
} while (0)
-#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
-#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
-#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
-#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
+#define read_lock(lock) \
+do { \
+ preempt_disable(); \
+ _raw_read_lock(lock); \
+} while(0)
+
+#define read_unlock(lock) \
+do { \
+ _raw_read_unlock(lock); \
+ preempt_enable(); \
+} while(0)
+
+#define write_lock(lock) \
+do { \
+ preempt_disable(); \
+ _raw_write_lock(lock); \
+} while(0)
+
+#define write_unlock(lock) \
+do { \
+ _raw_write_unlock(lock); \
+ preempt_enable(); \
+} while(0)
+
#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
1 : ({preempt_enable(); 0;});})
-#else
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ local_irq_save(flags); \
+ preempt_disable(); \
+ _raw_spin_lock(lock); \
+} while (0)
-#define spin_lock(lock) _raw_spin_lock(lock)
-#define spin_trylock(lock) _raw_spin_trylock(lock)
-#define spin_unlock(lock) _raw_spin_unlock(lock)
+#define spin_lock_irq(lock) \
+do { \
+ local_irq_disable(); \
+ preempt_disable(); \
+ _raw_spin_lock(lock); \
+} while (0)
-#define read_lock(lock) _raw_read_lock(lock)
-#define read_unlock(lock) _raw_read_unlock(lock)
-#define write_lock(lock) _raw_write_lock(lock)
-#define write_unlock(lock) _raw_write_unlock(lock)
-#define write_trylock(lock) _raw_write_trylock(lock)
-#endif
+#define spin_lock_bh(lock) \
+do { \
+ local_bh_disable(); \
+ preempt_disable(); \
+ _raw_spin_lock(lock); \
+} while (0)
+
+#define read_lock_irqsave(lock, flags) \
+do { \
+ local_irq_save(flags); \
+ preempt_disable(); \
+ _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_irq(lock) \
+do { \
+ local_irq_disable(); \
+ preempt_disable(); \
+ _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_bh(lock) \
+do { \
+ local_bh_disable(); \
+ preempt_disable(); \
+ _raw_read_lock(lock); \
+} while (0)
+
+#define write_lock_irqsave(lock, flags) \
+do { \
+ local_irq_save(flags); \
+ preempt_disable(); \
+ _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_irq(lock) \
+do { \
+ local_irq_disable(); \
+ preempt_disable(); \
+ _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_bh(lock) \
+do { \
+ local_bh_disable(); \
+ preempt_disable(); \
+ _raw_write_lock(lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+ _raw_spin_unlock(lock); \
+ local_irq_restore(flags); \
+ preempt_enable(); \
+} while (0)
+
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+do { \
+ _raw_spin_unlock(lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define spin_unlock_irq(lock) \
+do { \
+ _raw_spin_unlock(lock); \
+ local_irq_enable(); \
+ preempt_enable(); \
+} while (0)
+
+#define spin_unlock_bh(lock) \
+do { \
+ _raw_spin_unlock(lock); \
+ preempt_enable(); \
+ local_bh_enable(); \
+} while (0)
+
+#define read_unlock_irqrestore(lock, flags) \
+do { \
+ _raw_read_unlock(lock); \
+ local_irq_restore(flags); \
+ preempt_enable(); \
+} while (0)
+
+#define read_unlock_irq(lock) \
+do { \
+ _raw_read_unlock(lock); \
+ local_irq_enable(); \
+ preempt_enable(); \
+} while (0)
+
+#define read_unlock_bh(lock) \
+do { \
+ _raw_read_unlock(lock); \
+ preempt_enable(); \
+ local_bh_enable(); \
+} while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+do { \
+ _raw_write_unlock(lock); \
+ local_irq_restore(flags); \
+ preempt_enable(); \
+} while (0)
+
+#define write_unlock_irq(lock) \
+do { \
+ _raw_write_unlock(lock); \
+ local_irq_enable(); \
+ preempt_enable(); \
+} while (0)
+
+#define write_unlock_bh(lock) \
+do { \
+ _raw_write_unlock(lock); \
+ preempt_enable(); \
+ local_bh_enable(); \
+} while (0)
+
+#define spin_trylock_bh(lock) ({ local_bh_disable(); preempt_disable(); \
+ _raw_spin_trylock(lock) ? 1 : \
+ ({preempt_enable(); local_bh_enable(); 0;});})
/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
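
As a usage sketch of the rewritten macros (my_lock and my_count are hypothetical), spin_lock_irqsave() now disables interrupts, then preemption, then takes the raw lock, and spin_unlock_irqrestore() drops the raw lock, restores interrupts, and re-enables preemption:

#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical lock */
static int my_count;

static void bump_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	 /* irqs off, preempt off, raw lock */
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags); /* raw unlock, irqs restored, preempt on */
}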
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index 7ba4e3e66e4e..dd9bc72d795e 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -33,6 +33,9 @@
#include <linux/stat.h>
#include <linux/fs.h>
+#include <linux/ufs_fs_i.h>
+#include <linux/ufs_fs_sb.h>
+
#define UFS_BBLOCK 0
#define UFS_BBSIZE 8192
#define UFS_SBLOCK 8192
@@ -398,7 +401,7 @@ struct ufs_super_block {
* Convert cylinder group to base address of its global summary info.
*/
#define fs_cs(indx) \
- u.ufs_sb.s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask]
+ s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask]
/*
* Cylinder group block for a file system.
@@ -780,7 +783,10 @@ extern struct inode_operations ufs_fast_symlink_inode_operations;
/* truncate.c */
extern void ufs_truncate (struct inode *);
-#include <linux/ufs_fs_i.h>
+static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
+{
+ return sb->u.generic_sbp;
+}
static inline struct ufs_inode_info *UFS_I(struct inode *inode)
{
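
With ufs_sb dropped from the super_block union, code that previously reached into sb->u.ufs_sb would instead go through the UFS_SB() accessor added above. A hedged sketch, with example_use as an illustrative caller:

#include <linux/fs.h>
#include <linux/ufs_fs.h>

static void example_use(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);	/* was: &sb->u.ufs_sb */

	/* ... use sbi wherever sb->u.ufs_sb fields were used before ... */
	(void)sbi;
}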