| field | value | date |
|---|---|---|
| author | Christoph Hellwig <hch@infradead.org> | 2002-05-05 02:56:07 -0700 |
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-05-05 02:56:07 -0700 |
| commit | 1832e93496fd47439ea36259c746ff7e7182ff72 (patch) | |
| tree | 8d4d19ba9af9f671ac5003697c61f7b6c5cab9ef /fs/buffer.c | |
| parent | a0f9f9c7c577539e7b49963425aa432003ea447a (diff) | |
[PATCH] remove global_bufferlist_lock
This patch addresses Andrew's FIXME in buffer.c: it adds a spinlock to the reiserfs
journal that can be passed to the buffer-list manipulation functions. This obsoletes
the checks for the lock being NULL in buffer.c, along with the
global_bufferlist_lock itself.
In addition, osync_buffers_list is changed to use list_for_each_prev, as in 2.4,
instead of a hand-crafted loop.
A little comment to the reiserfs folks: your code would be _sooo_ much
easier to understand if you used temporary local variables for
often-referenced fields.
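
With the NULL fallback gone, every caller of the buffer-list helpers has to supply a real lock. The sketch below is illustrative only and is not the actual reiserfs change: the struct, field, and function names (`example_journal`, `j_dirty_buffers_lock`, `example_journal_*`) are made up for this note, only the `buffer_insert_list()` and `fsync_buffers_list()` signatures come from the patch, and the header holding their prototypes is an assumption.

```c
/*
 * Illustrative sketch only -- not the reiserfs patch itself.  A journal
 * that keeps a private list of dirty buffers now also keeps a private
 * spinlock and hands it to the buffer-list helpers, instead of passing
 * NULL and relying on the removed global_bufferlist_lock.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/fs.h>		/* assumed home of the buffer-list prototypes */

struct example_journal {			/* hypothetical journal */
	spinlock_t	 j_dirty_buffers_lock;	/* protects j_dirty_buffers */
	struct list_head j_dirty_buffers;	/* dirty buffer_heads to flush */
};

static void example_journal_init(struct example_journal *j)
{
	spin_lock_init(&j->j_dirty_buffers_lock);
	INIT_LIST_HEAD(&j->j_dirty_buffers);
}

/* Track a buffer on the journal's list, under the journal's own lock. */
static void example_journal_add_buffer(struct example_journal *j,
				       struct buffer_head *bh)
{
	buffer_insert_list(&j->j_dirty_buffers_lock, bh, &j->j_dirty_buffers);
}

/* Write out and wait on every buffer the journal tracked. */
static int example_journal_flush(struct example_journal *j)
{
	return fsync_buffers_list(&j->j_dirty_buffers_lock,
				  &j->j_dirty_buffers);
}
```

Whichever filesystem owns the list now also owns the lock; buffer.c no longer has to guess which lock applies.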
Diffstat (limited to 'fs/buffer.c')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | fs/buffer.c | 22 |

1 file changed, 2 insertions, 20 deletions
```diff
diff --git a/fs/buffer.c b/fs/buffer.c
index 32147290aac5..e1ea3dca4687 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,15 +49,6 @@ static struct bh_wait_queue_head {
 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
 
 /*
- * Several of these buffer list functions are exported to filesystems,
- * so we do funny things with the spinlocking to support those
- * filesystems while still using inode->i_bufferlist_lock for
- * most applications.
- * FIXME: put a spinlock in the reiserfs journal and kill this lock.
- */
-static spinlock_t global_bufferlist_lock = SPIN_LOCK_UNLOCKED;
-
-/*
  * Debug/devel support stuff
  */
 
@@ -448,8 +439,6 @@ out:
 void buffer_insert_list(spinlock_t *lock,
 			struct buffer_head *bh, struct list_head *list)
 {
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
 	spin_lock(lock);
 	list_del(&bh->b_inode_buffers);
 	list_add(&bh->b_inode_buffers, list);
@@ -701,14 +690,10 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 	struct list_head *p;
 	int err = 0;
 
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
-
 	spin_lock(lock);
 repeat:
-	for (p = list->prev;
-	     bh = BH_ENTRY(p), p != list;
-	     p = bh->b_inode_buffers.prev) {
+	list_for_each_prev(p, list) {
+		bh = BH_ENTRY(p);
 		if (buffer_locked(bh)) {
 			get_bh(bh);
 			spin_unlock(lock);
@@ -749,9 +734,6 @@ int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 	struct list_head tmp;
 	int err = 0, err2;
 
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
-
 	INIT_LIST_HEAD(&tmp);
 
 	spin_lock(lock);
```
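
The osync_buffers_list() hunk above is a mechanical conversion: list_for_each_prev() walks the list from the tail just as the removed three-clause for loop did by hand. A minimal sketch of the resulting loop shape follows; the helper name `example_count_locked()` is hypothetical, and BH_ENTRY is written out as the list_entry() it expands to in buffer.c.

```c
#include <linux/list.h>
#include <linux/fs.h>		/* struct buffer_head, buffer_locked() */

/*
 * Hypothetical helper, sketching the loop shape after the conversion:
 * walk a list of buffer_heads from the tail, as the removed hand-rolled
 * "for (p = list->prev; ...; p = bh->b_inode_buffers.prev)" loop did.
 */
static int example_count_locked(struct list_head *list)
{
	struct list_head *p;
	struct buffer_head *bh;
	int nr = 0;

	list_for_each_prev(p, list) {	/* p visits list->prev, then its ->prev, ... */
		/* what buffer.c spells BH_ENTRY(p) */
		bh = list_entry(p, struct buffer_head, b_inode_buffers);
		if (buffer_locked(bh))
			nr++;
	}
	return nr;
}
```

Only the pointer bookkeeping moves into the macro; the traversal order is unchanged.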
