author     Christoph Hellwig <hch@infradead.org>         2002-05-05 02:56:07 -0700
committer  Linus Torvalds <torvalds@home.transmeta.com>  2002-05-05 02:56:07 -0700
commit     1832e93496fd47439ea36259c746ff7e7182ff72 (patch)
tree       8d4d19ba9af9f671ac5003697c61f7b6c5cab9ef
parent     a0f9f9c7c577539e7b49963425aa432003ea447a (diff)
[PATCH] remove global_bufferlist_lock
This patch addresses Andrew's FIXME in buffer.c and adds a spinlock to the reiserfs journal that can be passed to the buffer list manipulation functions. This obsoletes the checks for the lock being NULL in buffer.c, along with the global_bufferlist_lock itself. In addition, osync_buffers_list is changed to use list_for_each_prev, as in 2.4, instead of a hand-crafted loop.

A little comment to the reiserfs folks: your code would be _sooo_ much easier to understand if you used temporary local variables for often-referenced fields.
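The practical consequence for a filesystem that keeps its own list of dirty buffers is that it must now pass the spinlock protecting that list to buffer_insert_list() and fsync_buffers_list(); the reiserfs hunks below are the real conversion. As a rough sketch of the new calling convention (the myfs_* names are hypothetical; the signatures are the ones visible in the buffer.c hunks below):

	/* Hypothetical sketch -- myfs_* is illustrative only; reiserfs below is
	 * the actual caller.  The lock argument may no longer be NULL, since the
	 * global_bufferlist_lock fallback has been removed. */
	struct myfs_journal {
		struct list_head dirty_buffers;
		spinlock_t dirty_buffers_lock;	/* protects dirty_buffers */
	};

	static void myfs_journal_init(struct myfs_journal *j)
	{
		INIT_LIST_HEAD(&j->dirty_buffers);
		spin_lock_init(&j->dirty_buffers_lock);
	}

	static void myfs_add_to_flushlist(struct myfs_journal *j, struct buffer_head *bh)
	{
		buffer_insert_list(&j->dirty_buffers_lock, bh, &j->dirty_buffers);
	}

	static int myfs_flush_dirty_buffers(struct myfs_journal *j)
	{
		/* writes out and waits on every buffer on the private list */
		return fsync_buffers_list(&j->dirty_buffers_lock, &j->dirty_buffers);
	}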
-rw-r--r--  fs/buffer.c                    | 22
-rw-r--r--  fs/reiserfs/inode.c            |  4
-rw-r--r--  fs/reiserfs/journal.c          |  4
-rw-r--r--  include/linux/reiserfs_fs_sb.h |  1
4 files changed, 8 insertions(+), 23 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 32147290aac5..e1ea3dca4687 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,15 +49,6 @@ static struct bh_wait_queue_head {
} ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
/*
- * Several of these buffer list functions are exported to filesystems,
- * so we do funny things with the spinlocking to support those
- * filesystems while still using inode->i_bufferlist_lock for
- * most applications.
- * FIXME: put a spinlock in the reiserfs journal and kill this lock.
- */
-static spinlock_t global_bufferlist_lock = SPIN_LOCK_UNLOCKED;
-
-/*
* Debug/devel support stuff
*/
@@ -448,8 +439,6 @@ out:
void buffer_insert_list(spinlock_t *lock,
struct buffer_head *bh, struct list_head *list)
{
- if (lock == NULL)
- lock = &global_bufferlist_lock;
spin_lock(lock);
list_del(&bh->b_inode_buffers);
list_add(&bh->b_inode_buffers, list);
@@ -701,14 +690,10 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
struct list_head *p;
int err = 0;
- if (lock == NULL)
- lock = &global_bufferlist_lock;
-
spin_lock(lock);
repeat:
- for (p = list->prev;
- bh = BH_ENTRY(p), p != list;
- p = bh->b_inode_buffers.prev) {
+ list_for_each_prev(p, list) {
+ bh = BH_ENTRY(p);
if (buffer_locked(bh)) {
get_bh(bh);
spin_unlock(lock);
@@ -749,9 +734,6 @@ int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
struct list_head tmp;
int err = 0, err2;
- if (lock == NULL)
- lock = &global_bufferlist_lock;
-
INIT_LIST_HEAD(&tmp);
spin_lock(lock);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 4c934f39557e..77ba9219bdfb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -105,9 +105,9 @@ inline void make_le_item_head (struct item_head * ih, const struct cpu_key * key
}
static void add_to_flushlist(struct inode *inode, struct buffer_head *bh) {
- struct list_head *list = &(SB_JOURNAL(inode->i_sb)->j_dirty_buffers) ;
+ struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb) ;
- buffer_insert_list(NULL, bh, list) ;
+ buffer_insert_list(&j->j_dirty_buffers_lock, bh, &j->j_dirty_buffers) ;
}
//
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d62791ad5aed..6fccbbe01fc0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2131,6 +2131,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_dirty_buffers) ;
+ spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;
reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap,
SB_BMAP_NR(p_s_sb)) ;
allocate_bitmap_nodes(p_s_sb) ;
@@ -3125,7 +3126,8 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;
/* write any buffers that must hit disk before this commit is done */
- fsync_buffers_list(NULL, &(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
+ fsync_buffers_list(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock),
+ &(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
/* honor the flush and async wishes from the caller */
if (flush) {
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 05addd1e35f8..4304ff623e72 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -241,6 +241,7 @@ struct reiserfs_journal {
int j_used_bitmap_nodes ;
struct list_head j_bitmap_nodes ;
struct list_head j_dirty_buffers ;
+ spinlock_t j_dirty_buffers_lock ; /* protects j_dirty_buffers */
struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS] ; /* array of bitmaps to record the deleted blocks */
struct reiserfs_journal_list j_journal_list[JOURNAL_LIST_COUNT] ; /* array of all the journal lists */
struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for real buffer heads in current trans */