summaryrefslogtreecommitdiff
path: root/fs/buffer.c
diff options
context:
space:
mode:
author	Andrew Morton <akpm@zip.com.au>	2002-04-29 23:53:20 -0700
committer	Linus Torvalds <torvalds@home.transmeta.com>	2002-04-29 23:53:20 -0700
commit	7d513234c45c6428985e77a5fd6d1382b8fa205b (patch)
tree	c53addf52a1f3c8207006c2ea433c271a6a262a9 /fs/buffer.c
parent	df6867ef84a7d9ee4d44f451751a3f4128de10f9 (diff)
[PATCH] remove i_dirty_data_buffers
Removes inode.i_dirty_data_buffers. It's no longer used - all dirty buffers have their pages marked dirty and filemap_fdatasync() / filemap_fdatawait() catches it all. Updates all callers. This required a change in JFS - it has "metapages" which are a container around a page which holds metadata. They were holding these pages locked and were relying on fsync_inode_data_buffers for writing them out. So fdatasync() deadlocked. I've changed JFS to not lock those pages. Change was acked by Dave Kleikamp <shaggy@austin.ibm.com> as the right thing to do, but may not be complete. Probably igrab() against ->host is needed to pin the address_space down.
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 88bb9b9f297d..fc575fd54f01 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -428,8 +428,7 @@ int inode_has_buffers(struct inode *inode)
int ret;
spin_lock(&inode->i_bufferlist_lock);
- ret = !list_empty(&inode->i_dirty_buffers) ||
- !list_empty(&inode->i_dirty_data_buffers);
+ ret = !list_empty(&inode->i_dirty_buffers);
spin_unlock(&inode->i_bufferlist_lock);
return ret;
@@ -694,9 +693,6 @@ void invalidate_inode_buffers(struct inode *inode)
while ((entry = inode->i_dirty_buffers.next) !=
&inode->i_dirty_buffers)
__remove_inode_queue(BH_ENTRY(entry));
- while ((entry = inode->i_dirty_data_buffers.next) !=
- &inode->i_dirty_data_buffers)
- __remove_inode_queue(BH_ENTRY(entry));
spin_unlock(&inode->i_bufferlist_lock);
}
@@ -954,10 +950,6 @@ __getblk(struct block_device *bdev, sector_t block, int size)
* block_read_full_page() against that page will discover all the uptodate
* buffers, will set the page uptodate and will perform no I/O.
*/
-static inline void __mark_dirty(struct buffer_head *bh)
-{
- __set_page_dirty_nobuffers(bh->b_page);
-}
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
@@ -973,7 +965,7 @@ static inline void __mark_dirty(struct buffer_head *bh)
void mark_buffer_dirty(struct buffer_head *bh)
{
if (!atomic_set_buffer_dirty(bh))
- __mark_dirty(bh);
+ __set_page_dirty_nobuffers(bh->b_page);
}
/*
@@ -1498,10 +1490,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
partial = 1;
} else {
mark_buffer_uptodate(bh, 1);
- if (!atomic_set_buffer_dirty(bh)) {
- __mark_dirty(bh);
- buffer_insert_inode_data_queue(bh, inode);
- }
+ mark_buffer_dirty(bh);
}
}