summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnton Altaparmakov <aia21@cantab.net>2004-11-03 09:11:59 +0000
committerAnton Altaparmakov <aia21@cantab.net>2004-11-03 09:11:59 +0000
commitbee6541d073232c70c0353a99df48258638d6450 (patch)
treea98806176d7bca8aae205022c1ebf6adb1567001
parentc373aaf00a9bff91bfe7a4c3a74a77e7fd77bd50 (diff)
NTFS: Modify fs/ntfs/aops.c::mark_ntfs_record_dirty() so it allocates
buffers for the page if they are not present and then marks the buffers belonging to the ntfs record dirty. This causes the buffers to become busy and hence they are safe from removal until the page has been written out.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
-rw-r--r--  fs/ntfs/ChangeLog |  5 +
-rw-r--r--  fs/ntfs/aops.c    | 57 ++++++++++++++++++++++++++++----------------
2 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 375cc0326f64..1e6a5ee17fd8 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -61,6 +61,11 @@ ToDo/Notes:
attribute was found. (Thanks to Domen Puncer for the bug report.)
- Add MODULE_VERSION() to fs/ntfs/super.c.
- Make several functions and variables static. (Adrian Bunk)
+ - Modify fs/ntfs/aops.c::mark_ntfs_record_dirty() so it allocates
+ buffers for the page if they are not present and then marks the
+ buffers belonging to the ntfs record dirty. This causes the buffers
+ to become busy and hence they are safe from removal until the page
+ has been written out.
2.1.21 - Fix some races and bugs, rewrite mft write code, add mft allocator.
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index ddfc01a17fe4..44af201f9b00 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2170,29 +2170,43 @@ struct address_space_operations ntfs_mst_aops = {
* @page: page containing the ntfs record to mark dirty
* @ofs: byte offset within @page at which the ntfs record begins
*
- * If the ntfs record is the same size as the page cache page @page, set all
- * buffers in the page dirty. Otherwise, set only the buffers in which the
- * ntfs record is located dirty.
+ * Set the buffers and the page in which the ntfs record is located dirty.
*
- * Also, set the page containing the ntfs record dirty, which also marks the
- * vfs inode the ntfs record belongs to dirty (I_DIRTY_PAGES).
+ * The latter also marks the vfs inode the ntfs record belongs to dirty
+ * (I_DIRTY_PAGES only).
+ *
+ * If the page does not have buffers, we create them and set them uptodate.
+ * The page may not be locked which is why we need to handle the buffers under
+ * the mapping->private_lock. Once the buffers are marked dirty we no longer
+ * need the lock since try_to_free_buffers() does not free dirty buffers.
*/
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
- ntfs_inode *ni;
- struct buffer_head *bh, *head;
+ struct address_space *mapping = page->mapping;
+ ntfs_inode *ni = NTFS_I(mapping->host);
+ struct buffer_head *bh, *head, *buffers_to_free = NULL;
unsigned int end, bh_size, bh_ofs;
- BUG_ON(!page);
- BUG_ON(!page_has_buffers(page));
- ni = NTFS_I(page->mapping->host);
- BUG_ON(!ni);
- if (ni->itype.index.block_size == PAGE_CACHE_SIZE) {
- __set_page_dirty_buffers(page);
- return;
- }
+ BUG_ON(!PageUptodate(page));
end = ofs + ni->itype.index.block_size;
- bh_size = ni->vol->sb->s_blocksize;
- spin_lock(&page->mapping->private_lock);
+ bh_size = 1 << VFS_I(ni)->i_blkbits;
+ spin_lock(&mapping->private_lock);
+ if (unlikely(!page_has_buffers(page))) {
+ spin_unlock(&mapping->private_lock);
+ bh = head = alloc_page_buffers(page, bh_size, 1);
+ spin_lock(&mapping->private_lock);
+ if (likely(!page_has_buffers(page))) {
+ struct buffer_head *tail;
+
+ do {
+ set_buffer_uptodate(bh);
+ tail = bh;
+ bh = bh->b_this_page;
+ } while (bh);
+ tail->b_this_page = head;
+ attach_page_buffers(page, head);
+ } else
+ buffers_to_free = bh;
+ }
bh = head = page_buffers(page);
do {
bh_ofs = bh_offset(bh);
@@ -2202,8 +2216,15 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
break;
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&mapping->private_lock);
__set_page_dirty_nobuffers(page);
+ if (unlikely(buffers_to_free)) {
+ do {
+ bh = buffers_to_free->b_this_page;
+ free_buffer_head(buffers_to_free);
+ buffers_to_free = bh;
+ } while (buffers_to_free);
+ }
}
#endif /* NTFS_RW */