summaryrefslogtreecommitdiff
path: root/fs
diff options
context:
space:
mode:
Diffstat (limited to 'fs')
-rw-r--r--fs/buffer.c42
-rw-r--r--fs/ext2/balloc.c12
-rw-r--r--fs/ext2/ialloc.c12
-rw-r--r--fs/ext2/inode.c9
-rw-r--r--fs/ext2/super.c3
-rw-r--r--fs/ext2/xattr.c9
-rw-r--r--fs/ext3/inode.c5
-rw-r--r--fs/ext3/super.c8
-rw-r--r--fs/jbd/commit.c3
-rw-r--r--fs/jbd/journal.c9
-rw-r--r--fs/jbd/recovery.c16
-rw-r--r--fs/jbd/transaction.c147
-rw-r--r--fs/jfs/jfs_imap.c3
-rw-r--r--fs/jfs/jfs_mount.c3
-rw-r--r--fs/jfs/namei.c6
-rw-r--r--fs/jfs/resize.c9
-rw-r--r--fs/minix/inode.c3
-rw-r--r--fs/ncpfs/sock.c4
-rw-r--r--fs/ntfs/super.c3
-rw-r--r--fs/qnx4/inode.c3
-rw-r--r--fs/reiserfs/journal.c6
-rw-r--r--fs/reiserfs/resize.c3
-rw-r--r--fs/sysv/inode.c3
-rw-r--r--fs/sysv/itree.c6
-rw-r--r--fs/udf/inode.c3
-rw-r--r--fs/ufs/balloc.c16
-rw-r--r--fs/ufs/dir.c18
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c12
-rw-r--r--fs/ufs/truncate.c3
30 files changed, 110 insertions, 271 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index bf6ae714c730..140aad55b292 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -31,7 +31,6 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
-#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
@@ -2619,6 +2618,24 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
}
/*
+ * For a data-integrity writeout, we need to wait upon any in-progress I/O
+ * and then start new I/O and then wait upon it.
+ */
+void sync_dirty_buffer(struct buffer_head *bh)
+{
+ WARN_ON(atomic_read(&bh->b_count) < 1);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
+ get_bh(bh);
+ bh->b_end_io = end_buffer_io_sync;
+ submit_bh(WRITE, bh);
+ wait_on_buffer(bh);
+ } else {
+ unlock_buffer(bh);
+ }
+}
+
+/*
* Sanity checks for try_to_free_buffers.
*/
static void check_ttfb_buffer(struct page *page, struct buffer_head *bh)
@@ -2773,7 +2790,6 @@ asmlinkage long sys_bdflush(int func, long data)
* Buffer-head allocation
*/
static kmem_cache_t *bh_cachep;
-static mempool_t *bh_mempool;
/*
* Once the number of bh's in the machine exceeds this level, we start
@@ -2807,7 +2823,7 @@ static void recalc_bh_state(void)
struct buffer_head *alloc_buffer_head(void)
{
- struct buffer_head *ret = mempool_alloc(bh_mempool, GFP_NOFS);
+ struct buffer_head *ret = kmem_cache_alloc(bh_cachep, GFP_NOFS);
if (ret) {
preempt_disable();
__get_cpu_var(bh_accounting).nr++;
@@ -2821,7 +2837,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
void free_buffer_head(struct buffer_head *bh)
{
BUG_ON(!list_empty(&bh->b_assoc_buffers));
- mempool_free(bh, bh_mempool);
+ kmem_cache_free(bh_cachep, bh);
preempt_disable();
__get_cpu_var(bh_accounting).nr--;
recalc_bh_state();
@@ -2829,7 +2845,8 @@ void free_buffer_head(struct buffer_head *bh)
}
EXPORT_SYMBOL(free_buffer_head);
-static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
@@ -2840,19 +2857,6 @@ static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long fla
}
}
-static void *bh_mempool_alloc(int gfp_mask, void *pool_data)
-{
- return kmem_cache_alloc(bh_cachep, gfp_mask);
-}
-
-static void bh_mempool_free(void *element, void *pool_data)
-{
- return kmem_cache_free(bh_cachep, element);
-}
-
-#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
-#define MAX_UNUSED_BUFFERS NR_RESERVED+20
-
static void buffer_init_cpu(int cpu)
{
struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
@@ -2889,8 +2893,6 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
0, init_buffer_head, NULL);
- bh_mempool = mempool_create(MAX_UNUSED_BUFFERS, bh_mempool_alloc,
- bh_mempool_free, NULL);
for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 02c62039d8da..e5bee153791a 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -233,10 +233,8 @@ do_more:
}
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
group_release_blocks(desc, bh2, group_freed);
freed += group_freed;
@@ -466,10 +464,8 @@ got_block:
write_unlock(&EXT2_I(inode)->i_meta_lock);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
ext2_debug ("allocating block %d. ", block);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index bb02b848b77f..aaa58ce59962 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -146,10 +146,8 @@ void ext2_free_inode (struct inode * inode)
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
sb->s_dirt = 1;
error_return:
brelse(bitmap_bh);
@@ -485,10 +483,8 @@ repeat:
ext2_set_bit(i, bitmap_bh->b_data);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS) {
- ll_rw_block(WRITE, 1, &bitmap_bh);
- wait_on_buffer(bitmap_bh);
- }
+ if (sb->s_flags & MS_SYNCHRONOUS)
+ sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 65e99034fcb6..e47f84e305cd 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -443,10 +443,8 @@ static int ext2_alloc_branch(struct inode *inode,
* But we now rely upon generic_osync_inode()
* and b_inode_buffers. But not for directories.
*/
- if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
+ sync_dirty_buffer(bh);
parent = nr;
}
if (n == num)
@@ -1208,8 +1206,7 @@ static int ext2_update_inode(struct inode * inode, int do_sync)
raw_inode->i_block[n] = ei->i_data[n];
mark_buffer_dirty(bh);
if (do_sync) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk ("IO error syncing ext2 inode [%s:%08lx]\n",
sb->s_id, (unsigned long) ino);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 2b0faad8aff4..c608b6f29909 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -842,8 +842,7 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
{
es->s_wtime = cpu_to_le32(get_seconds());
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
- ll_rw_block(WRITE, 1, &EXT2_SB(sb)->s_sbh);
- wait_on_buffer(EXT2_SB(sb)->s_sbh);
+ sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 5a4592b0b7b6..6edc79353c67 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -774,8 +774,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
}
mark_buffer_dirty(new_bh);
if (IS_SYNC(inode)) {
- ll_rw_block(WRITE, 1, &new_bh);
- wait_on_buffer(new_bh);
+ sync_dirty_buffer(new_bh);
error = -EIO;
if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
goto cleanup;
@@ -865,10 +864,8 @@ ext2_xattr_delete_inode(struct inode *inode)
HDR(bh)->h_refcount = cpu_to_le32(
le32_to_cpu(HDR(bh)->h_refcount) - 1);
mark_buffer_dirty(bh);
- if (IS_SYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
DQUOT_FREE_BLOCK(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 24897acf33da..ca17eb33b07d 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1317,10 +1317,7 @@ static int ext3_writepage(struct page *page, struct writeback_control *wbc)
goto out_fail;
needed = ext3_writepage_trans_blocks(inode);
- if (wbc->for_reclaim)
- handle = ext3_journal_try_start(inode, needed);
- else
- handle = ext3_journal_start(inode, needed);
+ handle = ext3_journal_start(inode, needed);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3aeb04a1159c..765ec7d043f7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1343,9 +1343,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
* superblock lock.
*/
EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
- unlock_super(sb); /* akpm: sigh */
ext3_orphan_cleanup(sb, es);
- lock_super(sb);
EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
if (needs_recovery)
printk (KERN_INFO "EXT3-fs: recovery complete.\n");
@@ -1627,10 +1625,8 @@ static void ext3_commit_super (struct super_block * sb,
es->s_wtime = cpu_to_le32(get_seconds());
BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "marking dirty");
mark_buffer_dirty(EXT3_SB(sb)->s_sbh);
- if (sync) {
- ll_rw_block(WRITE, 1, &EXT3_SB(sb)->s_sbh);
- wait_on_buffer(EXT3_SB(sb)->s_sbh);
- }
+ if (sync)
+ sync_dirty_buffer(EXT3_SB(sb)->s_sbh);
}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 12d4a744f07f..b1bb64d1b23f 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -562,8 +562,7 @@ start_journal_io:
{
struct buffer_head *bh = jh2bh(descriptor);
set_buffer_uptodate(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
__brelse(bh); /* One for getblk() */
journal_unlock_journal_head(descriptor);
}
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a106e23956f7..3a8be07f8c7e 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -38,7 +38,6 @@
#include <linux/proc_fs.h>
EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_try_start);
EXPORT_SYMBOL(journal_restart);
EXPORT_SYMBOL(journal_extend);
EXPORT_SYMBOL(journal_stop);
@@ -960,9 +959,10 @@ void journal_update_superblock(journal_t *journal, int wait)
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
if (wait)
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
+ else
+ ll_rw_block(WRITE, 1, &bh);
/* If we have just flushed the log (by marking s_start==0), then
* any future commit will have to be careful to update the
@@ -1296,8 +1296,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
bh = journal->j_sb_buffer;
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
return 0;
}
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index f82d7f3cc507..d9afa22f5de2 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -212,16 +212,14 @@ do { \
*
* The primary function for recovering the log contents when mounting a
* journaled device.
- */
-int journal_recover(journal_t *journal)
-{
-/*
+ *
* Recovery is done in three passes. In the first pass, we look for the
* end of the log. In the second, we assemble the list of revoke
* blocks. In the third and final pass, we replay any un-revoked blocks
* in the log.
*/
-
+int journal_recover(journal_t *journal)
+{
int err;
journal_superblock_t * sb;
@@ -273,15 +271,13 @@ int journal_recover(journal_t *journal)
* journal structures in memory to ignore it (presumably because the
* caller has evidence that it is out of date).
 * This function doesn't appear to be exported..
- */
-int journal_skip_recovery(journal_t *journal)
-{
-/*
+ *
* We perform one pass over the journal to allow us to tell the user how
* much recovery information is being erased, and to let us initialise
* the journal transaction sequence numbers to the next unused ID.
*/
-
+int journal_skip_recovery(journal_t *journal)
+{
int err;
journal_superblock_t * sb;
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 14ca5228e9d6..06d27895de7d 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -266,113 +266,6 @@ handle_t *journal_start(journal_t *journal, int nblocks)
return handle;
}
-/*
- * Return zero on success
- */
-static int try_start_this_handle(journal_t *journal, handle_t *handle)
-{
- transaction_t *transaction;
- int needed;
- int nblocks = handle->h_buffer_credits;
- int ret = 0;
-
- jbd_debug(3, "New handle %p maybe going live.\n", handle);
-
- lock_journal(journal);
-
- if (is_journal_aborted(journal) ||
- (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
- ret = -EROFS;
- goto fail_unlock;
- }
-
- if (journal->j_barrier_count)
- goto fail_unlock;
-
- if (!journal->j_running_transaction && get_transaction(journal, 1) == 0)
- goto fail_unlock;
-
- transaction = journal->j_running_transaction;
- if (transaction->t_state == T_LOCKED)
- goto fail_unlock;
-
- needed = transaction->t_outstanding_credits + nblocks;
- /* We could run log_start_commit here */
- if (needed > journal->j_max_transaction_buffers)
- goto fail_unlock;
-
- needed = journal->j_max_transaction_buffers;
- if (journal->j_committing_transaction)
- needed += journal->j_committing_transaction->
- t_outstanding_credits;
-
- if (log_space_left(journal) < needed)
- goto fail_unlock;
-
- handle->h_transaction = transaction;
- transaction->t_outstanding_credits += nblocks;
- transaction->t_updates++;
- jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
- handle, nblocks, transaction->t_outstanding_credits,
- log_space_left(journal));
- unlock_journal(journal);
- return 0;
-
-fail_unlock:
- unlock_journal(journal);
- if (ret >= 0)
- ret = -1;
- return ret;
-}
-
-/**
- * handle_t *journal_try_start() - Don't block, but try and get a handle
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffer we might modify
- *
- * Try to start a handle, but non-blockingly. If we weren't able
- * to, return an ERR_PTR value.
- */
-handle_t *journal_try_start(journal_t *journal, int nblocks)
-{
- handle_t *handle = journal_current_handle();
- int err;
-
- if (!journal)
- return ERR_PTR(-EROFS);
-
- if (handle) {
- jbd_debug(4, "h_ref %d -> %d\n",
- handle->h_ref,
- handle->h_ref + 1);
- J_ASSERT(handle->h_transaction->t_journal == journal);
- if (is_handle_aborted(handle))
- return ERR_PTR(-EIO);
- handle->h_ref++;
- return handle;
- } else {
- jbd_debug(4, "no current transaction\n");
- }
-
- if (is_journal_aborted(journal))
- return ERR_PTR(-EIO);
-
- handle = new_handle(nblocks);
- if (!handle)
- return ERR_PTR(-ENOMEM);
-
- current->journal_info = handle;
-
- err = try_start_this_handle(journal, handle);
- if (err < 0) {
- kfree(handle);
- current->journal_info = NULL;
- return ERR_PTR(err);
- }
-
- return handle;
-}
-
/**
* int journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
@@ -969,22 +862,23 @@ out:
}
/**
- * int journal_dirty_data() - mark a buffer as containing dirty data which needs to be flushed before we can commit the current transaction.
+ * int journal_dirty_data() - mark a buffer as containing dirty data which
+ * needs to be flushed before we can commit the
+ * current transaction.
* @handle: transaction
* @bh: bufferhead to mark
*
* The buffer is placed on the transaction's data list and is marked as
* belonging to the transaction.
*
- * Returns error number or 0 on success.
- */
-int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
-{
-/*
+ * Returns error number or 0 on success.
+ *
* journal_dirty_data() can be called via page_launder->ext3_writepage
* by kswapd. So it cannot block. Happily, there's nothing here
* which needs lock_journal if `async' is set.
*/
+int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
+{
journal_t *journal = handle->h_transaction->t_journal;
int need_brelse = 0;
struct journal_head *jh;
@@ -1079,8 +973,7 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
atomic_inc(&bh->b_count);
spin_unlock(&journal_datalist_lock);
need_brelse = 1;
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
spin_lock(&journal_datalist_lock);
/* The buffer may become locked again at any
time if it is redirtied */
@@ -1130,23 +1023,22 @@ no_journal:
* @handle: transaction to add buffer to.
* @bh: buffer to mark
*
- * mark dirty metadata which needs to be journaled as part of the current transaction.
+ * mark dirty metadata which needs to be journaled as part of the current
+ * transaction.
*
* The buffer is placed on the transaction's metadata list and is marked
* as belonging to the transaction.
*
* Returns error number or 0 on success.
- */
-int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
-{
-/*
+ *
* Special care needs to be taken if the buffer already belongs to the
* current committing transaction (in which case we should have frozen
* data present for that commit). In that case, we don't relink the
* buffer: that only gets done when the old transaction finally
* completes its commit.
- *
*/
+int journal_dirty_metadata (handle_t *handle, struct buffer_head *bh)
+{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
struct journal_head *jh = bh2jh(bh);
@@ -1361,8 +1253,7 @@ void journal_sync_buffer(struct buffer_head *bh)
}
atomic_inc(&bh->b_count);
spin_unlock(&journal_datalist_lock);
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
__brelse(bh);
goto out;
}
@@ -1728,13 +1619,6 @@ out:
* to be called. We do this if the page is releasable by try_to_free_buffers().
* We also do it if the page has locked or dirty buffers and the caller wants
* us to perform sync or async writeout.
- */
-int journal_try_to_free_buffers(journal_t *journal,
- struct page *page, int unused_gfp_mask)
-{
-/*
- * journal_try_to_free_buffers(). Try to remove all this page's buffers
- * from the journal.
*
* This complicates JBD locking somewhat. We aren't protected by the
* BKL here. We wish to remove the buffer from its committing or
@@ -1754,6 +1638,9 @@ int journal_try_to_free_buffers(journal_t *journal,
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*/
+int journal_try_to_free_buffers(journal_t *journal,
+ struct page *page, int unused_gfp_mask)
+{
struct buffer_head *head;
struct buffer_head *bh;
int ret = 0;
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 598ee5e5fa2f..8a243c1f55b9 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -2980,8 +2980,7 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
j_sb->s_flag |= JFS_BAD_SAIT;
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
return;
}
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index c477cdb3ff82..3f2f6ac71f97 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -449,8 +449,7 @@ int updateSuper(struct super_block *sb, uint state)
}
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
return 0;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 736fd5dc6c04..dd5286bf74b1 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -972,10 +972,8 @@ int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
#if 0
set_buffer_uptodate(bp);
mark_buffer_dirty(bp, 1);
- if (IS_SYNC(dip)) {
- ll_rw_block(WRITE, 1, &bp);
- wait_on_buffer(bp);
- }
+ if (IS_SYNC(dip))
+ sync_dirty_buffer(bp);
brelse(bp);
#endif /* 0 */
ssize -= copy_size;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 07cde7e7cad8..9da18ef683fd 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -243,8 +243,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
/* synchronously update superblock */
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
/*
@@ -512,15 +511,13 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh2);
- wait_on_buffer(bh2);
+ sync_dirty_buffer(bh2);
brelse(bh2);
}
/* write primary superblock */
mark_buffer_dirty(bh);
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
brelse(bh);
goto resume;
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index a58cf733da35..ec5c8bffed08 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -517,8 +517,7 @@ int minix_sync_inode(struct inode * inode)
bh = minix_update_inode(inode);
if (bh && buffer_dirty(bh))
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk ("IO error syncing minix inode [%s:%08lx]\n",
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index f01c538eb282..a29294f3987c 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -757,9 +757,9 @@ static int ncp_do_request(struct ncp_server *server, int size,
What if we've blocked it ourselves? What about
alarms? Why, in fact, are we mucking with the
sigmask at all? -- r~ */
- if (current->sig->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
+ if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
mask |= sigmask(SIGINT);
- if (current->sig->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
+ if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
mask |= sigmask(SIGQUIT);
}
siginitsetinv(&current->blocked, mask);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 55a092114ecd..5361198e1b80 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -505,8 +505,7 @@ hotfix_primary_boot_sector:
memcpy(bh_primary->b_data, bh_backup->b_data,
sb->s_blocksize);
mark_buffer_dirty(bh_primary);
- ll_rw_block(WRITE, 1, &bh_primary);
- wait_on_buffer(bh_primary);
+ sync_dirty_buffer(bh_primary);
if (buffer_uptodate(bh_primary)) {
brelse(bh_backup);
return bh_primary;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 9da0bfe9a348..caac9561ab80 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -44,8 +44,7 @@ int qnx4_sync_inode(struct inode *inode)
bh = qnx4_update_inode(inode);
if (bh && buffer_dirty(bh))
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk ("IO error syncing qnx4 inode [%s:%08lx]\n",
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ea0ae71a89f2..045247937245 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -735,8 +735,7 @@ reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_l
}
mark_buffer_dirty(jl->j_commit_bh) ;
- ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
- wait_on_buffer(jl->j_commit_bh) ;
+ sync_dirty_buffer(jl->j_commit_bh) ;
if (!buffer_uptodate(jl->j_commit_bh)) {
reiserfs_panic(s, "journal-615: buffer write failed\n") ;
}
@@ -828,8 +827,7 @@ static int _update_journal_header_block(struct super_block *p_s_sb, unsigned lon
jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
- ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
- wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+ sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
printk( "reiserfs: journal-837: IO error during journal replay\n" );
return -EIO ;
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 7651be142ebf..e5c6bc5aaf36 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -120,8 +120,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
mark_buffer_dirty(bitmap[i].bh) ;
set_buffer_uptodate(bitmap[i].bh);
- ll_rw_block(WRITE, 1, &bitmap[i].bh);
- wait_on_buffer(bitmap[i].bh);
+ sync_dirty_buffer(bitmap[i].bh);
// update bitmap_info stuff
bitmap[i].first_zero_hint=1;
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 23bc0fc34ec6..0a8c81d23b71 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -265,8 +265,7 @@ int sysv_sync_inode(struct inode * inode)
bh = sysv_update_inode(inode);
if (bh && buffer_dirty(bh)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
printk ("IO error syncing sysv inode [%s:%08lx]\n",
inode->i_sb->s_id, inode->i_ino);
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index a1c0b6361351..60b40506748d 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -15,10 +15,8 @@ enum {DIRECT = 10, DEPTH = 4}; /* Have triple indirect */
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
mark_buffer_dirty_inode(bh, inode);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
}
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 19a6e06cd46e..2fa65ab370d7 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1520,8 +1520,7 @@ udf_update_inode(struct inode *inode, int do_sync)
mark_buffer_dirty(bh);
if (do_sync)
{
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
+ sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh))
{
printk("IO error syncing udf inode [%s:%08lx]\n",
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index f297d5365744..4209eb7f238d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -114,6 +114,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
@@ -199,6 +200,7 @@ do_more:
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
@@ -228,10 +230,8 @@ failed:
memset (bh->b_data, 0, sb->s_blocksize); \
set_buffer_uptodate(bh); \
mark_buffer_dirty (bh); \
- if (IS_SYNC(inode)) { \
- ll_rw_block (WRITE, 1, &bh); \
- wait_on_buffer (bh); \
- } \
+ if (IS_SYNC(inode)) \
+ sync_dirty_buffer(bh); \
brelse (bh); \
}
@@ -364,10 +364,8 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
clear_buffer_dirty(bh);
bh->b_blocknr = result + i;
mark_buffer_dirty (bh);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
brelse (bh);
}
else
@@ -459,6 +457,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
@@ -584,6 +583,7 @@ succed:
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 4f87116cfcad..55e81f6b5b83 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -356,10 +356,8 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
dir->i_version++;
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_DIRSYNC(dir))
+ sync_dirty_buffer(bh);
brelse (bh);
}
@@ -457,10 +455,8 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_DIRSYNC(dir))
+ sync_dirty_buffer(bh);
brelse (bh);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
dir->i_version++;
@@ -508,10 +504,8 @@ int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
mark_inode_dirty(inode);
mark_buffer_dirty(bh);
- if (IS_DIRSYNC(inode)) {
- ll_rw_block(WRITE, 1, &bh);
- wait_on_buffer(bh);
- }
+ if (IS_DIRSYNC(inode))
+ sync_dirty_buffer(bh);
brelse(bh);
UFSD(("EXIT\n"))
return 0;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 33080eda3f86..767cb8fbbebf 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -124,6 +124,7 @@ void ufs_free_inode (struct inode * inode)
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
@@ -248,6 +249,7 @@ cg_found:
ubh_mark_buffer_dirty (USPI_UBH);
ubh_mark_buffer_dirty (UCPI_UBH);
if (sb->s_flags & MS_SYNCHRONOUS) {
+ ubh_wait_on_buffer (UCPI_UBH);
ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
ubh_wait_on_buffer (UCPI_UBH);
}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 615f61a0b88d..442c28e17739 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -298,10 +298,8 @@ repeat:
}
mark_buffer_dirty(bh);
- if (IS_SYNC(inode)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (IS_SYNC(inode))
+ sync_dirty_buffer(bh);
inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
out:
@@ -635,10 +633,8 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
memset (ufs_inode, 0, sizeof(struct ufs_inode));
mark_buffer_dirty(bh);
- if (do_sync) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
+ if (do_sync)
+ sync_dirty_buffer(bh);
brelse (bh);
UFSD(("EXIT\n"))
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 636bdbdbf3ce..04e50f696202 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -284,6 +284,7 @@ next:;
}
}
if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
+ ubh_wait_on_buffer (ind_ubh);
ubh_ll_rw_block (WRITE, 1, &ind_ubh);
ubh_wait_on_buffer (ind_ubh);
}
@@ -351,6 +352,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
}
}
if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
+ ubh_wait_on_buffer (dind_bh);
ubh_ll_rw_block (WRITE, 1, &dind_bh);
ubh_wait_on_buffer (dind_bh);
}
@@ -415,6 +417,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
}
}
if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
+ ubh_wait_on_buffer (tind_bh);
ubh_ll_rw_block (WRITE, 1, &tind_bh);
ubh_wait_on_buffer (tind_bh);
}