summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/mpage.c2
-rw-r--r--mm/filemap.c10
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/vmscan.c1
7 files changed, 10 insertions, 17 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 3fc9e47c5a0a..bf6ae714c730 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -127,9 +127,10 @@ void __wait_on_buffer(struct buffer_head * bh)
get_bh(bh);
do {
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
- blk_run_queues();
- if (buffer_locked(bh))
+ if (buffer_locked(bh)) {
+ blk_run_queues();
io_schedule();
+ }
} while (buffer_locked(bh));
put_bh(bh);
finish_wait(wqh, &wait);
@@ -959,8 +960,6 @@ no_grow:
* the reserve list is empty, we're sure there are
* async buffer heads in use.
*/
- blk_run_queues();
-
free_more_memory();
goto try_again;
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index d3db0faa9abe..1814f7a9b5ce 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -334,7 +334,6 @@ writeback_inodes(struct writeback_control *wbc)
}
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
- blk_run_queues();
}
/*
diff --git a/fs/mpage.c b/fs/mpage.c
index 846ca1aca1bc..3460144c1894 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -618,7 +618,6 @@ mpage_writepages(struct address_space *mapping,
int (*writepage)(struct page *page, struct writeback_control *wbc);
if (wbc->nonblocking && bdi_write_congested(bdi)) {
- blk_run_queues();
wbc->encountered_congestion = 1;
return 0;
}
@@ -673,7 +672,6 @@ mpage_writepages(struct address_space *mapping,
if (ret || (--(wbc->nr_to_write) <= 0))
done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
- blk_run_queues();
wbc->encountered_congestion = 1;
done = 1;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 185abe3c596c..a9659f20f74a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -259,9 +259,10 @@ void wait_on_page_bit(struct page *page, int bit_nr)
do {
prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
- sync_page(page);
- if (test_bit(bit_nr, &page->flags))
+ if (test_bit(bit_nr, &page->flags)) {
+ sync_page(page);
io_schedule();
+ }
} while (test_bit(bit_nr, &page->flags));
finish_wait(waitqueue, &wait);
}
@@ -326,9 +327,10 @@ void __lock_page(struct page *page)
while (TestSetPageLocked(page)) {
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
- sync_page(page);
- if (PageLocked(page))
+ if (PageLocked(page)) {
+ sync_page(page);
io_schedule();
+ }
}
finish_wait(wqh, &wait);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0710834252dc..1ca41b80a581 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -237,7 +237,6 @@ static void background_writeout(unsigned long _min_pages)
break;
}
}
- blk_run_queues();
}
/*
@@ -308,7 +307,6 @@ static void wb_kupdate(unsigned long arg)
}
nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
}
- blk_run_queues();
if (time_before(next_jif, jiffies + HZ))
next_jif = jiffies + HZ;
mod_timer(&wb_timer, next_jif);
diff --git a/mm/readahead.c b/mm/readahead.c
index 77bd1ff6c630..5e20db90cab9 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -236,10 +236,8 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
* uptodate then the caller will launch readpage again, and
* will then handle the error.
*/
- if (ret) {
+ if (ret)
read_pages(mapping, filp, &page_pool, ret);
- blk_run_queues();
- }
BUG_ON(!list_empty(&page_pool));
out:
return ret;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fc00047b27c..4b40c5483268 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -957,7 +957,6 @@ int kswapd(void *p)
finish_wait(&pgdat->kswapd_wait, &wait);
get_page_state(&ps);
balance_pgdat(pgdat, 0, &ps);
- blk_run_queues();
}
}