summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  drivers/block/ll_rw_blk.c  17
-rw-r--r--  fs/buffer.c                16
-rw-r--r--  include/linux/pagemap.h     8
-rw-r--r--  mm/filemap.c               55
4 files changed, 40 insertions, 56 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index dc521fe7bcaf..a2595200d838 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1233,24 +1233,23 @@ static struct request *get_request(request_queue_t *q, int rw)
*/
static struct request *get_request_wait(request_queue_t *q, int rw)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct request_list *rl = &q->rq[rw];
struct request *rq;
spin_lock_prefetch(q->queue_lock);
generic_unplug_device(q);
- add_wait_queue_exclusive(&rl->wait, &wait);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ prepare_to_wait_exclusive(&rl->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
if (!rl->count)
schedule();
+ finish_wait(&rl->wait, &wait);
spin_lock_irq(q->queue_lock);
rq = get_request(q, rw);
spin_unlock_irq(q->queue_lock);
} while (rq == NULL);
- remove_wait_queue(&rl->wait, &wait);
- current->state = TASK_RUNNING;
return rq;
}
@@ -1460,18 +1459,16 @@ void blk_put_request(struct request *req)
*/
void blk_congestion_wait(int rw, long timeout)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct congestion_state *cs = &congestion_states[rw];
if (atomic_read(&cs->nr_congested_queues) == 0)
return;
blk_run_queues();
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&cs->wqh, &wait);
+ prepare_to_wait(&cs->wqh, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_read(&cs->nr_congested_queues) != 0)
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&cs->wqh, &wait);
+ finish_wait(&cs->wqh, &wait);
}
/*
diff --git a/fs/buffer.c b/fs/buffer.c
index 0b9766099e3d..3b8477f2aca7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -128,22 +128,18 @@ void unlock_buffer(struct buffer_head *bh)
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_queue_head_t *wq = bh_waitq_head(bh);
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
+ wait_queue_head_t *wqh = bh_waitq_head(bh);
+ DEFINE_WAIT(wait);
get_bh(bh);
- add_wait_queue(wq, &wait);
do {
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
blk_run_queues();
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!buffer_locked(bh))
- break;
- schedule();
+ if (buffer_locked(bh))
+ schedule();
} while (buffer_locked(bh));
- tsk->state = TASK_RUNNING;
- remove_wait_queue(wq, &wait);
put_bh(bh);
+ finish_wait(wqh, &wait);
}
static inline void
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 43390b2e2ef4..bfc986131fe6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -74,9 +74,15 @@ static inline void ___add_to_page_cache(struct page *page,
inc_page_state(nr_pagecache);
}
-extern void FASTCALL(lock_page(struct page *page));
+extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));
+static inline void lock_page(struct page *page)
+{
+ if (TestSetPageLocked(page))
+ __lock_page(page);
+}
+
/*
* This is exported only for wait_on_page_locked/wait_on_page_writeback.
* Never use this directly!
diff --git a/mm/filemap.c b/mm/filemap.c
index 9118a5794f27..f45168a04974 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -632,19 +632,15 @@ static inline wait_queue_head_t *page_waitqueue(struct page *page)
void wait_on_page_bit(struct page *page, int bit_nr)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
+ DEFINE_WAIT(wait);
- add_wait_queue(waitqueue, &wait);
do {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!test_bit(bit_nr, &page->flags))
- break;
+ prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
sync_page(page);
- schedule();
+ if (test_bit(bit_nr, &page->flags))
+ schedule();
} while (test_bit(bit_nr, &page->flags));
- __set_task_state(tsk, TASK_RUNNING);
- remove_wait_queue(waitqueue, &wait);
+ finish_wait(waitqueue, &wait);
}
EXPORT_SYMBOL(wait_on_page_bit);
@@ -690,38 +686,27 @@ void end_page_writeback(struct page *page)
EXPORT_SYMBOL(end_page_writeback);
/*
- * Get a lock on the page, assuming we need to sleep
- * to get it..
+ * Get a lock on the page, assuming we need to sleep to get it.
+ *
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
+ * random driver's requestfn sets TASK_RUNNING, we could busywait. However
+ * chances are that on the second loop, the block layer's plug list is empty,
+ * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
-static void __lock_page(struct page *page)
+void __lock_page(struct page *page)
{
- wait_queue_head_t *waitqueue = page_waitqueue(page);
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
+ wait_queue_head_t *wqh = page_waitqueue(page);
+ DEFINE_WAIT(wait);
- add_wait_queue_exclusive(waitqueue, &wait);
- for (;;) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (PageLocked(page)) {
- sync_page(page);
+ while (TestSetPageLocked(page)) {
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+ sync_page(page);
+ if (PageLocked(page))
schedule();
- }
- if (!TestSetPageLocked(page))
- break;
}
- __set_task_state(tsk, TASK_RUNNING);
- remove_wait_queue(waitqueue, &wait);
-}
-
-/*
- * Get an exclusive lock on the page, optimistically
- * assuming it's not locked..
- */
-void lock_page(struct page *page)
-{
- if (TestSetPageLocked(page))
- __lock_page(page);
+ finish_wait(wqh, &wait);
}
+EXPORT_SYMBOL(__lock_page);
/*
* a rather lightweight function, finding and getting a reference to a