summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    William Lee Irwin III <wli@holomorphy.com>  2004-10-18 17:59:53 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-10-18 17:59:53 -0700
commit    baa896b3ded47a7f6a401267f5995dcc09d5d5d4 (patch)
tree      c0d13ea74dd5342ce61bc9f8dd42762707bca262
parent    fd4d36bf0d54e0b020b8ffeddf7552562eab17c5 (diff)
[PATCH] consolidate bit waiting code patterns
Consolidate bit waiting code patterns for page waitqueues using __wait_on_bit() and __wait_on_bit_lock(). Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/wait.h |  2
-rw-r--r--  kernel/wait.c        | 37
-rw-r--r--  mm/filemap.c         | 40
3 files changed, 57 insertions(+), 22 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 82068628327f..d9dfd7e32e7b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -139,6 +139,8 @@ void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *k
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
+int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, void *, int, int (*)(void *), unsigned));
+int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, void *, int, int (*)(void *), unsigned));
#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
diff --git a/kernel/wait.c b/kernel/wait.c
index 78256a812ca0..29057f707dbd 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -143,6 +143,43 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
}
EXPORT_SYMBOL(wake_bit_function);
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
+ * permitted return codes. Nonzero return codes halt waiting and return.
+ */
+int __sched fastcall
+__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
+ void *word, int bit, int (*action)(void *), unsigned mode)
+{
+ int ret = 0;
+
+ prepare_to_wait(wq, &q->wait, mode);
+ if (test_bit(bit, word))
+ ret = (*action)(word);
+ finish_wait(wq, &q->wait);
+ return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit);
+
+int __sched fastcall
+__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
+ void *word, int bit, int (*action)(void *), unsigned mode)
+{
+ int ret = 0;
+
+ while (test_and_set_bit(bit, word)) {
+ prepare_to_wait_exclusive(wq, &q->wait, mode);
+ if (test_bit(bit, word)) {
+ if ((ret = (*action)(word)))
+ break;
+ }
+ }
+ finish_wait(wq, &q->wait);
+ return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit_lock);
+
void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
diff --git a/mm/filemap.c b/mm/filemap.c
index 6318325e24ca..14159485b57c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -131,9 +131,12 @@ void remove_from_page_cache(struct page *page)
spin_unlock_irq(&mapping->tree_lock);
}
-static inline int sync_page(struct page *page)
+static int sync_page(void *word)
{
struct address_space *mapping;
+ struct page *page;
+
+ page = container_of((page_flags_t *)word, struct page, flags);
/*
* FIXME, fercrissake. What is this barrier here for?
@@ -141,7 +144,8 @@ static inline int sync_page(struct page *page)
smp_mb();
mapping = page_mapping(page);
if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
- return mapping->a_ops->sync_page(page);
+ mapping->a_ops->sync_page(page);
+ io_schedule();
return 0;
}
@@ -367,19 +371,19 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
+static inline void wake_up_page(struct page *page, int bit)
+{
+ __wake_up_bit(page_waitqueue(page), &page->flags, bit);
+}
+
void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
- wait_queue_head_t *waitqueue = page_waitqueue(page);
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
- prepare_to_wait(waitqueue, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (test_bit(bit_nr, &page->flags)) {
- sync_page(page);
- io_schedule();
- }
- finish_wait(waitqueue, &wait.wait);
+ if (test_bit(bit_nr, &page->flags))
+ __wait_on_bit(page_waitqueue(page), &wait, wait.key.flags,
+ bit_nr, sync_page, TASK_UNINTERRUPTIBLE);
}
-
EXPORT_SYMBOL(wait_on_page_bit);
/**
@@ -403,7 +407,7 @@ void fastcall unlock_page(struct page *page)
if (!TestClearPageLocked(page))
BUG();
smp_mb__after_clear_bit();
- __wake_up_bit(page_waitqueue(page), &page->flags, PG_locked);
+ wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
@@ -419,7 +423,7 @@ void end_page_writeback(struct page *page)
BUG();
smp_mb__after_clear_bit();
}
- __wake_up_bit(page_waitqueue(page), &page->flags, PG_writeback);
+ wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
@@ -434,19 +438,11 @@ EXPORT_SYMBOL(end_page_writeback);
*/
void fastcall __lock_page(struct page *page)
{
- wait_queue_head_t *wqh = page_waitqueue(page);
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
- while (TestSetPageLocked(page)) {
- prepare_to_wait_exclusive(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
- if (PageLocked(page)) {
- sync_page(page);
- io_schedule();
- }
- }
- finish_wait(wqh, &wait.wait);
+ __wait_on_bit_lock(page_waitqueue(page), &wait, wait.key.flags,
+ PG_locked, sync_page, TASK_UNINTERRUPTIBLE);
}
-
EXPORT_SYMBOL(__lock_page);
/*