| field | value | date |
|---|---|---|
| author | Andrew Morton <akpm@osdl.org> | 2004-05-14 05:42:34 -0700 |
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-14 05:42:34 -0700 |
| commit | 70d1f017811daab3cdf75d69fa1e37b1a08f4bb8 (patch) | |
| tree | 066e6289ef3f7fdaf6b9a080466f8352c2e80dba /fs/buffer.c | |
| parent | 08aaf1cc54e61c4357828f7d029f9446144d0cc7 (diff) | |
[PATCH] filtered wakeups: apply to buffer_head functions
From: William Lee Irwin III <wli@holomorphy.com>
This patch implements wake-one semantics for buffer_head wakeups in a single
step. The waker passes the buffer_head being waited on to the waiter's wakeup
function; the wakeup function compares it to the pointer stored in its
on-stack structure and also checks the readiness of the bit there. Wake-one
semantics are achieved by using WQ_FLAG_EXCLUSIVE in the codepaths waiting to
acquire the bit for mutual exclusion.
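In short: the waker hands a key (the buffer_head) to every waiter's wake
function on the shared hashed queue, each wake function refuses the wakeup
unless the key matches its own on-stack record and the bit is ready, and the
waker stops after the first exclusive waiter that accepts. Below is a minimal
user-space C sketch of that filtered wake-one idea; the `struct waiter` list,
`wake_function()` and `wake_up_filtered()` names are illustrative stand-ins
for the kernel's hashed wait queues, `bh_wake_function()` and `__wake_up()`
in the diff further down, not kernel APIs.

```c
#include <stdio.h>
#include <stdbool.h>

/* One waiter on a shared queue: which object it sleeps on, and whether it
 * is an exclusive (lock-acquiring) waiter, like WQ_FLAG_EXCLUSIVE. */
struct waiter {
	const void *key;	/* the object this waiter sleeps on (the "bh") */
	bool exclusive;		/* analogous to WQ_FLAG_EXCLUSIVE */
	const char *name;
	struct waiter *next;
};

/* The filter: refuse the wakeup unless the waker's key matches ours,
 * mirroring the wq->bh != bh check in bh_wake_function(). */
static bool wake_function(struct waiter *w, const void *key)
{
	if (w->key != key)
		return false;	/* queue is shared by hashing; not our buffer */
	printf("woke %s\n", w->name);
	return true;
}

/* Analogue of __wake_up(wq, ..., nr_exclusive = 1, key): wake matching
 * waiters, but stop after the first exclusive waiter that accepts. */
static void wake_up_filtered(struct waiter *head, const void *key)
{
	int nr_exclusive = 1;

	for (struct waiter *w = head; w; w = w->next) {
		if (wake_function(w, key) && w->exclusive && --nr_exclusive == 0)
			break;
	}
}

int main(void)
{
	int bh_a, bh_b;		/* stand-ins for two buffer_heads sharing a queue */
	struct waiter w3 = { &bh_b, true, "lock waiter on B", NULL };
	struct waiter w2 = { &bh_a, true, "second lock waiter on A", &w3 };
	struct waiter w1 = { &bh_a, true, "first lock waiter on A", &w2 };

	/* Only the first exclusive waiter on bh_a wakes; the waiter on bh_b
	 * and the second waiter on bh_a stay asleep. */
	wake_up_filtered(&w1, &bh_a);
	return 0;
}
```

Because many buffer_heads hash to the same wait queue head, the key comparison
is what keeps a wakeup for one buffer from disturbing waiters on another; the
exclusive count is what limits lock wakeups to a single waiter.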
Diffstat (limited to 'fs/buffer.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | fs/buffer.c | 74 |

1 file changed, 65 insertions(+), 9 deletions(-)
```diff
diff --git a/fs/buffer.c b/fs/buffer.c
index 033773300a5a..d791e8de08db 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -43,6 +43,26 @@ static void invalidate_bh_lrus(void);
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
+struct bh_wait_queue {
+	struct buffer_head *bh;
+	wait_queue_t wait;
+};
+
+#define __DEFINE_BH_WAIT(name, b, f) \
+	struct bh_wait_queue name = { \
+		.bh = b, \
+		.wait = { \
+			.task = current, \
+			.flags = f, \
+			.func = bh_wake_function, \
+			.task_list = \
+				LIST_HEAD_INIT(name.wait.task_list),\
+		}, \
+	}
+#define DEFINE_BH_WAIT(name, bh) __DEFINE_BH_WAIT(name, bh, 0)
+#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
+		__DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
+
 /*
  * Hashed waitqueue_head's for wait_on_buffer()
  */
@@ -74,10 +94,50 @@ void wake_up_buffer(struct buffer_head *bh)
 
 	smp_mb();
 	if (waitqueue_active(wq))
-		wake_up_all(wq);
+		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
 }
 EXPORT_SYMBOL(wake_up_buffer);
 
+static int bh_wake_function(wait_queue_t *wait, unsigned mode,
+				int sync, void *key)
+{
+	struct buffer_head *bh = key;
+	struct bh_wait_queue *wq;
+
+	wq = container_of(wait, struct bh_wait_queue, wait);
+	if (wq->bh != bh || buffer_locked(bh))
+		return 0;
+	else
+		return autoremove_wake_function(wait, mode, sync, key);
+}
+
+static void sync_buffer(struct buffer_head *bh)
+{
+	struct block_device *bd;
+
+	smp_mb();
+	bd = bh->b_bdev;
+	if (bd)
+		blk_run_address_space(bd->bd_inode->i_mapping);
+}
+
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+	wait_queue_head_t *wqh = bh_waitq_head(bh);
+	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+
+	do {
+		prepare_to_wait_exclusive(wqh, &wait.wait,
+					TASK_UNINTERRUPTIBLE);
+		if (buffer_locked(bh)) {
+			sync_buffer(bh);
+			io_schedule();
+		}
+	} while (test_set_buffer_locked(bh));
+	finish_wait(wqh, &wait.wait);
+}
+EXPORT_SYMBOL(__lock_buffer);
+
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
 	clear_buffer_locked(bh);
@@ -93,20 +153,16 @@ void fastcall unlock_buffer(struct buffer_head *bh)
 void __wait_on_buffer(struct buffer_head * bh)
 {
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_WAIT(wait);
+	DEFINE_BH_WAIT(wait, bh);
 
 	do {
-		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
 		if (buffer_locked(bh)) {
-			struct block_device *bd;
-			smp_mb();
-			bd = bh->b_bdev;
-			if (bd)
-				blk_run_address_space(bd->bd_inode->i_mapping);
+			sync_buffer(bh);
 			io_schedule();
 		}
 	} while (buffer_locked(bh));
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, &wait.wait);
 }
 
 static void
```
