author     Suparna Bhattacharya <suparna@in.ibm.com>    2004-08-23 21:22:28 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2004-08-23 21:22:28 -0700
commit     63b05203af57e7de4f3bb63b8b81d43bc196d32b (patch)
tree       54eda61d139706294d69eb5b98762137e97b32f2 /kernel
parent     86b9159a20df3e43fe5d8c01d85d23880db1279e (diff)
[PATCH] AIO: retry infrastructure fixes and enhancements
From: Daniel McNeil <daniel@osdl.org>
From: Chris Mason <mason@suse.com>

AIO: retry infrastructure fixes and enhancements

Reorganises, comments and fixes the AIO retry logic.

Fixes and enhancements include:

- Split iocb setup and execution in io_submit (also fixes io_submit
  error reporting)
- Use aio workqueue instead of keventd for retries
- Default high level retry methods
- Subtle use_mm/unuse_mm fix
- Code commenting
- Fix aio process hang on EINVAL (Daniel McNeil)
- Hold the context lock across unuse_mm
- Acquire task_lock in use_mm()
- Allow fops to override the retry method with their own
- Elevated ref count for AIO retries (Daniel McNeil)
- set_fs needed when calling use_mm
- Flush workqueue on __put_ioctx (Chris Mason)
- Fix io_cancel to work with retries (Chris Mason)
- Read-immediate option for socket/pipe retry support

Note on default high-level retry methods support
================================================

High-level retry methods allow an AIO request to be executed as a series
of non-blocking iterations, where each iteration retries the remaining
part of the request from where the last iteration left off, by reissuing
the corresponding AIO fop routine with modified arguments representing
the remaining I/O.  The retries are "kicked" via the AIO waitqueue
callback aio_wake_function(), which replaces the default wait queue entry
used for blocking waits.

The high-level retry infrastructure is responsible for running the
iterations in the mm context (address space) of the caller, and it
ensures that only one retry instance is active at a given time, thus
relieving the fops themselves from having to deal with potential races
of that sort.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
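To make the retry model concrete: the sketch below is loosely modeled on
the aio_pread() default retry method this series adds in fs/aio.c. It is
reconstructed from the description above rather than quoted from the
patch, so details may differ. Each call performs one non-blocking
iteration against the remaining I/O and returns -EIOCBRETRY until the
request completes:

#include <linux/aio.h>
#include <linux/fs.h>

/* Illustrative reconstruction, not the literal patch text. */
static ssize_t aio_pread(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	/* Reissue the fop with arguments describing the remaining I/O */
	ret = file->f_op->aio_read(iocb, iocb->ki_buf,
				   iocb->ki_left, iocb->ki_pos);
	if (ret > 0) {
		/* Short read: advance the bookkeeping and ask to be
		 * retried when the next wakeup kicks the iocb */
		iocb->ki_buf += ret;
		iocb->ki_left -= ret;
		ret = -EIOCBRETRY;
	}

	/* EOF or nothing left to do: report the total transferred */
	if (ret == 0 || iocb->ki_left == 0)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

The kick arrives through aio_wake_function(), installed as the wait queue
callback in place of the default entry that would wake a sleeping task,
which is why prepare_to_wait() in the diff below must learn not to touch
the task state for such waiters.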
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c  15
1 file changed, 13 insertions, 2 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 2e36a5786e96..9af20c8efbdc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -152,7 +152,12 @@ void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
-	set_current_state(state);
+	/*
+	 * don't alter the task state if this is just going to
+	 * queue an async wait queue callback
+	 */
+	if (is_sync_wait(wait))
+		set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -167,7 +172,12 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
-	set_current_state(state);
+	/*
+	 * don't alter the task state if this is just going to
+	 * queue an async wait queue callback
+	 */
+	if (is_sync_wait(wait))
+		set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
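For context, is_sync_wait() is not defined in this file; it arrives with
the same series in include/linux/wait.h. A reconstruction of the test
(the exact macro body may differ), built on the fact that a wait_queue_t
of that era carried a ->task pointer:

/*
 * Reconstructed sketch: a synchronous waiter passes either no wait
 * queue entry (NULL means "use the default on-stack entry") or one
 * bound to a task to wake up.  An AIO retry passes an entry whose
 * ->task is NULL and whose ->func is aio_wake_function(), which
 * kicks the iocb instead of waking a sleeper.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->task))

Skipping set_current_state() for such entries is the point of the two
hunks above: the submitting task merely queues the callback and must not
be moved out of TASK_RUNNING on behalf of I/O it is not sleeping on.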
@@ -965,6 +975,7 @@ static task_t *copy_process(unsigned long clone_flags,
 	p->start_time = get_jiffies_64();
 	p->security = NULL;
 	p->io_context = NULL;
+	p->io_wait = NULL;
 	p->audit_context = NULL;
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
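The copy_process() hunk initialises io_wait, the per-task pointer this
series adds to task_struct. A sketch of how the retry infrastructure is
described to use it (the bracketing and names such as ki_wait and
ki_retry are reconstructed for illustration, not quoted from the patch):

/* Reconstructed sketch of one retry iteration, run from the aio
 * workqueue in the submitter's mm context: */
use_mm(ctx->mm);			/* switch to the caller's address space */
current->io_wait = &iocb->ki_wait;	/* blocking waits become async kicks */
ret = iocb->ki_retry(iocb);		/* one non-blocking iteration */
current->io_wait = NULL;		/* back to ordinary synchronous waits */
unuse_mm(ctx->mm);

A freshly forked task has never issued a retry and must start out doing
ordinary synchronous waits, hence the NULL initialisation in the hunk
above.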