author	Linus Torvalds <torvalds@home.osdl.org>	2003-12-13 05:36:30 -0800
committer	Linus Torvalds <torvalds@home.osdl.org>	2003-12-13 05:36:30 -0800
commit	e220fdf7a39b54a758f4102bdd9d0d5706aa32a7 (patch)
tree	b92e82b310d996d9131089b87e31ad51f9c07a60
parent	14c5d2d2e97f75e744fd7b62c8e5bd826c1402c7 (diff)
More subtle SMP bugs in prepare_to_wait()/finish_wait().
This time we have an SMP memory ordering issue in prepare_to_wait(), where we really need to make sure that subsequent tests for the event we are waiting for cannot migrate up to before the wait queue has been set up.
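
For context, these helpers back the standard open-coded wait loop. A minimal sketch of the caller pattern, using the wait-queue API of this era ("q" and "condition" are stand-ins for a real queue and event, not names from this commit):

	/*
	 * Sleep until "condition" becomes true.  prepare_to_wait() must
	 * guarantee that the test of "condition" below cannot be reordered
	 * to before the task is on the queue and marked sleeping, or a
	 * wake-up arriving in that window would be lost.
	 */
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&q, &wait, TASK_UNINTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	finish_wait(&q, &wait);

Without the barrier this patch adds, the CPU could hoist the load of "condition" above the wait-queue insertion, reopening exactly the lost-wakeup window these helpers exist to close.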
-rw-r--r--	kernel/fork.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 6fa0ce76acc5..381b80b510ba 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
EXPORT_SYMBOL(remove_wait_queue);
+
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the critical region).
+ */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

-	__set_current_state(state);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
+	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

-	__set_current_state(state);
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
+	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
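
For contrast, the waker side of the handshake (again a sketch with illustrative names, not code from this commit):

	/*
	 * Publish the event, then wake.  wake_up() acquires q->lock, the
	 * same lock prepare_to_wait() now holds while queueing the task
	 * and setting its state, so the waker either finds the sleeper on
	 * the queue or the sleeper's later test of "condition" sees the
	 * store.
	 */
	condition = 1;
	wake_up(&q);

The crux is that set_current_state() is a store to current->state followed by a full memory barrier on SMP, while __set_current_state() is the bare store; switching to the barrier-bearing variant and calling it after __add_wait_queue() is what pins the caller's condition test behind the queue insertion.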