author     Andrew Morton <akpm@osdl.org>          2003-07-04 19:37:12 -0700
committer  Linus Torvalds <torvalds@home.osdl.org>  2003-07-04 19:37:12 -0700
commit     80af89ca709d4dfe41178abe29217a0fefa1af12 (patch)
tree       3ad4b252781fd06a26ce2108d3c2f3aa2a2c526e /include
parent     f67198fbeb5ac1cf51f124791db1f8d1b7a04b85 (diff)
[PATCH] block batching fairness
From: Nick Piggin <piggin@cyberone.com.au>

This patch fixes the request batching fairness/starvation issue. It's not clear what is going on with 2.4, but it seems the problem is around this area. Anyway, previously:

* request queue fills up
* process 1 calls get_request, sleeps
* a couple of requests are freed
* process 2 calls get_request, proceeds
* a couple of requests are freed
* process 2 calls get_request...

Now as unlikely as it seems, it could be a problem. It is a fairness problem either way that process 2 can skip ahead of process 1. With the patch:

* request queue fills up
* any process calling get_request will sleep
* once the queue gets below the batch watermark, processes start being woken, and may allocate.

This patch includes Chris Mason's fix to only clear queue_full when all tasks have been woken. Previously I think starvation and unfairness could still occur. With this change to the blk-fair-batches patch, Chris is showing much improved numbers for 2.4 - 170 ms max wait vs 2700 ms without blk-fair-batches for a dbench 90 run. He didn't indicate how much difference his patch alone made, but I think it is an important fix. A sketch of the allocate/free logic described above follows.
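For illustration only, here is a minimal sketch of the allocation and free paths the description implies, built on the blk_queue_full() helpers this patch adds to blkdev.h. It is not the patch's actual drivers/block/ll_rw_blk.c code: the helpers wait_for_batch(), try_get_request(), free_request_count(), waiters_remaining(), wake_one_waiter() and the q->batch_requests watermark are assumptions made for the sketch.

/*
 * Illustrative sketch only -- not the actual ll_rw_blk.c logic.
 * Everything except blk_queue_full()/blk_set_queue_full()/
 * blk_clear_queue_full() is hypothetical.
 */
static struct request *get_request_wait(struct request_queue *q, int rw)
{
	struct request *rq;

	for (;;) {
		/*
		 * If this direction is marked full, everyone sleeps:
		 * no caller may skip ahead of earlier sleepers.
		 */
		while (blk_queue_full(q, rw))
			wait_for_batch(q, rw);		/* hypothetical */

		rq = try_get_request(q, rw);		/* hypothetical */
		if (rq)
			return rq;

		/*
		 * Out of requests: mark this direction full so later
		 * callers queue up behind us, then wait and retry.
		 */
		blk_set_queue_full(q, rw);
	}
}

static void request_freed(struct request_queue *q, int rw)
{
	/*
	 * Wake sleepers only once the free count climbs past the batch
	 * watermark, and (Chris Mason's fix) clear the full flag only
	 * after ALL sleepers have been woken -- clearing it earlier
	 * would let a newcomer starve the processes still asleep.
	 */
	if (free_request_count(q, rw) >= q->batch_requests) {	/* hypothetical */
		if (waiters_remaining(q, rw))			/* hypothetical */
			wake_one_waiter(q, rw);			/* hypothetical */
		else
			blk_clear_queue_full(q, rw);
	}
}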
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h  26
1 file changed, 26 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4295d60bf661..d3a8f6ecd806 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -307,6 +307,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
+#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
#define blk_queue_plugged(q) !list_empty(&(q)->plug_list)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
@@ -322,6 +324,30 @@ struct request_queue
#define rq_data_dir(rq) ((rq)->flags & 1)
+static inline int blk_queue_full(struct request_queue *q, int rw)
+{
+ if (rw == READ)
+ return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+ return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+}
+
+static inline void blk_set_queue_full(struct request_queue *q, int rw)
+{
+ if (rw == READ)
+ set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+ else
+ set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+}
+
+static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+{
+ if (rw == READ)
+ clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+ else
+ clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+}
+
+
/*
* mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
* it already be started by driver.
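A note on the helpers added above: they use the kernel's atomic bitops (test_bit, set_bit, clear_bit), so the two per-direction full flags can be tested and updated concurrently. A hypothetical call site, deriving the direction from rq_data_dir() as defined earlier (rq and q are assumed to be in scope):

	int rw = rq_data_dir(rq);		/* READ or WRITE */

	if (!blk_queue_full(q, rw))
		blk_set_queue_full(q, rw);	/* e.g. when allocation fails */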