| author | Jens Axboe <axboe@suse.de> | 2002-05-28 05:39:17 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@suse.de> | 2002-05-28 05:39:17 -0700 |
| commit | eba5b46c3c8002cf528a0472b70356d77438ca98 | |
| tree | c053b6a6e69830a23230d966d4f1e8f90cadc851 /include/linux/blkdev.h | |
| parent | c43626f4822b7c6183fa864b53d3b39c2180cdae | |
[PATCH] block plugging reworked
This patch provides the ability for a block driver to signal it's too
busy to receive more work and temporarily halt the request queue. In
concept it's similar to the networking netif_{start,stop}_queue helpers.
To do this cleanly, I've ripped out the old tq_disk task queue. Instead,
an internal list of plugged queues is maintained, which honors the
current queue state (see the QUEUE_FLAG_STOPPED bit). Execution of
request_fn has been moved to tasklet context. blk_run_queues() provides
similar functionality to the old run_task_queue(&tq_disk).
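As a hypothetical illustration (not part of this patch): a request_fn-based driver might use the new start/stop interface roughly as below, halting the queue when its hardware fills up and restarting it from the completion interrupt. The mydrv_* functions, hw_queue_full(), hw_submit(), hw_complete() and mydev are invented stand-ins; elv_next_request() and blkdev_dequeue_request() are assumed to be the era's dequeue helpers, and locking/error handling are omitted.

```c
/* Hypothetical driver sketch against the interface this patch adds;
 * hw_queue_full(), hw_submit(), hw_complete() and mydev are invented. */
static void mydrv_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (hw_queue_full(mydev)) {
			/* Too busy: halt the queue. request_fn will not
			 * be invoked again until blk_start_queue(). */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		hw_submit(mydev, rq);
	}
}

/* Completion interrupt: hardware has drained, accept new work again. */
static void mydrv_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	hw_complete(dev);
	blk_start_queue(dev->queue);
}
```

The shape mirrors a network driver pausing its transmit path with netif_stop_queue() and waking it from the TX-done interrupt, which is exactly the analogy the changelog draws.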
Now, this only works at the request_fn level and not at the
make_request_fn level. This is on purpose: drivers working at the
make_request_fn level are essentially providing a piece of the
block-layer infrastructure themselves. There are basically two reasons
for make_request_fn-style setups:
o block remappers. Start/stop functionality is handled at the target
device in this case, which is the level that signals hardware full
(or ready to continue) anyway.
o drivers that wish to receive single "buffer" entities rather than
merged requests. These could use the start/stop functionality. I'd
suggest _still_ using a request_fn for them, but with the queue
options set so that no merging ever takes place (see the sketch after
this list). This has the added bonus of providing the usual
request-depletion throttling at the block level.
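For that second case, "set the queue options so that no merging takes place" might look like the sketch below at queue-setup time. This is hypothetical: QUEUE_FLAG_CLUSTER comes from this header, but blk_queue_max_sectors() is an assumed era-appropriate limit helper and the one-page limit is a placeholder.

```c
/* Hypothetical queue setup: clamp limits so each request stays a
 * single buffer-sized entity and nothing is ever merged. */
static void mydrv_setup_queue(request_queue_t *q)
{
	/* At most one page per request: nothing worth merging. */
	blk_queue_max_sectors(q, PAGE_SIZE >> 9);

	/* Leave QUEUE_FLAG_CLUSTER off so adjacent segments are not
	 * coalesced into larger hardware segments either. */
	clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
```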
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 14 |
1 file changed, 10 insertions, 4 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bfdb755f261f..b9972fe4fc70 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/backing-dev.h>
+#include <linux/interrupt.h>
 
 #include <asm/scatterlist.h>
@@ -136,6 +137,11 @@ struct blk_queue_tag {
 	int max_depth;
 };
 
+struct blk_plug {
+	struct list_head list;
+	struct tasklet_struct task;
+};
+
 /*
  * Default nr free requests per queue, ll_rw_blk will scale it down
  * according to available RAM at init time
@@ -177,10 +183,7 @@ struct request_queue
 	unsigned long		bounce_pfn;
 	int			bounce_gfp;
 
-	/*
-	 * This is used to remove the plug when tq_disk runs.
-	 */
-	struct tq_struct	plug_tq;
+	struct blk_plug		plug;
 
 	/*
 	 * various queue flags, see QUEUE_* below
@@ -217,6 +220,7 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	0	/* queue is plugged */
 #define QUEUE_FLAG_CLUSTER	1	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	2	/* uses generic tag queueing */
+#define QUEUE_FLAG_STOPPED	3	/* queue is stopped */
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_mark_plugged(q)	set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
@@ -303,6 +307,8 @@ extern void blk_recount_segments(request_queue_t *, struct bio *);
 extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern int block_ioctl(struct block_device *, unsigned int, unsigned long);
+extern void blk_start_queue(request_queue_t *q);
+extern void blk_stop_queue(request_queue_t *q);
 
 /*
  * get ready for proper ref counting
```
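The header only declares the pieces; the machinery the changelog describes lives in ll_rw_blk.c, outside this diff. Conceptually it might look like the sketch below: blk_run_queues() walks the internal list of plugged queues and schedules each queue's tasklet, whose handler later unplugs the queue and calls request_fn unless QUEUE_FLAG_STOPPED is set. The blk_plug_list/blk_plug_lock names are assumptions, not the actual implementation.

```c
/* Conceptual sketch only; blk_plug_list and blk_plug_lock are assumed
 * internals of drivers/block/ll_rw_blk.c, not shown in this diff. */
static LIST_HEAD(blk_plug_list);
static spinlock_t blk_plug_lock = SPIN_LOCK_UNLOCKED;

void blk_run_queues(void)
{
	struct list_head *entry, *tmp;

	spin_lock_irq(&blk_plug_lock);
	list_for_each_safe(entry, tmp, &blk_plug_list) {
		struct blk_plug *plug =
			list_entry(entry, struct blk_plug, list);

		list_del_init(&plug->list);
		/* request_fn runs later, from tasklet context */
		tasklet_schedule(&plug->task);
	}
	spin_unlock_irq(&blk_plug_lock);
}
```

The tasklet handler would then clear QUEUE_FLAG_PLUGGED and invoke q->request_fn(q), honoring QUEUE_FLAG_STOPPED; this is what replaces the old run_task_queue(&tq_disk) behavior.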
