Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	28
1 files changed, 25 insertions, 3 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4efe45d1af7e..b2059869cb92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,8 +19,8 @@
 struct request_queue;
 typedef struct request_queue request_queue_t;
-struct elevator_s;
-typedef struct elevator_s elevator_t;
+struct elevator_queue;
+typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 
 #define BLKDEV_MIN_RQ	4
@@ -52,6 +52,20 @@ struct as_io_context {
 	sector_t seek_mean;
 };
 
+struct cfq_queue;
+struct cfq_io_context {
+	void (*dtor)(struct cfq_io_context *);
+	void (*exit)(struct cfq_io_context *);
+
+	struct io_context *ioc;
+
+	/*
+	 * circular list of cfq_io_contexts belonging to a process io context
+	 */
+	struct list_head list;
+	struct cfq_queue *cfqq;
+};
+
 /*
  * This is the per-process I/O subsystem state. It is refcounted and
  * kmalloc'ed. Currently all fields are modified in process io context
@@ -67,7 +81,10 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */
 
+	spinlock_t lock;
+
 	struct as_io_context *aic;
+	struct cfq_io_context *cic;
 };
 
 void put_io_context(struct io_context *ioc);
@@ -80,6 +97,7 @@ struct request_list {
 	int count[2];
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
+	wait_queue_head_t drain;
 };
 
 #define BLK_MAX_CDB	16
@@ -279,7 +297,7 @@ struct request_queue
 	 */
 	struct list_head	queue_head;
 	struct request		*last_merge;
-	elevator_t		elevator;
+	elevator_t		*elevator;
 
 	/*
 	 * the queue request freelist, one for reads and one for writes
@@ -342,6 +360,7 @@ struct request_queue
 	unsigned long		nr_requests;	/* Max # of requests */
 	unsigned int		nr_congestion_on;
 	unsigned int		nr_congestion_off;
+	unsigned int		nr_batching;
 
 	unsigned short		max_sectors;
 	unsigned short		max_hw_sectors;
@@ -381,6 +400,7 @@ struct request_queue
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ORDERED	8	/* supports ordered writes */
+#define QUEUE_FLAG_DRAIN	9	/* draining queue for sched switch */
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
@@ -617,6 +637,8 @@ extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
 extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
+extern void blk_wait_queue_drained(request_queue_t *);
+extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
 request_queue_t *blk_alloc_queue(int);
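
Note: the two declarations added at the bottom of the header form a pair with the new QUEUE_FLAG_DRAIN flag and the drain waitqueue in struct request_list: blk_wait_queue_drained() waits for the queue to empty, and blk_finish_queue_drain() re-opens it afterwards. A minimal sketch of how a scheduler-switch path might pair them is below; the function name elevator_switch_sketch() and the detach/attach step are assumptions for illustration, not part of this diff.

/*
 * Minimal sketch, not from this commit: pairing the new drain hooks
 * around an I/O scheduler switch.  Only the two declarations above are
 * taken from the header; everything else here is assumed.
 */
#include <linux/blkdev.h>

static void elevator_switch_sketch(request_queue_t *q)
{
	/*
	 * Presumably raises QUEUE_FLAG_DRAIN and sleeps until in-flight
	 * requests have completed.
	 */
	blk_wait_queue_drained(q);

	/*
	 * ... detach q->elevator and attach the replacement scheduler
	 * here, with the queue known to be empty ...
	 */

	/* Clears the drain state so request allocation can resume. */
	blk_finish_queue_drain(q);
}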
