| author | Martin Dalecki <dalecki@evision.ag> | 2002-07-26 01:14:19 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2002-07-26 01:14:19 -0700 |
| commit | cc1d784041442a6eb4d5b0fadf2fcb095c093e89 | |
| tree | 26d758e40f03fde6948e3312fcbd6ed727eafb12 | |
| parent | 22a22e0a4bf4f3638f7a55a6afd031d7d2aa4462 | |
[PATCH] 2.5.28 small REQ_SPECIAL abstraction
The attached patch does the following:
1. Remove blkdev_release_request(); blk_put_request() was an
unnecessary wrapper around it, likely some leftover from pre-BIO
times, so the underlying function simply takes over the
blk_put_request() name.
2. Abstract the fine __scsi_insert_special() function out of the
SCSI code.
Now that I have finally managed to kill all those IDE 'specific'
REQ_BLAH request types, we can do this final step, and it will soon
be used by at least the ATA code as well. The goal is that
scsi_request_fn and do_ide_request should start to look alike, like
siblings.
It's called blk_insert_request() now and is even documented in the
code; a usage sketch follows below.
3. Change some declarations over from extern inline to static inline
in blkdev.h. (trivia... though the short example below shows why it
matters)
This patch doesn't change *any* functionality, so it's not exposing
SCSI to any danger :-).
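For readers who want to see the new helper from the caller's side, here is a minimal sketch modelled on the SCSI call sites converted below. The driver functions and the my_cmd structure are hypothetical illustrations; only the blk_insert_request() signature and its head/tail semantics come from this patch.

```c
#include <linux/blkdev.h>

struct my_cmd;	/* hypothetical driver-private command cookie */

/*
 * Queue a fresh driver-private command.  Tail insertion (at_head == 0)
 * is the usual choice for new commands such as ioctls.  The helper tags
 * the request REQ_SPECIAL | REQ_BARRIER, stores the cookie in
 * rq->special, and kicks q->request_fn() under the queue lock.
 */
static void my_queue_command(request_queue_t *q, struct request *rq,
			     struct my_cmd *cmd)
{
	blk_insert_request(q, rq, 0, cmd);
}

/*
 * Requeue a command the device bounced (e.g. a QUEUE_FULL condition).
 * Head insertion (at_head == 1) makes it run before anything else.
 */
static void my_requeue_command(request_queue_t *q, struct request *rq,
			       struct my_cmd *cmd)
{
	blk_insert_request(q, rq, 1, cmd);
}
```

The queue's request_fn then recognises the REQ_SPECIAL flag and retrieves the cookie from rq->special, just as scsi_request_fn does.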
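On point 3: the switch is not purely cosmetic. Under the traditional GNU C semantics the kernel is compiled with, extern inline provides no out-of-line fallback, so any call the compiler declines to inline becomes an unresolved symbol at link time. A contrived illustration, not taken from the patch:

```c
/*
 * GNU89 "extern inline": the body is used only for inlining.  If gcc
 * does not inline a call (say at -O0, or when the function's address
 * is taken), it emits a reference to an external symbol that no object
 * file defines, and the final link fails.
 */
extern inline int double_it_fragile(int x)
{
	return x * 2;
}

/*
 * "static inline": each translation unit that needs an out-of-line
 * copy gets its own local one, so the link always succeeds.
 */
static inline int double_it_safe(int x)
{
	return x * 2;
}
```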
| -rw-r--r-- | drivers/block/DAC960.c | 2 |
| -rw-r--r-- | drivers/block/ll_rw_blk.c | 52 |
| -rw-r--r-- | drivers/scsi/scsi_lib.c | 51 |
| -rw-r--r-- | include/linux/blkdev.h | 31 |
| -rw-r--r-- | include/linux/nbd.h | 2 |
5 files changed, 64 insertions, 74 deletions
```diff
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index dc372aa4dbb1..59e4b53dfce5 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2884,7 +2884,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
   Command->BufferHeader = Request->bio;
   Command->RequestBuffer = Request->buffer;
   blkdev_dequeue_request(Request);
-  blkdev_release_request(Request);
+  blk_put_request(Request);
   DAC960_QueueReadWriteCommand(Command);
   return true;
 }
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index e73c1d823428..53f1706d2877 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1233,9 +1233,47 @@ struct request *__blk_get_request(request_queue_t *q, int rw)
 	return rq;
 }
 
-void blk_put_request(struct request *rq)
+/**
+ * blk_insert_request - insert a special request in to a request queue
+ * @q: request queue where request should be inserted
+ * @rq: request to be inserted
+ * @at_head: insert request at head or tail of queue
+ * @data: private data
+ *
+ * Description:
+ *    Many block devices need to execute commands asynchronously, so they don't
+ *    block the whole kernel from preemption during request execution. This is
+ *    accomplished normally by inserting aritficial requests tagged as
+ *    REQ_SPECIAL in to the corresponding request queue, and letting them be
+ *    scheduled for actual execution by the request queue.
+ *
+ *    We have the option of inserting the head or the tail of the queue.
+ *    Typically we use the tail for new ioctls and so forth. We use the head
+ *    of the queue for things like a QUEUE_FULL message from a device, or a
+ *    host that is unable to accept a particular command.
+ */
+void blk_insert_request(request_queue_t *q, struct request *rq,
+			int at_head, void *data)
 {
-	blkdev_release_request(rq);
+	unsigned long flags;
+
+	/*
+	 * tell I/O scheduler that this isn't a regular read/write (ie it
+	 * must not attempt merges on this) and that it acts as a soft
+	 * barrier
+	 */
+	rq->flags &= REQ_QUEUED;
+	rq->flags |= REQ_SPECIAL | REQ_BARRIER;
+
+	rq->special = data;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	/* If command is tagged, release the tag */
+	if(blk_rq_tagged(rq))
+		blk_queue_end_tag(q, rq);
+	_elv_add_request(q, rq, !at_head, 0);
+	q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /* RO fail safe mechanism */
@@ -1307,7 +1345,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
 /*
  * Must be called with queue lock held and interrupts disabled
  */
-void blkdev_release_request(struct request *req)
+void blk_put_request(struct request *req)
 {
 	struct request_list *rl = req->rl;
 	request_queue_t *q = req->q;
@@ -1370,7 +1408,7 @@ static void attempt_merge(request_queue_t *q, struct request *req,
 	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
 
-	blkdev_release_request(next);
+	blk_put_request(next);
 	}
 }
@@ -1568,7 +1606,7 @@ get_rq:
 	add_request(q, req, insert_here);
 out:
 	if (freereq)
-		blkdev_release_request(freereq);
+		blk_put_request(freereq);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
@@ -2003,7 +2041,7 @@ void end_that_request_last(struct request *req)
 	if (req->waiting)
 		complete(req->waiting);
 
-	blkdev_release_request(req);
+	blk_put_request(req);
 }
 
 #define MB(kb)	((kb) << 10)
@@ -2064,7 +2102,6 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 EXPORT_SYMBOL(generic_make_request);
-EXPORT_SYMBOL(blkdev_release_request);
 EXPORT_SYMBOL(generic_unplug_device);
 EXPORT_SYMBOL(blk_plug_device);
 EXPORT_SYMBOL(blk_remove_plug);
@@ -2088,6 +2125,7 @@ EXPORT_SYMBOL(blk_hw_contig_segment);
 EXPORT_SYMBOL(blk_get_request);
 EXPORT_SYMBOL(__blk_get_request);
 EXPORT_SYMBOL(blk_put_request);
+EXPORT_SYMBOL(blk_insert_request);
 EXPORT_SYMBOL(blk_queue_prep_rq);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0bbb215e7a8d..9f516d0d204d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -51,53 +51,6 @@
  */
 
 /*
- * Function:    __scsi_insert_special()
- *
- * Purpose:     worker for scsi_insert_special_*()
- *
- * Arguments:   q - request queue where request should be inserted
- *              rq - request to be inserted
- *              data - private data
- *              at_head - insert request at head or tail of queue
- *
- * Lock status: Assumed that queue lock is not held upon entry.
- *
- * Returns:     Nothing
- */
-static void __scsi_insert_special(request_queue_t *q, struct request *rq,
-				  void *data, int at_head)
-{
-	unsigned long flags;
-
-	ASSERT_LOCK(q->queue_lock, 0);
-
-	/*
-	 * tell I/O scheduler that this isn't a regular read/write (ie it
-	 * must not attempt merges on this) and that it acts as a soft
-	 * barrier
-	 */
-	rq->flags &= REQ_QUEUED;
-	rq->flags |= REQ_SPECIAL | REQ_BARRIER;
-
-	rq->special = data;
-
-	/*
-	 * We have the option of inserting the head or the tail of the queue.
-	 * Typically we use the tail for new ioctls and so forth. We use the
-	 * head of the queue for things like a QUEUE_FULL message from a
-	 * device, or a host that is unable to accept a particular command.
-	 */
-	spin_lock_irqsave(q->queue_lock, flags);
-	/* If command is tagged, release the tag */
-	if(blk_rq_tagged(rq))
-		blk_queue_end_tag(q, rq);
-	_elv_add_request(q, rq, !at_head, 0);
-	q->request_fn(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-
-/*
  * Function:    scsi_insert_special_cmd()
  *
  * Purpose:     Insert pre-formed command into request queue.
@@ -121,7 +74,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 {
 	request_queue_t *q = &SCpnt->device->request_queue;
 
-	__scsi_insert_special(q, SCpnt->request, SCpnt, at_head);
+	blk_insert_request(q, SCpnt->request, at_head, SCpnt);
 	return 0;
 }
@@ -149,7 +102,7 @@ int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
 {
 	request_queue_t *q = &SRpnt->sr_device->request_queue;
 
-	__scsi_insert_special(q, SRpnt->sr_request, SRpnt, at_head);
+	blk_insert_request(q, SRpnt->sr_request, at_head, SRpnt);
 	return 0;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 726492d4c45a..f83c52f82ab0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -281,12 +281,13 @@ extern int wipe_partitions(kdev_t dev);
 extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors,
 			  struct block_device_operations *ops, long size);
 extern void generic_make_request(struct bio *bio);
 extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
-extern void blkdev_release_request(struct request *);
+extern void blk_put_request(struct request *);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern struct request *__blk_get_request(request_queue_t *, int);
 extern void blk_put_request(struct request *);
+extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
@@ -309,20 +310,21 @@ extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
-extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
-extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
-extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
+extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
+extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
+extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(void *);
+
 /*
  * tag stuff
  */
@@ -348,15 +350,12 @@ extern int * blk_size[MAX_BLKDEV];	/* in units of 1024 bytes */
 extern void drive_stat_acct(struct request *, int, int);
 
-extern inline void blk_clear(int major)
+static inline void blk_clear(int major)
 {
 	blk_size[major] = NULL;
-#if 0
-	blk_size_in_bytes[major] = NULL;
-#endif
 }
 
-extern inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;
 
@@ -366,7 +365,7 @@ extern inline int queue_hardsect_size(request_queue_t *q)
 	return retval;
 }
 
-extern inline int bdev_hardsect_size(struct block_device *bdev)
+static inline int bdev_hardsect_size(struct block_device *bdev)
 {
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
@@ -375,7 +374,7 @@ extern inline int bdev_hardsect_size(struct block_device *bdev)
 #define blk_started_io(nsects)	do { } while (0)
 
 /* assumes size > 256 */
-extern inline unsigned int blksize_bits(unsigned int size)
+static inline unsigned int blksize_bits(unsigned int size)
 {
 	unsigned int bits = 8;
 	do {
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index eae4f5bbb65a..e2c507ba6b08 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -61,7 +61,7 @@ nbd_end_request(struct request *req)
 		bio->bi_next = NULL;
 		bio_endio(bio, uptodate);
 	}
-	blkdev_release_request(req);
+	blk_put_request(req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
```
