author    Jens Axboe <axboe@suse.de>  2003-05-13 21:03:29 -0700
committer Jens Axboe <axboe@suse.de>  2003-05-13 21:03:29 -0700
commit    21eca8527c4e8554a5172c9730a32392959600b7 (patch)
tree      47bf748f556ac383be6a5812b0a892c3053948cd /include
parent    29b2559436ac67959dcc17c0407b8df1a0922cbb (diff)
[PATCH] bio walking code
Add bio traversal functionality. This is a prerequisite for doing IDE multiwrites safely and sanely. The patch was originally done by Suparna; Bartlomiej picked it up and changed the design somewhat.

From Bart:

The main idea is now reversed - instead of introducing rq->hard_bio as the pointer for the bio to be completed and using rq->bio as the pointer for the bio to be submitted, rq->cbio is introduced for submissions and rq->bio is used for completions.

This minimizes changes to the block layer and assures that all existing block users are not affected by this patch.
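For illustration, a hedged sketch of the driver-side submission loop this design enables. process_that_request_first(), rq_map_buffer() and rq_unmap_buffer() are the interfaces added below; the loop itself, pio_output_data(), and the assumption that process_that_request_first() takes the number of sectors just transferred are hypothetical:

/*
 * Hypothetical PIO write-out loop: submission state (rq->cbio,
 * rq->sector, rq->nr_sectors, ...) advances here, while completion
 * state (rq->bio, rq->hard_*) is only advanced later via
 * end_that_request_first()/end_that_request_chunk().
 */
static void pio_submit_request(struct request *rq)
{
	while (rq->nr_sectors) {
		unsigned int nsect = rq->current_nr_sectors;
		unsigned long flags;
		char *buf;

		buf = rq_map_buffer(rq, &flags);	/* map current cbio segment */
		pio_output_data(buf, nsect << 9);	/* hypothetical h/w transfer */
		rq_unmap_buffer(buf, &flags);

		process_that_request_first(rq, nsect);	/* walk rq->cbio forward */
	}
}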
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bio.h     26
-rw-r--r--  include/linux/blkdev.h  60
2 files changed, 68 insertions(+), 18 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 19451364a58e..bbc69dd67b28 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -131,6 +131,7 @@ struct bio {
#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio) bio_iovec((bio))->bv_page
#define bio_offset(bio) bio_iovec((bio))->bv_offset
+#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
@@ -226,12 +227,12 @@ extern void bio_check_pages_dirty(struct bio *bio);
#ifdef CONFIG_HIGHMEM
/*
* remember to add offset! and never ever reenable interrupts between a
- * bio_kmap_irq and bio_kunmap_irq!!
+ * bvec_kmap_irq and bvec_kunmap_irq!!
*
* This function MUST be inlined - it plays with the CPU interrupt flags.
* Hence the `extern inline'.
*/
-extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
+extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
unsigned long addr;
@@ -240,15 +241,15 @@ extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
/*
 * might not be a highmem page, but the preempt/irq count
 * balancing is a lot nicer this way
*/
local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_SRC_IRQ);
+ addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
if (addr & ~PAGE_MASK)
BUG();
- return (char *) addr + bio_offset(bio);
+ return (char *) addr + bvec->bv_offset;
}
-extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
+extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
@@ -257,8 +258,19 @@ extern inline void bio_kunmap_irq(char *buffer, unsigned long *flags)
}
#else
-#define bio_kmap_irq(bio, flags) (bio_data(bio))
-#define bio_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
+#define bvec_kmap_irq(bvec, flags) (page_address((bvec)->bv_page) + (bvec)->bv_offset)
+#define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
#endif
+extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+ unsigned long *flags)
+{
+ return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+}
+#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
+
+#define bio_kmap_irq(bio, flags) \
+ __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
+
#endif /* __LINUX_BIO_H */
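As a usage sketch (the helper below is hypothetical; bvec_kmap_irq()/bvec_kunmap_irq() are introduced above): unlike the old bio_kmap_irq(), the bvec variants can map any segment of a bio, not just the one at bi_idx.

/* Hypothetical: copy segment 'idx' of a (possibly highmem) bio into 'dst' */
static void copy_bio_segment(struct bio *bio, unsigned short idx, char *dst)
{
	struct bio_vec *bvec = bio_iovec_idx(bio, idx);
	unsigned long flags;
	char *src = bvec_kmap_irq(bvec, &flags);

	/* never re-enable interrupts between map and unmap */
	memcpy(dst, src, bvec->bv_len);
	bvec_kunmap_irq(src, &flags);
}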
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6e0fc430b428..aa38c5f33c21 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -11,6 +11,7 @@
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
+#include <linux/bio.h>
#include <asm/scatterlist.h>
@@ -36,25 +37,35 @@ struct request {
struct list_head queuelist; /* looking for ->queue? you must _not_
			     * access it directly, use
			     * blkdev_dequeue_request! */
unsigned long flags; /* see REQ_ bits below */
- sector_t sector;
- unsigned long nr_sectors;
+ /* Maintain bio traversal state for part by part I/O submission.
+ * hard_* are block layer internals, no driver should touch them!
+ */
+
+ sector_t sector; /* next sector to submit */
+ unsigned long nr_sectors; /* no. of sectors left to submit */
+ /* no. of sectors left to submit in the current segment */
unsigned int current_nr_sectors;
+ sector_t hard_sector; /* next sector to complete */
+ unsigned long hard_nr_sectors; /* no. of sectors left to complete */
+ /* no. of sectors left to complete in the current segment */
+ unsigned int hard_cur_sectors;
+
+ /* no. of segments left to submit in the current bio */
+ unsigned short nr_cbio_segments;
+ /* no. of sectors left to submit in the current bio */
+ unsigned long nr_cbio_sectors;
+
+ struct bio *cbio; /* next bio to submit */
+ struct bio *bio; /* next unfinished bio to complete */
+ struct bio *biotail;
+
void *elevator_private;
int rq_status; /* should split this into a few status bits */
struct gendisk *rq_disk;
int errors;
unsigned long start_time;
- sector_t hard_sector; /* the hard_* are block layer
- * internals, no driver should
- * touch them
- */
- unsigned long hard_nr_sectors;
- unsigned int hard_cur_sectors;
-
- struct bio *bio;
- struct bio *biotail;
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
@@ -284,6 +295,32 @@ struct request_queue
/*
 * noop, requests are automagically marked as active/inactive by I/O
 * scheduler -- see elv_next_request
 */
#define blk_queue_headactive(q, head_active)
+/* current index into bio being processed for submission */
+#define blk_rq_idx(rq) ((rq)->cbio->bi_vcnt - (rq)->nr_cbio_segments)
+
+/* current bio vector being processed */
+#define blk_rq_vec(rq) (bio_iovec_idx((rq)->cbio, blk_rq_idx(rq)))
+
+/* current offset with respect to start of the segment being submitted */
+#define blk_rq_offset(rq) \
+ (((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9)
+
+/*
+ * temporarily mapping a (possible) highmem bio (typically for PIO transfer)
+ */
+
+/* Assumes rq->cbio != NULL */
+static inline char * rq_map_buffer(struct request *rq, unsigned long *flags)
+{
+ return (__bio_kmap_irq(rq->cbio, blk_rq_idx(rq), flags)
+ + blk_rq_offset(rq));
+}
+
+static inline void rq_unmap_buffer(char *buffer, unsigned long *flags)
+{
+ __bio_kunmap_irq(buffer, flags);
+}
+
/*
* q->prep_rq_fn return values
*/
@@ -362,6 +399,7 @@ static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *);
+extern int process_that_request_first(struct request *, unsigned int);
extern void end_request(struct request *req, int uptodate);
static inline void blkdev_dequeue_request(struct request *req)
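To make the new accessors concrete, a hedged sketch of how they compose (the function is hypothetical; blk_rq_idx(), blk_rq_vec() and blk_rq_offset() are the macros added above):

/* Hypothetical: report the current submission position within rq->cbio */
static void rq_show_position(struct request *rq)
{
	int idx = blk_rq_idx(rq);		/* segment index into rq->cbio */
	struct bio_vec *bv = blk_rq_vec(rq);	/* bio_vec at that index */
	unsigned int off = blk_rq_offset(rq);	/* bytes already submitted
						 * in the current segment */

	printk("cbio segment %d/%d: %u of %u bytes submitted\n",
	       idx, rq->cbio->bi_vcnt, off, bv->bv_len);
}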