author		Jens Axboe <axboe@suse.de>	2002-10-27 17:15:58 -0800
committer	Jens Axboe <axboe@suse.de>	2002-10-27 17:15:58 -0800
commit		ad519c6902fb66990ad206cdf2c38da259f0dee9 (patch)
tree		13626848a77431f88f66722ddbc3a7da8b5711e4
parent		0717c0a96379ba634411b99a806587acad9f9fee (diff)
[PATCH] queue dma alignment
Make it possible for a device to specify the DMA alignment restrictions it has. This will be used by future infrastructure when mapping in user pages, and allows us to do DMA on ATAPI even though the user address and length are not sector-size aligned.
-rw-r--r--	drivers/block/ll_rw_blk.c	17
-rw-r--r--	include/linux/blkdev.h		17
2 files changed, 34 insertions, 0 deletions
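As a usage sketch, not part of this patch: a driver whose controller can only DMA on 4-byte boundaries would register that restriction when setting up its queue. The init function below is hypothetical; the mask convention (alignment minus one, matching the 511 default installed in blk_queue_make_request) is taken from the patch itself.

/*
 * Hypothetical driver setup (illustration only).  A controller that
 * requires 4-byte aligned DMA addresses and lengths passes a mask of
 * alignment - 1, here 0x3.  Drivers that set nothing keep the 512-byte
 * (sector) default of 511 from blk_queue_make_request().
 */
static void example_driver_init_queue(request_queue_t *q)
{
	blk_queue_dma_alignment(q, 0x3);	/* 4-byte dma alignment */
}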
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index e4db6ecf4faf..45d1b8ee1ce6 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.state = 0;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
+ blk_queue_dma_alignment(q, 511);
/*
* by default assume old behaviour and bounce for any highmem page
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
q->seg_boundary_mask = mask;
}
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * description:
+ *    Set required memory and length alignment for direct dma transactions.
+ *    This is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+ q->dma_alignment = mask;
+}
+
void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
{
spin_lock_init(lock);
@@ -2124,6 +2140,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7ac2b3bb88e5..cd9b016a8993 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -215,6 +215,7 @@ struct request_queue
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
+ unsigned int dma_alignment;
wait_queue_head_t queue_wait;
@@ -346,6 +347,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -392,6 +394,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
return queue_hardsect_size(bdev_get_queue(bdev));
}
+static inline int queue_dma_alignment(request_queue_t *q)
+{
+ int retval = 511;
+
+ if (q && q->dma_alignment)
+ retval = q->dma_alignment;
+
+ return retval;
+}
+
+static inline int bdev_dma_alignment(struct block_device *bdev)
+{
+ return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)
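The "future infrastructure" the changelog mentions would consult queue_dma_alignment() when building direct-io requests from user memory. A minimal sketch of such a check follows; the helper name is hypothetical, and only the mask semantics come from this patch.

/*
 * Hypothetical check (illustration only): a user buffer is eligible
 * for direct dma if both its start address and its length satisfy
 * the queue's alignment mask.
 */
static inline int example_user_buffer_ok(request_queue_t *q,
					 unsigned long uaddr,
					 unsigned int len)
{
	return ((uaddr | len) & queue_dma_alignment(q)) == 0;
}

With the 511 default this rejects any buffer that is not sector aligned in both address and length; the ATAPI case in the changelog works because such a device can register a smaller mask.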