-rw-r--r--  drivers/block/ll_rw_blk.c  | 17
-rw-r--r--  include/linux/blkdev.h     | 17
2 files changed, 34 insertions, 0 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index e4db6ecf4faf..45d1b8ee1ce6 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.state = 0;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);

/*
* by default assume old behaviour and bounce for any highmem page
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
q->seg_boundary_mask = mask;
}
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * description:
+ *    Set required memory and length alignment for direct DMA transactions.
+ *    This is used when building direct I/O requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+ q->dma_alignment = mask;
+}
+
void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
{
spin_lock_init(lock);
@@ -2124,6 +2140,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);
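The default mask is set in blk_queue_make_request() (511, i.e. 512-byte alignment), and a low-level driver can override it through the newly exported blk_queue_dma_alignment(). As a minimal sketch, assuming a hypothetical mydrv_init_queue() helper and a controller that only needs 4-byte alignment (neither is part of this patch), the override would look like this:

#include <linux/blkdev.h>

/*
 * Hypothetical example, not part of this patch: relax the default
 * 511 mask to 3, i.e. buffers need only be 4-byte aligned and a
 * multiple of 4 bytes long (mask = required alignment - 1).
 */
static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_dma_alignment(q, 0x3);
}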
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7ac2b3bb88e5..cd9b016a8993 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -215,6 +215,7 @@ struct request_queue
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
+ unsigned int dma_alignment;
wait_queue_head_t queue_wait;
@@ -346,6 +347,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -392,6 +394,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
return queue_hardsect_size(bdev_get_queue(bdev));
}
+static inline int queue_dma_alignment(request_queue_t *q)
+{
+ int retval = 511;
+
+ if (q && q->dma_alignment)
+ retval = q->dma_alignment;
+
+ return retval;
+}
+
+static inline int bdev_dma_alignment(struct block_device *bdev)
+{
+ return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)
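The kerneldoc above notes that the mask is meant to be consulted when building direct-I/O requests for the queue. A sketch of that kind of consumer, using a hypothetical usr_buf_ok_for_dma() helper (not part of this patch) that checks both the start address and the length of a user buffer against the queue's mask before mapping it for DMA:

#include <linux/blkdev.h>

/*
 * Hypothetical helper: non-zero if the user buffer may be mapped
 * directly for DMA on this queue; otherwise it must be bounced.
 */
static inline int usr_buf_ok_for_dma(request_queue_t *q,
				     unsigned long uaddr,
				     unsigned int len)
{
	unsigned long mask = queue_dma_alignment(q);

	/* both start address and length must honour the alignment mask */
	return ((uaddr | len) & mask) == 0;
}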