| author | Linus Torvalds <torvalds@home.transmeta.com> | 2002-10-27 17:54:16 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@home.transmeta.com> | 2002-10-27 17:54:16 -0800 |
| commit | 8baa8006a3ea056450ba7f314c79091d066c066c (patch) | |
| tree | 2f90aa6571dc4ae75cd9004b3677852ba704869f /include/linux/blkdev.h | |
| parent | 27b727c8708962c62525a58286c1d95051ae2111 (diff) | |
| parent | 97565ed3ac5944e98c95d33d30626c5d3a3b18b3 (diff) | |
Merge http://gkernel.bkbits.net/misc-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 28 |
|---|---|---|
1 file changed, 27 insertions, 1 deletion
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 42e81a4a0cab..de87dee16f83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,6 +26,8 @@ struct request {
 	struct list_head queuelist; /* looking for ->queue? you must _not_
 				     * access it directly, use
 				     * blkdev_dequeue_request! */
+	int ref_count;
+
 	void *elevator_private;
 
 	unsigned char cmd[16];
@@ -215,6 +217,7 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	unsigned int		dma_alignment;
 
 	wait_queue_head_t	queue_wait;
 
@@ -254,6 +257,13 @@ struct request_queue
  */
 #define blk_queue_headactive(q, head_active)
 
+/*
+ * q->prep_rq_fn return values
+ */
+#define BLKPREP_OK		0	/* serve it */
+#define BLKPREP_KILL		1	/* fatal error, kill */
+#define BLKPREP_DEFER		2	/* leave on queue */
+
 extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 /*
@@ -268,7 +278,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
 
 extern int init_emergency_isa_pool(void);
-void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
 
 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\
@@ -339,6 +349,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -385,6 +396,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 {
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
+static inline int queue_dma_alignment(request_queue_t *q)
+{
+	int retval = 511;
+
+	if (q && q->dma_alignment)
+		retval = q->dma_alignment;
+
+	return retval;
+}
+
+static inline int bdev_dma_aligment(struct block_device *bdev)
+{
+	return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
 #define blk_finished_io(nsects)	do { } while (0)
 #define blk_started_io(nsects)	do { } while (0)
```
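The BLKPREP_* values above define the contract for a queue's prep_rq_fn hook: the block layer calls it before handing a request to the driver, and the return value decides whether the request is served now, left on the queue for a retry, or failed. The sketch below shows how a driver might use them, assuming the 2.5-era hook signature `int (request_queue_t *, struct request *)`; `struct mydev`, `mydev_busy()` and `mydev_build_cmd()` are hypothetical helpers invented for illustration, not part of this patch.

```c
#include <linux/blkdev.h>
#include <linux/string.h>

/* hypothetical per-device state and helpers, invented for illustration */
struct mydev {
	int busy;
};

static int mydev_busy(struct mydev *dev)
{
	return dev->busy;
}

static int mydev_build_cmd(struct mydev *dev, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));	/* build the device command here */
	return 0;
}

/*
 * Prepare-request hook: decide whether a request can be served now
 * (BLKPREP_OK), must stay on the queue for a later retry
 * (BLKPREP_DEFER), or cannot be completed at all (BLKPREP_KILL).
 */
static int mydev_prep_rq(request_queue_t *q, struct request *rq)
{
	struct mydev *dev = q->queuedata;

	if (mydev_busy(dev))
		return BLKPREP_DEFER;	/* leave on queue */

	if (mydev_build_cmd(dev, rq) < 0)
		return BLKPREP_KILL;	/* fatal error, kill */

	return BLKPREP_OK;		/* serve it */
}
```

The hook would be registered once at queue-setup time with `blk_queue_prep_rq(q, mydev_prep_rq)`, the setter already declared in this header.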

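The other addition is the per-queue `dma_alignment` field with its setter `blk_queue_dma_alignment()` and the `queue_dma_alignment()`/`bdev_dma_aligment()` accessors (the latter misspelled as committed). The 511 default suggests the value is an alignment mask (length minus one, i.e. 512-byte alignment); under that assumption, a driver and a caller might use it as sketched below. `mydev_init_queue()`, `mydev_check_buffer()` and the 4-byte constraint are invented for illustration.

```c
#include <linux/blkdev.h>
#include <linux/errno.h>

/* hypothetical: hardware that can DMA to any 4-byte-aligned buffer */
static void mydev_init_queue(request_queue_t *q)
{
	blk_queue_dma_alignment(q, 4 - 1);	/* store a mask, not a length */
}

/* consumer side: reject a buffer the underlying device cannot DMA to */
static int mydev_check_buffer(struct block_device *bdev, unsigned long addr)
{
	if (addr & bdev_dma_aligment(bdev))
		return -EINVAL;	/* caller must bounce or realign the buffer */

	return 0;
}
```

Storing the mask rather than the length keeps the common check down to a single AND, which is presumably why the fallback is 511 rather than 512.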