summary refs log tree commit diff
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 23:59:17 -0800
committerLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 23:59:17 -0800
commit51f4a834d5661caada395e99fd713065f04bfc69 (patch)
tree3c409bfa49dd93aefccd9a05dcec76899c059828 /include/linux/blkdev.h
parentfe0976511d3b5cf2894da54bc451e561bd6b1482 (diff)
v2.5.0.11 -> v2.5.1
- Al Viro: floppy_eject cleanup, mount cleanups - Jens Axboe: bio updates - Ingo Molnar: mempool fixes - GOTO Masanori: Fix O_DIRECT error handling
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 33
1 files changed, 26 insertions, 7 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fad87a308171..620b149ec617 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -41,8 +41,19 @@ struct request {
* touch them
*/
unsigned long hard_nr_sectors;
- unsigned short nr_segments;
+
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+ /* Number of scatter-gather addr+len pairs after
+ * physical and DMA remapping hardware coalescing is performed.
+ * This is the number of scatter-gather entries the driver
+ * will actually have to deal with after DMA mapping is done.
+ */
unsigned short nr_hw_segments;
+
unsigned int current_nr_sectors;
unsigned int hard_cur_sectors;
void *special;
@@ -146,6 +157,7 @@ struct request_queue
* queue needs bounce pages for pages above this limit
*/
unsigned long bounce_pfn;
+ int bounce_gfp;
/*
* This is used to remove the plug when tq_disk runs.
@@ -166,7 +178,8 @@ struct request_queue
* queue settings
*/
unsigned short max_sectors;
- unsigned short max_segments;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
unsigned short hardsect_size;
unsigned int max_segment_size;
@@ -202,19 +215,22 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
#ifdef CONFIG_HIGHMEM
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
- create_bounce(q->bounce_pfn, bio);
+ create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
}
#else /* CONFIG_HIGHMEM */
#define blk_queue_bounce(q, bio) do { } while (0)
+#define init_emergency_isa_pool() do { } while (0)
#endif /* CONFIG_HIGHMEM */
@@ -257,7 +273,8 @@ extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -270,7 +287,8 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -284,7 +302,8 @@ extern int * blksize_size[MAX_BLKDEV];
extern int * max_readahead[MAX_BLKDEV];
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_SEGMENT_SIZE 65536