Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 216
 1 file changed, 151 insertions(+), 65 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cdb196ca50fb..f3e54a537508 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -6,60 +6,57 @@
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>
+#include <linux/mm.h>
+
+#include <asm/scatterlist.h>
struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_s;
typedef struct elevator_s elevator_t;
-/*
- * Ok, this is an expanded form so that we can use the same
- * request for paging requests.
- */
struct request {
- struct list_head queue;
+ struct list_head queuelist; /* looking for ->queue? you must _not_
+ * access it directly, use
+ * blkdev_dequeue_request! */
int elevator_sequence;
- volatile int rq_status; /* should split this into a few status bits */
-#define RQ_INACTIVE (-1)
-#define RQ_ACTIVE 1
-#define RQ_SCSI_BUSY 0xffff
-#define RQ_SCSI_DONE 0xfffe
-#define RQ_SCSI_DISCONNECTING 0xffe0
+ int inactive; /* driver hasn't seen it yet */
+ int rq_status; /* should split this into a few status bits */
kdev_t rq_dev;
int cmd; /* READ or WRITE */
int errors;
- unsigned long sector;
+ sector_t sector;
unsigned long nr_sectors;
unsigned long hard_sector, hard_nr_sectors;
- unsigned int nr_segments;
- unsigned int nr_hw_segments;
- unsigned long current_nr_sectors;
- void * special;
- char * buffer;
- struct completion * waiting;
- struct buffer_head * bh;
- struct buffer_head * bhtail;
+ unsigned short nr_segments;
+ unsigned short nr_hw_segments;
+ unsigned int current_nr_sectors;
+ unsigned int hard_cur_sectors;
+ void *special;
+ char *buffer;
+ struct completion *waiting;
+ struct bio *bio, *biotail;
request_queue_t *q;
};
#include <linux/elevator.h>
-typedef int (merge_request_fn) (request_queue_t *q,
- struct request *req,
- struct buffer_head *bh,
- int);
-typedef int (merge_requests_fn) (request_queue_t *q,
- struct request *req,
- struct request *req2,
- int);
+typedef int (merge_request_fn) (request_queue_t *, struct request *,
+ struct bio *);
+typedef int (merge_requests_fn) (request_queue_t *, struct request *,
+ struct request *);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
-typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
-typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
+typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef void (unplug_device_fn) (void *q);
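
With buffer heads gone from the I/O path, make_request_fn now takes a struct bio directly and the old rw argument is carried by the bio itself. Below is a hedged sketch of a driver's make_request function under the new prototype; example_queue_bio() is a hypothetical driver-private helper, not something defined by this patch:

static int example_make_request(request_queue_t *q, struct bio *bio)
{
	/* replace any highmem pages above q->bounce_pfn with bounce pages */
	blk_queue_bounce(q, &bio);

	/* hand the (possibly bounced) bio to driver-private queueing code;
	 * example_queue_bio() is hypothetical */
	example_queue_bio(q, bio);

	/* 0: bio handled here; a non-zero return asks generic_make_request
	 * to resubmit, as stacking drivers do after remapping */
	return 0;
}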
+enum blk_queue_state {
+ Queue_down,
+ Queue_up,
+};
+
/*
* Default nr free requests per queue, ll_rw_blk will scale it down
* according to available RAM at init time
@@ -69,6 +66,7 @@ typedef void (unplug_device_fn) (void *q);
struct request_list {
unsigned int count;
struct list_head free;
+ wait_queue_head_t wait;
};
struct request_queue
@@ -89,7 +87,7 @@ struct request_queue
merge_request_fn * front_merge_fn;
merge_requests_fn * merge_requests_fn;
make_request_fn * make_request_fn;
- plug_device_fn * plug_device_fn;
+
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
@@ -97,33 +95,111 @@ struct request_queue
void * queuedata;
/*
- * This is used to remove the plug when tq_disk runs.
+ * queue needs bounce pages for pages above this limit
*/
- struct tq_struct plug_tq;
+ unsigned long bounce_pfn;
/*
- * Boolean that indicates whether this queue is plugged or not.
+ * for memory zoning (<= 4GB and > 4GB)
*/
- char plugged;
+ int bounce_gfp;
/*
- * Boolean that indicates whether current_request is active or
- * not.
+ * This is used to remove the plug when tq_disk runs.
*/
- char head_active;
+ struct tq_struct plug_tq;
/*
- * Is meant to protect the queue in the future instead of
- * io_request_lock
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ /*
+ * protects queue structures from reentrancy
*/
spinlock_t queue_lock;
/*
- * Tasks wait here for free request
+ * queue settings
*/
- wait_queue_head_t wait_for_request;
+ unsigned short max_sectors;
+ unsigned short max_segments;
+ unsigned short hardsect_size;
+ unsigned int max_segment_size;
+
+ wait_queue_head_t queue_wait;
+
+ unsigned int hash_valid_counter;
};
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+#define QUEUE_FLAG_PLUGGED 0 /* queue is plugged */
+#define QUEUE_FLAG_NOSPLIT 1 /* can process bio over several goes */
+
+#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+
+#define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+
+#define blk_queue_empty(q) elv_queue_empty(q)
+
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+/*
+ * noop, requests are automagically marked as active/inactive by I/O
+ * scheduler -- see elv_next_request
+ */
+#define blk_queue_headactive(q, head_active)
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+#define __elv_next_request(q) (q)->elevator.elevator_next_req_fn((q))
+
+extern inline struct request *elv_next_request(request_queue_t *q)
+{
+ struct request *rq = __elv_next_request(q);
+
+ if (rq) {
+ rq->inactive = 0;
+ wmb();
+
+ if (rq->bio)
+ bio_hash_remove(rq->bio);
+ if (rq->biotail)
+ bio_hash_remove(rq->biotail);
+ }
+
+ return rq;
+}
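
Since ->queue is gone and requests are marked active by the elevator, a driver's request_fn should fetch work through elv_next_request() and unlink it with blkdev_dequeue_request() rather than touching ->queuelist by hand. A hedged sketch of such a loop follows; example_transfer() and example_finish() are hypothetical stand-ins for the driver's data transfer and completion code:

static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);	/* never list_del() ->queuelist yourself */

		/* rq->sector, rq->nr_sectors and the rq->bio chain describe the I/O */
		example_transfer(rq);

		example_finish(rq);	/* end the bios, then blkdev_release_request(rq) */
	}
}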
+
+#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+
+#ifdef CONFIG_HIGHMEM
+
+extern void create_bounce(struct bio **bio_orig, int gfp_mask);
+
+extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+{
+ struct page *page = bio_page(*bio);
+
+ if (page - page->zone->zone_mem_map > q->bounce_pfn)
+ create_bounce(bio, q->bounce_gfp);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define blk_queue_bounce(q, bio) do { } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#define rq_for_each_bio(bio, rq) \
+ for (bio = (rq)->bio; bio; bio = bio->bi_next)
+
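A single request can now span a chain of bios from rq->bio through rq->biotail, and rq_for_each_bio() is the supported way to walk it. A hedged sketch that totals the sectors in a request; bio_sectors() is assumed to come from the companion bio header, not from this file:

static unsigned long example_rq_sectors(struct request *rq)
{
	struct bio *bio;
	unsigned long sectors = 0;

	/* visits every bio from rq->bio through rq->biotail */
	rq_for_each_bio(bio, rq)
		sectors += bio_sectors(bio);	/* assumed from <linux/bio.h> */

	return sectors;
}
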
struct blk_dev_struct {
/*
* queue_proc has to be atomic
@@ -148,68 +224,78 @@ struct sec_size {
extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
-extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
+extern void grok_partitions(kdev_t dev, long size);
+extern int wipe_partitions(kdev_t dev);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
-extern void generic_make_request(int rw, struct buffer_head * bh);
+extern void generic_make_request(struct bio *bio);
extern inline request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);
+extern void blk_attempt_remerge(request_queue_t *, struct request *);
/*
* Access functions for manipulating queue properties
*/
-extern void blk_init_queue(request_queue_t *, request_fn_proc *);
+extern int blk_init_queue(request_queue_t *, request_fn_proc *, char *);
extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void blk_queue_bounce_limit(request_queue_t *, unsigned long long);
+extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
+extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void generic_unplug_device(void *);
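
The old per-major hardsect_size[], max_sectors[] and max_segments[] arrays are replaced by per-queue limits, and blk_init_queue() now returns an error and takes a name. A hedged sketch of a driver init path under the new interface; the limits and the "example" name are illustrative only, and example_request_fn is the driver's (hypothetical) request handler:

static int example_init_queue(request_queue_t *q)
{
	int ret;

	ret = blk_init_queue(q, example_request_fn, "example");
	if (ret)
		return ret;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce pages above low memory */
	blk_queue_max_sectors(q, MAX_SECTORS);
	blk_queue_max_segments(q, MAX_SEGMENTS);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
	blk_queue_hardsect_size(q, 512);

	return 0;
}

Drivers that build scatter-gather lists would then call blk_rq_map_sg() on each request instead of walking buffer heads themselves.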
extern int * blk_size[MAX_BLKDEV];
extern int * blksize_size[MAX_BLKDEV];
-extern int * hardsect_size[MAX_BLKDEV];
-
extern int * max_readahead[MAX_BLKDEV];
-extern int * max_sectors[MAX_BLKDEV];
-
-extern int * max_segments[MAX_BLKDEV];
-
#define MAX_SEGMENTS 128
#define MAX_SECTORS 255
-#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
+#define MAX_SEGMENT_SIZE 65536
/* read-ahead in pages.. */
#define MAX_READAHEAD 31
#define MIN_READAHEAD 3
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
-#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
-#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
extern void drive_stat_acct (kdev_t dev, int rw,
unsigned long nr_sectors, int new_io);
-static inline int get_hardsect_size(kdev_t dev)
+extern inline void blk_clear(int major)
{
+ blk_size[major] = NULL;
+#if 0
+ blk_size_in_bytes[major] = NULL;
+#endif
+ blksize_size[major] = NULL;
+ max_readahead[major] = NULL;
+ read_ahead[major] = 0;
+}
+
+extern inline int get_hardsect_size(kdev_t dev)
+{
+ request_queue_t *q = blk_get_queue(dev);
int retval = 512;
- int major = MAJOR(dev);
- if (hardsect_size[major]) {
- int minor = MINOR(dev);
- if (hardsect_size[major][minor])
- retval = hardsect_size[major][minor];
- }
+ if (q && q->hardsect_size)
+ retval = q->hardsect_size;
+
return retval;
}
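
get_hardsect_size() now asks the device's queue instead of the removed hardsect_size[] array, defaulting to 512 when the queue has nothing to say. A small hedged usage sketch checking that a transfer is aligned to the hardware sector size:

static int example_check_alignment(kdev_t dev, unsigned int nbytes)
{
	int hw = get_hardsect_size(dev);	/* 512 unless the queue sets hardsect_size */

	/* hardware sector sizes are powers of two, so a mask test suffices */
	return (nbytes & (hw - 1)) ? -EINVAL : 0;
}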
#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)
-static inline unsigned int blksize_bits(unsigned int size)
+extern inline unsigned int blksize_bits(unsigned int size)
{
unsigned int bits = 8;
do {
@@ -219,7 +305,7 @@ static inline unsigned int blksize_bits(unsigned int size)
return bits;
}
-static inline unsigned int block_size(kdev_t dev)
+extern inline unsigned int block_size(kdev_t dev)
{
int retval = BLOCK_SIZE;
int major = MAJOR(dev);