summaryrefslogtreecommitdiff
path: root/drivers/block
diff options
context:
space:
mode:
authorJames Bottomley <jejb@mulgrave.(none)>2002-10-28 00:01:06 -0600
committerJames Bottomley <jejb@mulgrave.(none)>2002-10-28 00:01:06 -0600
commitbd24aecd5ef5b33f812baeebed78b80cf6b9bc5f (patch)
treecd8fb2f2afee4260774c25a622ee18274f989ac3 /drivers/block
parentea3bedb62dbd2ea5ffba44f347949a56909a217b (diff)
parentc5868eb784c333e89aa404ac4936a3e29f224303 (diff)
merge by hand of axboe/jejb changes
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/deadline-iosched.c6
-rw-r--r--drivers/block/elevator.c71
-rw-r--r--drivers/block/ll_rw_blk.c44
3 files changed, 92 insertions, 29 deletions
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index cdd73931df6d..c104ed722da3 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
while ((entry = next) != hash_list) {
next = entry->next;
+ prefetch(next);
+
drq = list_entry_hash(entry);
BUG_ON(!drq->hash_valid_count);
@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
+ prefetch(entry->prev);
+
BUG_ON(__rq->flags & REQ_STARTED);
if (!(__rq->flags & REQ_CMD))
@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
+ prefetch(nxt);
+
/*
* take it off the sort and fifo list, move
* to dispatch queue
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 0b1517f93501..0b6444379796 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
e->elevator_merge_req_fn(q, rq, next);
}
-/*
- * add_request and next_request are required to be supported, naturally
- */
-void __elv_add_request(request_queue_t *q, struct request *rq,
- struct list_head *insert_here)
+void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+ int plug)
+{
+ struct list_head *insert = &q->queue_head;
+
+ if (at_end)
+ insert = insert->prev;
+ if (plug)
+ blk_plug_device(q);
+
+ q->elevator.elevator_add_req_fn(q, rq, insert);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+ int plug)
{
- q->elevator.elevator_add_req_fn(q, rq, insert_here);
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __elv_add_request(q, rq, at_end, plug);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
static inline struct request *__elv_next_request(request_queue_t *q)
@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
struct request *elv_next_request(request_queue_t *q)
{
struct request *rq;
+ int ret;
while ((rq = __elv_next_request(q))) {
+ /*
+ * just mark as started even if we don't start it, a request
+ * that has been delayed should not be passed by new incoming
+ * requests
+ */
rq->flags |= REQ_STARTED;
if (&rq->queuelist == q->last_merge)
@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
- /*
- * all ok, break and return it
- */
- if (!q->prep_rq_fn(q, rq))
+ ret = q->prep_rq_fn(q, rq);
+ if (ret == BLKPREP_OK) {
break;
-
- /*
- * prep said no-go, kill it
- */
- blkdev_dequeue_request(rq);
- if (end_that_request_first(rq, 0, rq->nr_sectors))
- BUG();
-
- end_that_request_last(rq);
+ } else if (ret == BLKPREP_DEFER) {
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+ blkdev_dequeue_request(rq);
+ rq->flags |= REQ_QUIET;
+ while (end_that_request_first(rq, 0, rq->nr_sectors))
+ ;
+ end_that_request_last(rq);
+ } else {
+ printk("%s: bad return=%d\n", __FUNCTION__, ret);
+ break;
+ }
}
return rq;
@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = &q->elevator;
+ /*
+ * the main clearing point for q->last_merge is on retrieval of
+ * request by driver (it calls elv_next_request()), but it _can_
+ * also happen here if a request is added to the queue but later
+ * deleted without ever being given to driver (merged with another
+ * request).
+ */
+ if (&rq->queuelist == q->last_merge)
+ q->last_merge = NULL;
+
if (e->elevator_remove_req_fn)
e->elevator_remove_req_fn(q, rq);
}
@@ -357,6 +389,7 @@ module_init(elevator_global_init);
EXPORT_SYMBOL(elevator_noop);
+EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_remove_request);
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index c5697e05a6b8..edf890268759 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.state = 0;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
+ blk_queue_dma_alignment(q, 511);
/*
* by default assume old behaviour and bounce for any highmem page
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
q->seg_boundary_mask = mask;
}
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * description:
+ * set required memory and length alignment for direct dma transactions.
+ * this is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+ q->dma_alignment = mask;
+}
+
void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
{
spin_lock_init(lock);
@@ -639,7 +655,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
blk_queue_end_tag(q, rq);
rq->flags &= ~REQ_STARTED;
- elv_add_request(q, rq, 0);
+ __elv_add_request(q, rq, 0, 0);
}
}
@@ -655,14 +671,19 @@ static char *rq_flags[] = {
"REQ_PC",
"REQ_BLOCK_PC",
"REQ_SENSE",
+ "REQ_FAILED",
+ "REQ_QUIET",
- "REQ_SPECIAL"
+ "REQ_SPECIAL",
+ "REQ_DRIVE_CMD",
+ "REQ_DRIVE_TASK",
+ "REQ_DRIVE_TASKFILE",
};
void blk_dump_rq_flags(struct request *rq, char *msg)
{
int bit;
- printk("%s: dev %02x:%02x: ", msg, major(rq->rq_dev), minor(rq->rq_dev));
+ printk("%s: dev %02x:%02x: flags = ", msg, major(rq->rq_dev), minor(rq->rq_dev));
bit = 0;
do {
if (rq->flags & (1 << bit))
@@ -670,10 +691,17 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
bit++;
} while (bit < __REQ_NR_BITS);
- printk("sector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+ printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
rq->nr_sectors,
rq->current_nr_sectors);
- printk("bio %p, biotail %p\n", rq->bio, rq->biotail);
+ printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+
+ if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+ printk("cdb: ");
+ for (bit = 0; bit < sizeof(rq->cmd); bit++)
+ printk("%02x ", rq->cmd[bit]);
+ printk("\n");
+ }
}
void blk_recount_segments(request_queue_t *q, struct bio *bio)
@@ -1466,7 +1494,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
* elevator indicated where it wants this request to be
* inserted at elevator_merge time
*/
- __elv_add_request(q, req, insert_here);
+ __elv_add_request_pos(q, req, insert_here);
}
/*
@@ -1481,11 +1509,6 @@ void blk_put_request(struct request *req)
req->q = NULL;
req->rl = NULL;
- if (q) {
- if (q->last_merge == &req->queuelist)
- q->last_merge = NULL;
- }
-
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -2112,6 +2135,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);