author    Ingo Molnar <mingo@kernel.org>    2024-04-03 09:13:09 +0200
committer Ingo Molnar <mingo@kernel.org>    2024-04-03 09:13:09 +0200
commit    9b4e528557944dff694c8afa5a8912de81503bf2 (patch)
tree      7d4ff38fbf55e1d32e3384cac0ae2fc80d4ae179 /block
parent    dfbc411e0a5ea72fdd563b2c7d627e9d993d865c (diff)
parent    39cd87c4eb2b893354f3b850f916353f2658ae6f (diff)
Merge tag 'v6.9-rc2' into perf/core, to pick up dependent commits
Pick up fixes that followup patches are going to depend on.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--  block/bdev.c          7
-rw-r--r--  block/blk-merge.c     2
-rw-r--r--  block/blk-mq.c        9
-rw-r--r--  block/blk-settings.c  3
-rw-r--r--  block/blk.h           1
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/block/bdev.c b/block/bdev.c
index e7adaaf1c219..7a5f611c3d2e 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -583,6 +583,9 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
 	mutex_unlock(&bdev->bd_holder_lock);
 	bd_clear_claiming(whole, holder);
 	mutex_unlock(&bdev_lock);
+
+	if (hops && hops->get_holder)
+		hops->get_holder(holder);
 }
 
 /**
@@ -605,6 +608,7 @@ EXPORT_SYMBOL(bd_abort_claiming);
 static void bd_end_claim(struct block_device *bdev, void *holder)
 {
 	struct block_device *whole = bdev_whole(bdev);
+	const struct blk_holder_ops *hops = bdev->bd_holder_ops;
 	bool unblock = false;
 
 	/*
@@ -627,6 +631,9 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
 	whole->bd_holder = NULL;
 	mutex_unlock(&bdev_lock);
 
+	if (hops && hops->put_holder)
+		hops->put_holder(holder);
+
 	/*
 	 * If this was the last claim, remove holder link and unblock evpoll if
 	 * it was a write holder.
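Taken together, these bdev.c hunks have bd_finish_claiming() and bd_end_claim() invoke optional get_holder()/put_holder() hooks from blk_holder_ops, so a holder object can be pinned for as long as it holds a claim on the device. Below is a minimal sketch of a claimer wiring up these hooks; the struct my_holder type and the my_* names are hypothetical, and only the get_holder/put_holder members come from this diff:

#include <linux/blkdev.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical refcounted holder object. */
struct my_holder {
	struct kref ref;
};

static void my_holder_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_holder, ref));
}

/* Invoked by bd_finish_claiming() once the claim succeeds. */
static void my_get_holder(void *holder)
{
	kref_get(&((struct my_holder *)holder)->ref);
}

/* Invoked by bd_end_claim() when the claim is released. */
static void my_put_holder(void *holder)
{
	kref_put(&((struct my_holder *)holder)->ref, my_holder_release);
}

static const struct blk_holder_ops my_holder_ops = {
	.get_holder	= my_get_holder,
	.put_holder	= my_put_holder,
};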
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2a06fd33039d..4e3483a16b75 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -726,7 +726,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  * which can be mixed are set in each bio and mark @rq as mixed
  * merged.
  */
-void blk_rq_set_mixed_merge(struct request *rq)
+static void blk_rq_set_mixed_merge(struct request *rq)
 {
 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 	struct bio *bio;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 555ada922cf0..32afb87efbd0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -770,16 +770,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		/*
 		 * Partial zone append completions cannot be supported as the
 		 * BIO fragments may end up not being written sequentially.
-		 * For such case, force the completed nbytes to be equal to
-		 * the BIO size so that bio_advance() sets the BIO remaining
-		 * size to 0 and we end up calling bio_endio() before returning.
 		 */
-		if (bio->bi_iter.bi_size != nbytes) {
+		if (bio->bi_iter.bi_size != nbytes)
 			bio->bi_status = BLK_STS_IOERR;
-			nbytes = bio->bi_iter.bi_size;
-		} else {
+		else
 			bio->bi_iter.bi_sector = rq->__sector;
-		}
 	}
 
 	bio_advance(bio, nbytes);
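This hunk changes how req_bio_endio() handles a partially completed zone-append BIO: instead of faking a full-size completion that carried the error, it now just marks the BIO BLK_STS_IOERR (the BIO then completes only once all fragments are accounted for), and on a full successful completion it copies the sector the data actually landed at into bi_iter.bi_sector. A hedged sketch of how a zone-append submitter might consume that sector in its completion callback; the handler name is illustrative:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Runs when the zone-append BIO completes; on success the block layer
 * has stored the write's final on-disk location in bi_iter.bi_sector. */
static void my_zone_append_endio(struct bio *bio)
{
	if (bio->bi_status == BLK_STS_OK) {
		sector_t written = bio->bi_iter.bi_sector;

		/* record 'written' as the location of the appended data */
		(void)written;
	}
	bio_put(bio);
}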
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3c7d8d638ab5..cdbaef159c4b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -146,8 +146,7 @@ static int blk_validate_limits(struct queue_limits *lim)
 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
 			lim->max_dev_sectors);
 	if (lim->max_user_sectors) {
-		if (lim->max_user_sectors > max_hw_sectors ||
-		    lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
 			return -EINVAL;
 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
 	} else {
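In other words, a user-requested limit above the hardware limit is no longer rejected, because the min() on the following line already clamps it; only values too small to hold a single page still fail validation. For example, with max_hw_sectors == 2048, setting max_user_sectors = 4096 now yields max_sectors = min(2048, 4096) = 2048 instead of blk_validate_limits() returning -EINVAL.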
diff --git a/block/blk.h b/block/blk.h
index 5cac4e29ae17..d9f584984bc4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -339,7 +339,6 @@ int ll_back_merge_fn(struct request *req, struct bio *bio,
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
 unsigned int blk_recalc_rq_segments(struct request *rq);
-void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);