Diffstat (limited to 'block')
 block/bdev.c         | 25
 block/blk-cgroup.c   | 13
 block/blk-crypto.c   |  2
 block/blk-mq-sched.c |  2
 block/blk-mq-tag.c   |  5
 block/blk-mq.c       |  2
 block/blk-mq.h       |  3
 block/blk-settings.c | 10
 block/fops.c         |  5
 9 files changed, 46 insertions(+), 21 deletions(-)
diff --git a/block/bdev.c b/block/bdev.c
index 810707cca970..b8fbb9576110 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -67,7 +67,7 @@ static void bdev_write_inode(struct block_device *bdev)
int ret;
spin_lock(&inode->i_lock);
- while (inode->i_state & I_DIRTY) {
+ while (inode_state_read(inode) & I_DIRTY) {
spin_unlock(&inode->i_lock);
ret = write_inode_now(inode, true);
if (ret)
@@ -217,9 +217,26 @@ int set_blocksize(struct file *file, int size)
EXPORT_SYMBOL(set_blocksize);
+static int sb_validate_large_blocksize(struct super_block *sb, int size)
+{
+ const char *err_str = NULL;
+
+ if (!(sb->s_type->fs_flags & FS_LBS))
+ err_str = "not supported by filesystem";
+ else if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ err_str = "is only supported with CONFIG_TRANSPARENT_HUGEPAGE";
+
+ if (!err_str)
+ return 0;
+
+ pr_warn_ratelimited("%s: block size(%d) > page size(%lu) %s\n",
+ sb->s_type->name, size, PAGE_SIZE, err_str);
+ return -EINVAL;
+}
+
int sb_set_blocksize(struct super_block *sb, int size)
{
- if (!(sb->s_type->fs_flags & FS_LBS) && size > PAGE_SIZE)
+ if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size))
return 0;
if (set_blocksize(sb->s_bdev_file, size))
return 0;
@@ -231,7 +248,7 @@ int sb_set_blocksize(struct super_block *sb, int size)
EXPORT_SYMBOL(sb_set_blocksize);
-int sb_min_blocksize(struct super_block *sb, int size)
+int __must_check sb_min_blocksize(struct super_block *sb, int size)
{
int minsize = bdev_logical_block_size(sb->s_bdev);
if (size < minsize)
@@ -1265,7 +1282,7 @@ void sync_bdevs(bool wait)
struct block_device *bdev;
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+ if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) ||
mapping->nrpages == 0) {
spin_unlock(&inode->i_lock);
continue;
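
With the refactor above, sb_set_blocksize() only accepts a block size larger than PAGE_SIZE when the filesystem sets FS_LBS and the kernel is built with CONFIG_TRANSPARENT_HUGEPAGE; anything else gets a ratelimited warning from sb_validate_large_blocksize() and is rejected. A hedged illustration of the opt-in (FS_LBS and struct file_system_type are real; the filesystem and its init_fs_context helper are hypothetical):

/* Hypothetical filesystem declaring large-block-size support. */
static struct file_system_type myfs_fs_type = {
        .owner           = THIS_MODULE,
        .name            = "myfs",
        .init_fs_context = myfs_init_fs_context,        /* hypothetical */
        .fs_flags        = FS_REQUIRES_DEV | FS_LBS,
};

The i_state accesses in this file also move to the inode_state_read() accessor. A minimal sketch of the shape such an accessor plausibly has, assuming it is a thin wrapper over the locked field read (the in-tree helper may carry additional annotations or ordering guarantees):

/* Sketch only: both call sites above hold inode->i_lock. */
static inline unsigned long inode_state_read(struct inode *inode)
{
        lockdep_assert_held(&inode->i_lock);    /* assumed contract */
        return inode->i_state;
}
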
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f93de34fe87d..3cffb68ba5d8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
}
/*
* Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
*
* This function returns negative error on failure. On success it returns
* memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
* At this point, we haven’t started protecting anything related to QoS,
* so we release q->rq_qos_mutex here, which was first acquired in blkg_
* conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
- * the queue and acquiring q->elevator_lock to maintain the correct
- * locking order.
+ * the queue to maintain the correct locking order.
*/
mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
- mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
return memflags;
@@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
EXPORT_SYMBOL_GPL(blkg_conf_exit);
/*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
*/
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
{
@@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
struct request_queue *q = ctx->bdev->bd_queue;
blkg_conf_exit(ctx);
- mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
}
}
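
With q->elevator_lock dropped from this path, the ordering these helpers establish is: freeze the queue first, then take rq_qos_mutex. A hedged caller-side sketch of the intended pairing, inferred from the contract documented above (the helper names are from the diff; the surrounding error handling is an assumption):

/* Hedged caller sketch; negative errors are encoded in the returned memflags. */
static int example_qos_write(char *input)
{
        struct blkg_conf_ctx ctx;
        unsigned long memflags;
        int ret;

        blkg_conf_init(&ctx, input);

        memflags = blkg_conf_open_bdev_frozen(&ctx);    /* freeze queue, then rq_qos_mutex */
        if (IS_ERR_VALUE(memflags)) {
                ret = memflags;
                blkg_conf_exit(&ctx);
                return ret;
        }

        /* ... reconfigure QoS: queue frozen, rq_qos_mutex held ... */

        blkg_conf_exit_frozen(&ctx, memflags);          /* unlocks, then unfreezes */
        return 0;
}
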
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 4b1ad84d1b5a..3e7bf1974cbd 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}
if (!bio_crypt_check_alignment(bio)) {
- bio->bi_status = BLK_STS_IOERR;
+ bio->bi_status = BLK_STS_INVAL;
goto fail;
}
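
A misaligned encrypted bio is a submitter error rather than a device failure, so it now completes with BLK_STS_INVAL. Assuming the usual blk_status_to_errno() mapping, callers observe -EINVAL instead of the -EIO that BLK_STS_IOERR produced; a sketch of the visible difference (example_crypto_end_io is hypothetical):

static void example_crypto_end_io(struct bio *bio)
{
        int err = blk_status_to_errno(bio->bi_status);

        if (err == -EINVAL)     /* BLK_STS_INVAL: caller-side misalignment, assumed mapping */
                pr_debug("rejecting misaligned encrypted bio\n");
        bio_put(bio);
}
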
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index d06bb137a743..e0bed16485c3 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
if (blk_mq_is_shared_tags(flags)) {
/* Shared tags are stored at index 0 in @et->tags. */
q->sched_shared_tags = et->tags[0];
- blk_mq_tag_update_sched_shared_tags(q);
+ blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
}
queue_for_each_hw_ctx(q, hctx, i) {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c7a4d4b9cc87..5b664dbdf655 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+ unsigned int nr)
{
sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
- q->nr_requests - q->tag_set->reserved_tags);
+ nr - q->tag_set->reserved_tags);
}
/**
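
Passing the new depth into blk_mq_tag_update_sched_shared_tags() (declared in blk-mq.h and called from blk-mq-sched.c above and blk-mq.c below) removes its dependence on q->nr_requests at resize time. A simplified sketch mirroring the blk_mq_update_nr_requests() hunk below; the suggestion that q->nr_requests may still hold the old value at this point is an inference from the diff, not stated in it:

/* Simplified from the blk_mq_update_nr_requests() hunk below. */
static void example_resize_shared_tags(struct request_queue *q,
                                       struct blk_mq_tag_set *set,
                                       unsigned int nr)
{
        if (q->elevator)
                blk_mq_tag_update_sched_shared_tags(q, nr);
        else
                blk_mq_tag_resize_shared_tags(set, nr);
}
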
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 09f579414161..d626d32f6e57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
* tags can't grow, see blk_mq_alloc_sched_tags().
*/
if (q->elevator)
- blk_mq_tag_update_sched_shared_tags(q);
+ blk_mq_tag_update_sched_shared_tags(q, nr);
else
blk_mq_tag_resize_shared_tags(set, nr);
} else if (!q->elevator) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index af42dc018808..c4fccdeb5441 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+ unsigned int nr);
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 54cffaae4df4..d74b13ec8e54 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
+ /*
+ * The PI generation / validation helpers do not expect intervals to
+ * straddle multiple bio_vecs. Enforce alignment so that those are
+ * never generated, and that each buffer is aligned as expected.
+ */
+ if (bi->csum_type) {
+ lim->dma_alignment = max(lim->dma_alignment,
+ (1U << bi->interval_exp) - 1);
+ }
+
return 0;
}
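
A worked example of the new integrity limit, with assumed typical values: for 512-byte protection intervals, interval_exp defaults to ilog2(512) = 9, so the mask is (1 << 9) - 1 = 511 and user buffers must be 512-byte aligned, which is what keeps each interval inside a single bio_vec:

/* Worked example with assumed values: 512-byte logical blocks, PI enabled. */
static void example_pi_alignment(struct queue_limits *lim)
{
        unsigned int interval_exp = ilog2(512);         /* 9 */
        unsigned int mask = (1U << interval_exp) - 1;   /* 0x1ff == 511 */

        /* Matches the hunk above: buffers must be interval-aligned. */
        lim->dma_alignment = max(lim->dma_alignment, mask);
}
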
diff --git a/block/fops.c b/block/fops.c
index 5e3db9fead77..4dad9c2d5796 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -540,12 +540,13 @@ const struct address_space_operations def_blk_aops = {
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &blkdev_iomap_ops);
+ iomap_bio_read_folio(folio, &blkdev_iomap_ops);
+ return 0;
}
static void blkdev_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &blkdev_iomap_ops);
+ iomap_bio_readahead(rac, &blkdev_iomap_ops);
}
static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
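
Both read-path helpers used above return void, which is why blkdev_read_folio() now returns 0 unconditionally: completion status reaches the caller through folio flags when the underlying bio finishes, not through the return value. A sketch of the same pattern for a hypothetical filesystem (the myfs_* names and its iomap_ops are assumptions):

static const struct iomap_ops myfs_iomap_ops;   /* hypothetical */

static int myfs_read_folio(struct file *file, struct folio *folio)
{
        /* Status is signalled via folio flags at bio completion. */
        iomap_bio_read_folio(folio, &myfs_iomap_ops);
        return 0;
}

static void myfs_readahead(struct readahead_control *rac)
{
        iomap_bio_readahead(rac, &myfs_iomap_ops);
}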