summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-17 08:48:45 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-17 08:48:45 -0800
commit99dfe2d4da67d863ff8f185d1e8033cce28e4c49 (patch)
treea5afa6d4923bf972103f10c176d2c65d0f1ba9a1 /kernel
parent7b751b01ade7f666de2f5c365bd9562c2dcd7d60 (diff)
parentdfe48ea179733be948c432f6af2fc3913cf5dd28 (diff)
Merge tag 'block-7.0-20260216' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull more block updates from Jens Axboe: - Fix partial IOVA mapping cleanup in error handling - Minor prep series ignoring discard return value, as the inline value is always known - Ensure BLK_FEAT_STABLE_WRITES is set for drbd - Fix leak of folio in bio_iov_iter_bounce_read() - Allow IOC_PR_READ_* for read-only open - Another debugfs deadlock fix - A few doc updates * tag 'block-7.0-20260216' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux: blk-mq: use NOIO context to prevent deadlock during debugfs creation blk-stat: convert struct blk_stat_callback to kernel-doc block: fix enum descriptions kernel-doc block: update docs for bio and bvec_iter block: change return type to void nvmet: ignore discard return value md: ignore discard return value block: fix partial IOVA mapping cleanup in blk_rq_dma_map_iova block: fix folio leak in bio_iov_iter_bounce_read() block: allow IOC_PR_READ_* ioctls with BLK_OPEN_READ drbd: always set BLK_FEAT_STABLE_WRITES
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/blktrace.c38
1 file changed, 21 insertions, 17 deletions
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f2de9cf15d0e..e6988929ead2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -559,9 +559,9 @@ int blk_trace_remove(struct request_queue *q)
{
int ret;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
ret = __blk_trace_remove(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
@@ -767,6 +767,7 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct blk_user_trace_setup2 buts2;
struct blk_user_trace_setup buts;
struct blk_trace *bt;
+ unsigned int memflags;
int ret;
ret = copy_from_user(&buts, arg, sizeof(buts));
@@ -785,16 +786,16 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
.pid = buts.pid,
};
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts.buf_size, buts.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
strscpy(buts.name, buts2.name, BLKTRACE_BDEV_SIZE);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts, sizeof(buts))) {
blk_trace_remove(q);
@@ -809,6 +810,7 @@ static int blk_trace_setup2(struct request_queue *q, char *name, dev_t dev,
{
struct blk_user_trace_setup2 buts2;
struct blk_trace *bt;
+ unsigned int memflags;
if (copy_from_user(&buts2, arg, sizeof(buts2)))
return -EFAULT;
@@ -819,15 +821,15 @@ static int blk_trace_setup2(struct request_queue *q, char *name, dev_t dev,
if (buts2.flags != 0)
return -EINVAL;
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 2, bt, &buts2);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts2, sizeof(buts2))) {
blk_trace_remove(q);
@@ -844,6 +846,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
struct blk_user_trace_setup2 buts2;
struct compat_blk_user_trace_setup cbuts;
struct blk_trace *bt;
+ unsigned int memflags;
if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
return -EFAULT;
@@ -860,15 +863,15 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
.pid = cbuts.pid,
};
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts2.name, ARRAY_SIZE(buts2.name))) {
blk_trace_remove(q);
@@ -898,9 +901,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
ret = __blk_trace_startstop(q, start);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
@@ -2041,7 +2044,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct blk_trace *bt;
ssize_t ret = -ENXIO;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->debugfs_mutex));
@@ -2062,7 +2065,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
@@ -2073,6 +2076,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct block_device *bdev = dev_to_bdev(dev);
struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
+ unsigned int memflags;
u64 value;
ssize_t ret = -EINVAL;
@@ -2092,7 +2096,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
goto out;
}
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->debugfs_mutex));
@@ -2127,7 +2131,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
}
out_unlock_bdev:
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
out:
return ret ? ret : count;
}