summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2026-01-17 08:27:23 -0700
committerJens Axboe <axboe@kernel.dk>2026-01-27 11:10:46 -0700
commite7c30675a7fb79d94400987865a3bd620458ca1a (patch)
tree59e7c274e821fc3f683702d96ed07bf73de96140
parent8768770cf5d76d177fa2200e6957a372e61e06b5 (diff)
io_uring/bpf_filter: cache lookup table in ctx->bpf_filters
Currently a few pointer dereferences need to be made both to check whether BPF filters are installed and to retrieve the actual filter for the opcode. Cache the table in ctx->bpf_filters to avoid that. Add a bit of debug info on ring exit to show if we ever got this wrong. There is a small risk of that, given that the table is currently only updated in one spot, but once task forking is enabled, that will add one more spot. Reviewed-by: Christian Brauner <brauner@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--include/linux/io_uring_types.h2
-rw-r--r--io_uring/bpf_filter.c7
-rw-r--r--io_uring/bpf_filter.h10
-rw-r--r--io_uring/io_uring.c11
-rw-r--r--io_uring/register.c3
5 files changed, 23 insertions(+), 10 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 74bf98362876..7617df247238 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -287,6 +287,8 @@ struct io_ring_ctx {
struct task_struct *submitter_task;
struct io_rings *rings;
+ /* cache of ->restrictions.bpf_filters->filters */
+ struct io_bpf_filter __rcu **bpf_filters;
struct percpu_ref refs;
clockid_t clockid;
diff --git a/io_uring/bpf_filter.c b/io_uring/bpf_filter.c
index ff723ec44828..1409d625b686 100644
--- a/io_uring/bpf_filter.c
+++ b/io_uring/bpf_filter.c
@@ -58,14 +58,15 @@ static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
* __io_uring_run_bpf_filters() returns 0 on success, allow running the
* request, and -EACCES when a request is denied.
*/
-int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req)
+int __io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
+ struct io_kiocb *req)
{
struct io_bpf_filter *filter;
struct io_uring_bpf_ctx bpf_ctx;
int ret;
/* Fast check for existence of filters outside of RCU */
- if (!rcu_access_pointer(res->bpf_filters->filters[req->opcode]))
+ if (!rcu_access_pointer(filters[req->opcode]))
return 0;
/*
@@ -73,7 +74,7 @@ int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req)
* of what we expect, io_init_req() does this.
*/
guard(rcu)();
- filter = rcu_dereference(res->bpf_filters->filters[req->opcode]);
+ filter = rcu_dereference(filters[req->opcode]);
if (!filter)
return 0;
else if (filter == &dummy_filter)
diff --git a/io_uring/bpf_filter.h b/io_uring/bpf_filter.h
index 27eae9705473..9f3cdb92eb16 100644
--- a/io_uring/bpf_filter.h
+++ b/io_uring/bpf_filter.h
@@ -6,18 +6,18 @@
#ifdef CONFIG_IO_URING_BPF
-int __io_uring_run_bpf_filters(struct io_restriction *res, struct io_kiocb *req);
+int __io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters, struct io_kiocb *req);
int io_register_bpf_filter(struct io_restriction *res,
struct io_uring_bpf __user *arg);
void io_put_bpf_filters(struct io_restriction *res);
-static inline int io_uring_run_bpf_filters(struct io_restriction *res,
+static inline int io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
struct io_kiocb *req)
{
- if (res->bpf_filters)
- return __io_uring_run_bpf_filters(res, req);
+ if (filters)
+ return __io_uring_run_bpf_filters(filters, req);
return 0;
}
@@ -29,7 +29,7 @@ static inline int io_register_bpf_filter(struct io_restriction *res,
{
return -EINVAL;
}
-static inline int io_uring_run_bpf_filters(struct io_restriction *res,
+static inline int io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
struct io_kiocb *req)
{
return 0;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9b9794dfc27a..049454278563 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1875,8 +1875,8 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret);
- if (unlikely(ctx->restrictions.bpf_filters)) {
- ret = io_uring_run_bpf_filters(&ctx->restrictions, req);
+ if (unlikely(ctx->bpf_filters)) {
+ ret = io_uring_run_bpf_filters(ctx->bpf_filters, req);
if (ret)
return io_submit_fail_init(sqe, req, ret);
}
@@ -2168,6 +2168,13 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
io_req_caches_free(ctx);
+
+ if (ctx->restrictions.bpf_filters) {
+ WARN_ON_ONCE(ctx->bpf_filters !=
+ ctx->restrictions.bpf_filters->filters);
+ } else {
+ WARN_ON_ONCE(ctx->bpf_filters);
+ }
io_put_bpf_filters(&ctx->restrictions);
WARN_ON_ONCE(ctx->nr_req_allocated);
diff --git a/io_uring/register.c b/io_uring/register.c
index 30957c2cb5eb..40de9b8924b9 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -837,6 +837,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
if (nr_args != 1)
break;
ret = io_register_bpf_filter(&ctx->restrictions, arg);
+ if (!ret)
+ WRITE_ONCE(ctx->bpf_filters,
+ ctx->restrictions.bpf_filters->filters);
break;
default:
ret = -EINVAL;