Diffstat (limited to 'io_uring/kbuf.c')
-rw-r--r--	io_uring/kbuf.c	39
1 file changed, 25 insertions, 14 deletions
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index aad655e38672..8a329556f8df 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -155,6 +155,27 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
 	return 1;
 }
 
+static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
+{
+	/*
+	 * If we came in unlocked, we have no choice but to consume the
+	 * buffer here, otherwise nothing ensures that the buffer won't
+	 * get used by others. This does mean it'll be pinned until the
+	 * IO completes, coming in unlocked means we're being called from
+	 * io-wq context and there may be further retries in async hybrid
+	 * mode. For the locked case, the caller must call commit when
+	 * the transfer completes (or if we get -EAGAIN and must poll or
+	 * retry).
+	 */
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		return true;
+
+	/* uring_cmd commits kbuf upfront, no need to auto-commit */
+	if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+		return true;
+	return false;
+}
+
 static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 					      struct io_buffer_list *bl,
 					      unsigned int issue_flags)
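The helper added above reduces the commit decision to three inputs: whether the request came in with IO_URING_F_UNLOCKED, whether the file can poll, and whether the opcode is IORING_OP_URING_CMD. The standalone userspace mock below is purely illustrative (plain bools stand in for issue_flags, io_file_can_poll() and req->opcode; it is not kernel code) and simply enumerates the combinations to show which ones consume the buffer at selection time:

/*
 * Illustrative userspace mock of the io_should_commit() decision above.
 * Plain bools stand in for the kernel-side inputs; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_commit(bool unlocked, bool can_poll, bool is_uring_cmd)
{
	if (unlocked)		/* io-wq context: consume the buffer now */
		return true;
	/* non-pollable files commit now, except uring_cmd which commits itself */
	if (!can_poll && !is_uring_cmd)
		return true;
	/* otherwise defer: the caller (or uring_cmd itself) handles the commit */
	return false;
}

int main(void)
{
	for (int u = 0; u <= 1; u++)
		for (int p = 0; p <= 1; p++)
			for (int c = 0; c <= 1; c++)
				printf("unlocked=%d can_poll=%d uring_cmd=%d -> %s\n",
				       u, p, c,
				       should_commit(u, p, c) ? "commit now" : "defer");
	return 0;
}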
@@ -181,17 +202,7 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	sel.buf_list = bl;
 	sel.addr = u64_to_user_ptr(buf->addr);
-	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
-		/*
-		 * If we came in unlocked, we have no choice but to consume the
-		 * buffer here, otherwise nothing ensures that the buffer won't
-		 * get used by others. This does mean it'll be pinned until the
-		 * IO completes, coming in unlocked means we're being called from
-		 * io-wq context and there may be further retries in async hybrid
-		 * mode. For the locked case, the caller must call commit when
-		 * the transfer completes (or if we get -EAGAIN and must poll of
-		 * retry).
-		 */
+	if (io_should_commit(req, issue_flags)) {
 		io_kbuf_commit(req, sel.buf_list, *len, 1);
 		sel.buf_list = NULL;
 	}
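For callers, the contract after this hunk is the same as before the refactor: when io_should_commit() fires, the buffer is committed at selection time and sel.buf_list is cleared, so a NULL buf_list on return means the selection path already consumed the buffer, while a non-NULL buf_list means the handler still owes an io_kbuf_commit() once the transfer size is known. A rough, hypothetical handler-side sketch of that contract follows; only io_ring_buffer_select(), struct io_br_sel and io_kbuf_commit() are taken from this diff, and the handler shape, do_transfer() and the -ENOBUFS convention are assumptions:

/* Hypothetical sketch of the caller-side commit contract described above. */
static int example_handler(struct io_kiocb *req, struct io_buffer_list *bl,
			   unsigned int issue_flags)
{
	struct io_br_sel sel;
	size_t len = 0;
	ssize_t ret;

	sel = io_ring_buffer_select(req, &len, bl, issue_flags);
	if (!sel.addr)				/* assumed "no buffer available" convention */
		return -ENOBUFS;

	ret = do_transfer(req, sel.addr, len);	/* hypothetical I/O step */

	/*
	 * If selection already committed (unlocked/io-wq, or a non-pollable
	 * file that is not a uring_cmd), sel.buf_list was cleared and we must
	 * not commit again.  Otherwise commit now that the byte count is known.
	 */
	if (sel.buf_list && ret > 0)
		io_kbuf_commit(req, sel.buf_list, ret, 1);
	return ret < 0 ? ret : 0;
}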
@@ -417,7 +428,7 @@ static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
 static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
 {
 	if (bl->flags & IOBL_BUF_RING)
-		io_free_region(ctx, &bl->region);
+		io_free_region(ctx->user, &bl->region);
 	else
 		io_remove_buffers_legacy(ctx, bl, -1U);
@@ -630,7 +641,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		rd.user_addr = reg.ring_addr;
 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
 	}
-	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
+	ret = io_create_region(ctx, &bl->region, &rd, mmap_offset);
 	if (ret)
 		goto fail;
 	br = io_region_get_ptr(&bl->region);
@@ -661,7 +672,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	io_buffer_add_list(ctx, bl, reg.bgid);
 	return 0;
 fail:
-	io_free_region(ctx, &bl->region);
+	io_free_region(ctx->user, &bl->region);
 	kfree(bl);
 	return ret;
 }
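The last three hunks are a matched pair of API adjustments for the buffer-ring region: registration now calls io_create_region() directly rather than io_create_region_mmap_safe(), and both free paths pass ctx->user instead of the whole ctx to io_free_region(). A condensed sketch of the resulting setup/teardown pairing is below; it uses only the calls visible in the hunks above, the parameter types are inferred from those call sites, and everything else (error handling, publishing the ring) is elided:

/* Condensed, assumption-level sketch of the create/free pairing after this change. */
static int example_region_setup(struct io_ring_ctx *ctx, struct io_buffer_list *bl,
				struct io_uring_region_desc *rd,
				unsigned long mmap_offset)
{
	int ret;

	/* Map or allocate the ring region for this buffer group. */
	ret = io_create_region(ctx, &bl->region, rd, mmap_offset);
	if (ret)
		return ret;

	/* ... publish the ring and add bl to the ctx here ... */
	return 0;
}

static void example_region_teardown(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	/* Freeing now only needs the user-accounting side of the ctx. */
	io_free_region(ctx->user, &bl->region);
}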