Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 63 +++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index af5a54b5db12..08882648d569 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -15,6 +15,7 @@
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
@@ -107,34 +108,35 @@ static int io_import_vec(int ddir, struct io_kiocb *req,
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
- struct io_async_rw *io,
- unsigned int issue_flags)
+ struct io_async_rw *io, struct io_br_sel *sel,
+ unsigned int issue_flags)
{
const struct io_issue_def *def = &io_issue_defs[req->opcode];
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- void __user *buf = u64_to_user_ptr(rw->addr);
size_t sqe_len = rw->len;
+ sel->addr = u64_to_user_ptr(rw->addr);
if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
- return io_import_vec(ddir, req, io, buf, sqe_len);
+ return io_import_vec(ddir, req, io, sel->addr, sqe_len);
if (io_do_buffer_select(req)) {
- buf = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
- if (!buf)
+ *sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
+ if (!sel->addr)
return -ENOBUFS;
- rw->addr = (unsigned long) buf;
+ rw->addr = (unsigned long) sel->addr;
rw->len = sqe_len;
}
- return import_ubuf(ddir, buf, sqe_len, &io->iter);
+ return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
}

static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
struct io_async_rw *io,
+ struct io_br_sel *sel,
unsigned int issue_flags)
{
int ret;
- ret = __io_import_rw_buffer(rw, req, io, issue_flags);
+ ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags);
if (unlikely(ret < 0))
return ret;
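
The hunk above threads a new on-stack struct io_br_sel through the buffer-import path: io_buffer_select() now returns the selection by value instead of a bare user pointer. The struct's definition is not part of this diff; going only by the members the patch touches (an addr consumed by import_ubuf(), and a buf_list that the completion paths later hand to io_put_kbuf()/io_kbuf_recycle()), a minimal sketch would be:

/*
 * Sketch reconstructed from usage in this diff, not the real
 * definition (which lives in io_uring/kbuf.h and may carry more state).
 */
struct io_br_sel {
	struct io_buffer_list *buf_list;	/* ring the buffer came from */
	void __user *addr;			/* selected buffer address */
};
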
@@ -153,10 +155,8 @@ static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
io_vec_free(&rw->vec);
- if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
- req->async_data = NULL;
- req->flags &= ~REQ_F_ASYNC_DATA;
- }
+ if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
+ io_req_async_data_clear(req, 0);
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
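
The io_rw_recycle() hunk above replaces the open-coded async-data teardown with io_req_async_data_clear(). Judging purely from the two removed lines, the helper plausibly reduces to the following; the second argument naming extra request flags to drop alongside REQ_F_ASYNC_DATA is an assumption, since its body is not in this diff:

/* Hypothetical reconstruction from the removed lines above. */
static inline void io_req_async_data_clear(struct io_kiocb *req,
					   unsigned int extra_flags)
{
	req->async_data = NULL;
	req->flags &= ~(REQ_F_ASYNC_DATA | extra_flags);
}
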
@@ -306,10 +306,12 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
+ struct io_br_sel sel = { };
+
if (io_do_buffer_select(req))
return 0;
- return io_import_rw_buffer(ddir, req, req->async_data, 0);
+ return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -576,7 +578,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
- req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);
+ req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
io_req_rw_cleanup(req, 0);
io_req_task_complete(req, tw);
@@ -645,7 +647,7 @@ static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
- unsigned int issue_flags)
+ struct io_br_sel *sel, unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned final_ret = io_fixup_rw_res(req, ret);
@@ -659,7 +661,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
* from the submission path.
*/
io_req_io_end(req);
- io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
+ io_req_set_res(req, final_ret, io_put_kbuf(req, ret, sel ? sel->buf_list : NULL));
io_req_rw_cleanup(req, issue_flags);
return IOU_COMPLETE;
} else {
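
The kiocb_done() hunk shows the new calling convention for io_put_kbuf(): instead of issue_flags, from which the put side had to re-derive the buffer list and its locking rules, the caller passes the io_buffer_list pointer captured at selection time, or NULL where no selection state exists (io_write() below passes a NULL sel, hence the guard). An illustrative completion site under that assumed convention:

/* Illustrative only: a completion site under the new convention. */
static void example_complete(struct io_kiocb *req, ssize_t ret,
			     struct io_br_sel *sel)
{
	/* sel may be NULL on paths that never select a buffer */
	unsigned int cflags = io_put_kbuf(req, ret,
					  sel ? sel->buf_list : NULL);

	io_req_set_res(req, ret, cflags);
}
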
@@ -902,7 +904,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
return 0;
}

-static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
+ unsigned int issue_flags)
{
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
@@ -916,7 +919,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret))
return ret;
} else if (io_do_buffer_select(req)) {
- ret = io_import_rw_buffer(ITER_DEST, req, io, issue_flags);
+ ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags);
if (unlikely(ret < 0))
return ret;
}
@@ -1018,18 +1021,22 @@ done:
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
+ struct io_br_sel sel = { };
int ret;
- ret = __io_read(req, issue_flags);
+ ret = __io_read(req, &sel, issue_flags);
if (ret >= 0)
- return kiocb_done(req, ret, issue_flags);
+ return kiocb_done(req, ret, &sel, issue_flags);
+ if (req->flags & REQ_F_BUFFERS_COMMIT)
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ struct io_br_sel sel = { };
unsigned int cflags = 0;
int ret;
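
With sel now living on the caller's stack, io_read() above must recycle explicitly when a ring buffer was committed (REQ_F_BUFFERS_COMMIT) but the read ultimately failed; previously the recycle path could locate the list itself via issue_flags. The contract implied by the call sites in this diff, sketched as a prototype:

/*
 * Assumed contract, inferred from the call sites in this diff: returns
 * true if a buffer was handed back to the ring, which is how
 * io_read_mshot() below knows to reset rw->len for the next selection.
 */
bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
		     unsigned int issue_flags);
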
@@ -1041,7 +1048,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
/* make it sync, multishot doesn't support async execution */
rw->kiocb.ki_complete = NULL;
- ret = __io_read(req, issue_flags);
+ ret = __io_read(req, &sel, issue_flags);
/*
* If we get -EAGAIN, recycle our buffer and just let normal poll
@@ -1052,15 +1059,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* Reset rw->len to 0 again to avoid clamping future mshot
* reads, in case the buffer size varies.
*/
- if (io_kbuf_recycle(req, issue_flags))
+ if (io_kbuf_recycle(req, sel.buf_list, issue_flags))
rw->len = 0;
return IOU_RETRY;
} else if (ret <= 0) {
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
if (ret < 0)
req_set_fail(req);
} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- cflags = io_put_kbuf(req, ret, issue_flags);
+ cflags = io_put_kbuf(req, ret, sel.buf_list);
} else {
/*
* Any successful return value will keep the multishot read
@@ -1068,7 +1075,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
* we fail to post a CQE, or multishot is no longer set, then
* jump to the termination path. This request is then done.
*/
- cflags = io_put_kbuf(req, ret, issue_flags);
+ cflags = io_put_kbuf(req, ret, sel.buf_list);
rw->len = 0; /* similarly to above, reset len to 0 */
if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1197,7 +1204,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
done:
- return kiocb_done(req, ret2, issue_flags);
+ return kiocb_done(req, ret2, NULL, issue_flags);
} else {
ret_eagain:
iov_iter_restore(&io->iter, &io->iter_state);
@@ -1365,7 +1372,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (!smp_load_acquire(&req->iopoll_completed))
break;
nr_events++;
- req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
+ req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
if (req->opcode != IORING_OP_URING_CMD)
io_req_rw_cleanup(req, 0);
}
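
For context on the feature being refactored: none of the following appears in this patch. It is a hedged userspace sketch, using the liburing (>= 2.4) ring-provided-buffer API, of the buffer-select mechanism whose kernel-side selection state this series moves into struct io_br_sel.

#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

#define BGID	0	/* buffer group id, matches sqe->buf_group */
#define NBUFS	8
#define BUFSZ	4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static char bufs[NBUFS][BUFSZ];
	int i, ret;

	io_uring_queue_init(8, &ring, 0);

	/* register a ring of provided buffers for group BGID */
	br = io_uring_setup_buf_ring(&ring, NBUFS, BGID, 0, &ret);
	if (!br) {
		fprintf(stderr, "buf_ring setup failed: %d\n", ret);
		return 1;
	}
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, bufs[i], BUFSZ, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);

	/* read without naming a buffer; the kernel selects one */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, STDIN_FILENO, NULL, BUFSZ, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
		printf("read %d bytes into buffer id %u\n", cqe->res,
		       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}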