Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 6310a3d08409..331af6bf4234 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -186,7 +186,7 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	 * This is really a bug in the core code that does this, any issue
 	 * path should assume that a successful (or -EIOCBQUEUED) return can
 	 * mean that the underlying data can be gone at any time. But that
-	 * should be fixed seperately, and then this check could be killed.
+	 * should be fixed separately, and then this check could be killed.
 	 */
 	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
 		req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -348,7 +348,7 @@ static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	/*
 	 * Have to do this validation here, as this is in io_read() rw->len
-	 * might have chanaged due to buffer selection
+	 * might have changed due to buffer selection
 	 */
 	return io_iov_buffer_select_prep(req);
 }
@@ -566,15 +566,17 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 	return res;
 }
 
-void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
+
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
 		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
 
 	io_req_rw_cleanup(req, 0);
-	io_req_task_complete(req, tw);
+	io_req_task_complete(tw_req, tw);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res)
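
The substantive change in the hunk above is the signature: io_req_rw_complete() now takes a struct io_tw_req by value rather than a bare struct io_kiocb pointer. Judging from the tw_req.req access the patch adds, the wrapper appears to be a thin typed container for the request pointer; a sketch inferred from this hunk alone (not the authoritative kernel definition) would be:

struct io_tw_req {
	struct io_kiocb *req;
};

A single-pointer struct passed by value costs the same as a raw pointer at the ABI level, but it gives task-work entry points a distinct parameter type, so a plain request pointer can no longer be handed to them by accident.
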
@@ -1010,7 +1012,7 @@ static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
 		iov_iter_restore(&io->iter, &io->iter_state);
 	} while (ret > 0);
 done:
-	/* it's faster to check here then delegate to kfree */
+	/* it's faster to check here than delegate to kfree */
 	return ret;
 }
 
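
To make the new calling convention concrete, here is a small stand-alone C sketch of the same pattern using stand-in types; the names mirror the patch, but none of this is kernel code:

#include <stdio.h>

/* Stand-ins for the kernel types, for illustration only. */
struct io_kiocb { long res; };
struct io_tw_req { struct io_kiocb *req; };	/* assumed one-pointer wrapper */

/* New-style callback: takes the wrapper by value and unpacks the request. */
static void complete_cb(struct io_tw_req tw_req)
{
	struct io_kiocb *req = tw_req.req;

	printf("completing request, res=%ld\n", req->res);
}

int main(void)
{
	struct io_kiocb req = { .res = 42 };

	/* Callers wrap the pointer explicitly; passing &req directly
	 * would be a compile-time type error with the new signature.
	 */
	complete_cb((struct io_tw_req){ .req = &req });
	return 0;
}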