author     Pavel Begunkov <asml.silence@gmail.com>    2020-07-16 23:28:05 +0300
committer  Jens Axboe <axboe@kernel.dk>               2020-07-24 13:00:45 -0600
commit     7fbb1b541f4286cc337b9bca1e5bad0ce4ee978c (patch)
tree       15c4e1399e78d6b9cafd4ba8205cd2d9fe4f579a /fs/io_uring.c
parent     8ff069bf2efd7b7aeb90b56ea8edc165c93d8940 (diff)
io_uring: don't open-code recv kbuf management
Don't open-code the fast path of kbuf freeing and management inside io_recv{,msg}(); that's error prone and duplicates handling. Replace it with a helper, io_put_recv_kbuf(), which mimics io_put_rw_kbuf() in io_read()/io_write(). This also keeps the cflags calculation in one place, removing the duplication between the rw and recv/send paths.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
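For context, io_put_kbuf() is the pre-existing helper that the new io_put_recv_kbuf() wraps: it packs the consumed buffer id into the CQE flags, clears REQ_F_BUFFER_SELECTED, and frees the kernel-side buffer descriptor. A sketch of how it looked around this point in the tree, reconstructed for illustration and not part of this patch:

/* Sketch of the pre-existing io_put_kbuf() helper this patch reuses;
 * reconstructed for context, not part of the diff below.
 */
static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
        unsigned int cflags;

        /* Encode the consumed buffer id into the CQE flags for userspace. */
        cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
        cflags |= IORING_CQE_F_BUFFER;
        req->flags &= ~REQ_F_BUFFER_SELECTED;
        kfree(kbuf);
        return cflags;
}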
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 871ada2a29c3..6e5ea7991c08 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4098,7 +4098,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 }
 
 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
-                                               int *cflags, bool needs_lock)
+                                               bool needs_lock)
 {
         struct io_sr_msg *sr = &req->sr_msg;
         struct io_buffer *kbuf;
@@ -4109,12 +4109,14 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
 
         sr->kbuf = kbuf;
         req->flags |= REQ_F_BUFFER_SELECTED;
-
-        *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
-        *cflags |= IORING_CQE_F_BUFFER;
         return kbuf;
 }
 
+static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
+{
+        return io_put_kbuf(req, req->sr_msg.kbuf);
+}
+
 static int io_recvmsg_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
@@ -4152,7 +4154,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 {
         struct io_async_msghdr iomsg, *kmsg;
         struct socket *sock;
-        struct io_buffer *kbuf = NULL;
+        struct io_buffer *kbuf;
         unsigned flags;
         int ret, cflags = 0;
 
@@ -4175,7 +4177,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
         }
 
         if (req->flags & REQ_F_BUFFER_SELECT) {
-                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
+                kbuf = io_recv_buffer_select(req, !force_nonblock);
                 if (IS_ERR(kbuf))
                         return PTR_ERR(kbuf);
                 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
@@ -4196,12 +4198,11 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
         if (ret == -ERESTARTSYS)
                 ret = -EINTR;
 
-        if (kbuf)
-                kfree(kbuf);
+        if (req->flags & REQ_F_BUFFER_SELECTED)
+                cflags = io_put_recv_kbuf(req);
         if (kmsg->iov != kmsg->fast_iov)
                 kfree(kmsg->iov);
-        req->flags &= ~(REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED);
-
+        req->flags &= ~REQ_F_NEED_CLEANUP;
         if (ret < 0)
                 req_set_fail_links(req);
         __io_req_complete(req, ret, cflags, cs);
@@ -4225,7 +4226,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
                 return ret;
 
         if (req->flags & REQ_F_BUFFER_SELECT) {
-                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
+                kbuf = io_recv_buffer_select(req, !force_nonblock);
                 if (IS_ERR(kbuf))
                         return PTR_ERR(kbuf);
                 buf = u64_to_user_ptr(kbuf->addr);
@@ -4254,9 +4255,8 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
         if (ret == -ERESTARTSYS)
                 ret = -EINTR;
 out_free:
-        if (kbuf)
-                kfree(kbuf);
-        req->flags &= ~REQ_F_NEED_CLEANUP;
+        if (req->flags & REQ_F_BUFFER_SELECTED)
+                cflags = io_put_recv_kbuf(req);
         if (ret < 0)
                 req_set_fail_links(req);
         __io_req_complete(req, ret, cflags, cs);
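
For reference, the cflags propagated through __io_req_complete() surface to userspace in cqe->flags. A hypothetical liburing-based consumer would recover the provided-buffer id along these lines (a sketch, not part of this patch; buffer_id_from_cqe is an invented name):

#include <liburing.h>

/* Hypothetical helper: extract the provided-buffer id from a completion. */
static int buffer_id_from_cqe(const struct io_uring_cqe *cqe)
{
        /* IORING_CQE_F_BUFFER is set when a provided buffer was consumed. */
        if (!(cqe->flags & IORING_CQE_F_BUFFER))
                return -1;
        /* The buffer id lives in the upper bits of cqe->flags. */
        return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}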