author    Pavel Begunkov <asml.silence@gmail.com>    2021-10-01 18:07:01 +0100
committer Jens Axboe <axboe@kernel.dk>               2021-10-19 05:49:54 -0600
commit    e0eb71dcfc4b862261d99f7f90169142867beb0a (patch)
tree      3eac89edf3fa2b8b99a96411bede0525bdf467ef
parent    22b2ca310afcea319c72e051df0371f668192b10 (diff)
io_uring: don't return from io_drain_req()
Never return from io_drain_req() but punt to tw if we've got there
but it's a false positive and we shouldn't actually drain.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/93583cee51b8783706b76c73196c155b28d9e762.1633107393.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--    fs/io_uring.c    36
1 file changed, 14 insertions(+), 22 deletions(-)
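In plain terms: io_drain_req() used to return a bool, and on false the caller
still owned the request and had to queue it itself. After this patch the
function returns void and always consumes the request — a false-positive
drain is punted to task_work ("tw") via io_req_task_queue(), a real drain is
deferred, and errors are failed inline. Below is a minimal userspace sketch
of that ownership contract, not kernel code; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	int id;
	bool needs_defer;	/* stands in for req_need_defer() + defer_list checks */
};

static void queue_for_execution(struct request *req)
{
	printf("req %d: queued for execution\n", req->id);
	free(req);
}

static void defer_req(struct request *req)
{
	printf("req %d: added to defer list\n", req->id);
	free(req);	/* a real defer list keeps it; freed here so the sketch is leak-free */
}

/* After the patch: void return, and every branch consumes req,
 * so the caller has exactly one code path. */
static void drain_req(struct request *req)
{
	if (!req->needs_defer) {
		/* False positive: no drain needed; hand the request straight on
		 * (the kernel punts to task_work via io_req_task_queue()). */
		queue_for_execution(req);
		return;
	}
	defer_req(req);
}

int main(void)
{
	struct request *a = malloc(sizeof(*a));
	struct request *b = malloc(sizeof(*b));

	if (!a || !b) {
		free(a);
		free(b);
		return 1;
	}
	*a = (struct request){ .id = 1, .needs_defer = false };
	*b = (struct request){ .id = 2, .needs_defer = true };

	/* The caller no longer branches on a bool return: */
	drain_req(a);
	drain_req(b);
	return 0;
}

The corresponding caller-side simplification is visible at the bottom of the
diff, in io_queue_sqe_fallback().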
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fb073915fa5c..b0b4c5b00f37 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req)
 	return seq;
 }
 
-static bool io_drain_req(struct io_kiocb *req)
+static void io_drain_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
 	int ret;
-	u32 seq;
+	u32 seq = io_get_sequence(req);
 
 	/* Still need defer if there is pending req in defer list. */
-	if (likely(list_empty_careful(&ctx->defer_list) &&
-		!(req->flags & REQ_F_IO_DRAIN))) {
-		ctx->drain_active = false;
-		return false;
-	}
-
-	seq = io_get_sequence(req);
-	/* Still a chance to pass the sequence check */
 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
+queue:
 		ctx->drain_active = false;
-		return false;
+		io_req_task_queue(req);
+		return;
 	}
 
 	ret = io_req_prep_async(req);
-	if (ret)
-		goto fail;
+	if (ret) {
+fail:
+		io_req_complete_failed(req, ret);
+		return;
+	}
 	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL);
 	if (!de) {
 		ret = -ENOMEM;
-fail:
-		io_req_complete_failed(req, ret);
-		return true;
+		goto fail;
 	}
 
 	spin_lock(&ctx->completion_lock);
 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
 		spin_unlock(&ctx->completion_lock);
 		kfree(de);
-		io_queue_async_work(req, NULL);
-		ctx->drain_active = false;
-		return true;
+		goto queue;
 	}
 
 	trace_io_uring_defer(ctx, req, req->user_data);
@@ -6475,7 +6468,6 @@ fail:
 	de->seq = seq;
 	list_add_tail(&de->list, &ctx->defer_list);
 	spin_unlock(&ctx->completion_lock);
-	return true;
 }
 
 static void io_clean_op(struct io_kiocb *req)
@@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 {
 	if (req->flags & REQ_F_FAIL) {
 		io_req_complete_fail_submit(req);
-	} else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) {
-		return;
+	} else if (unlikely(req->ctx->drain_active)) {
+		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
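A side note on the control flow the patch introduces: the new queue: and
fail: labels sit inside earlier if-bodies and are reached by backward gotos
from later in io_drain_req(), so each terminal path (queue, fail) exists
exactly once. That is unusual but valid C, as long as no variable-length
array declaration is jumped over. A compilable standalone sketch of the same
label placement, with illustrative names and plain ints standing in for
-EINVAL/-ENOMEM:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: `fail:` lives inside an earlier if-body and a later error
 * path jumps backwards into it, deduplicating the failure handling. */
static int process(int value)
{
	char *buf;
	int ret;

	ret = (value < 0) ? -22 : 0;	/* -EINVAL stand-in */
	if (ret) {
fail:
		fprintf(stderr, "failed: %d\n", ret);
		return ret;
	}

	buf = malloc(16);
	if (!buf) {
		ret = -12;	/* -ENOMEM stand-in */
		goto fail;	/* backward jump into the if-body above */
	}

	printf("processed %d\n", value);
	free(buf);
	return 0;
}

int main(void)
{
	process(1);	/* success path */
	process(-1);	/* shared failure path via the in-block label */
	return 0;
}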