author     Hao Xu <haoxu@linux.alibaba.com>  2021-04-13 15:20:39 +0800
committer  Jens Axboe <axboe@kernel.dk>      2021-04-27 07:38:58 -0600
commit     7b289c38335ec7bebe45ed31137d596c808e23ac (patch)
tree       5e9b1321046c439869ba6a328d46806150876945 /fs/io_uring.c
parent     6d042ffb598ed83e7d5623cc961d249def5b9829 (diff)
io_uring: maintain drain logic for multishot poll requests
Now that we have multishot poll requests, one SQE can emit multiple CQEs. Consider the example below:

  sqe0(multishot poll) --> sqe1 --> sqe2(drain req)

sqe2 is meant to be issued only after sqe0 and sqe1 have completed, but since sqe0 is a multishot poll request, sqe2 may be issued after sqe0's event has triggered twice, before sqe1 has completed. That is not what users rely on drain requests for. The solution here is to wait until multishot poll requests have fully completed.

To achieve this, we have to reconsider the req_need_defer equation. The original one is:

  all_sqes(excluding dropped ones) == all_cqes(including dropped ones)

which means a drain request is issued once all previously submitted SQEs have generated their CQEs. Now that we have multishot requests, we deduct all of their CQEs except the cancellation one, so that a multishot poll request behaves like a normal request:

  all_sqes == all_cqes - multishot_cqes(except cancellations)

cq_extra is introduced to track this.

Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Link: https://lore.kernel.org/r/1618298439-136286-1-git-send-email-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
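To make the ordering in the example concrete, here is a minimal userspace sketch of the submission side (not part of this patch); it assumes liburing with io_uring_prep_poll_multishot() and a kernel that supports multishot poll, and the pipefd/otherfd names are purely illustrative. The IOSQE_IO_DRAIN flag on sqe2 is what routes it through req_need_defer() below; before this fix, two poll hits on sqe0 alone could satisfy the old equation and let sqe2 issue before sqe1 completed.

  /* Illustrative only: the sqe0 -> sqe1 -> sqe2(drain) chain from the example
   * above. Error handling (e.g. io_uring_get_sqe() returning NULL) is omitted.
   */
  #include <liburing.h>
  #include <poll.h>

  static char buf[64];

  static int submit_example(struct io_uring *ring, int pipefd, int otherfd)
  {
          struct io_uring_sqe *sqe;

          /* sqe0: multishot poll, one SQE that can emit many CQEs (IORING_CQE_F_MORE set) */
          sqe = io_uring_get_sqe(ring);
          io_uring_prep_poll_multishot(sqe, pipefd, POLLIN);

          /* sqe1: an ordinary single-shot request */
          sqe = io_uring_get_sqe(ring);
          io_uring_prep_read(sqe, otherfd, buf, sizeof(buf), 0);

          /* sqe2: drained request, expected to issue only after sqe0 and sqe1 complete */
          sqe = io_uring_get_sqe(ring);
          io_uring_prep_nop(sqe);
          sqe->flags |= IOSQE_IO_DRAIN;

          return io_uring_submit(ring);
  }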
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  6
1 file changed, 5 insertions, 1 deletion
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 81096f3b01ea..63ff70587d4f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -428,6 +428,7 @@ struct io_ring_ctx {
 	unsigned		cq_mask;
 	atomic_t		cq_timeouts;
 	unsigned		cq_last_tm_flush;
+	unsigned		cq_extra;
 	unsigned long		cq_check_overflow;
 	struct wait_queue_head	cq_wait;
 	struct fasync_struct	*cq_fasync;
@@ -1193,7 +1194,7 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
 		struct io_ring_ctx *ctx = req->ctx;

-		return seq != ctx->cached_cq_tail
+		return seq + ctx->cq_extra != ctx->cached_cq_tail
 				+ READ_ONCE(ctx->cached_cq_overflow);
 	}
@@ -4901,6 +4902,9 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 		req->poll.done = true;
 		flags = 0;
 	}
+	if (flags & IORING_CQE_F_MORE)
+		ctx->cq_extra++;
+
 	io_commit_cqring(ctx);
 	return !(flags & IORING_CQE_F_MORE);
 }
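As a worked illustration of the new accounting, consider a standalone model (not kernel code: the field names only mirror the ctx members, and treating seq as simply the number of SQEs submitted before the drain request is a simplification). After sqe0's multishot poll fires twice, cached_cq_tail is 2 but cq_extra is also 2, so the drain request with seq == 2 stays deferred; the pre-patch check seq != cached_cq_tail would already have been satisfied and issued it too early. Once sqe1 completes and sqe0 posts its final, non-IORING_CQE_F_MORE CQE, the counts balance and the drain request may issue.

  /* Standalone sketch of the drain arithmetic; not kernel code, scenario values made up. */
  #include <stdbool.h>
  #include <stdio.h>

  struct model_ctx {
          unsigned cached_cq_tail;      /* all CQEs posted so far */
          unsigned cached_cq_overflow;  /* CQEs dropped on overflow */
          unsigned cq_extra;            /* multishot CQEs that carried IORING_CQE_F_MORE */
  };

  /* Same inequality the patch uses for a drained request with sequence 'seq'. */
  static bool model_need_defer(const struct model_ctx *ctx, unsigned seq)
  {
          return seq + ctx->cq_extra != ctx->cached_cq_tail + ctx->cached_cq_overflow;
  }

  int main(void)
  {
          struct model_ctx ctx = { 0 };
          unsigned drain_seq = 2;       /* sqe2 follows sqe0 and sqe1 */

          /* sqe0's multishot poll fires twice; both CQEs carry IORING_CQE_F_MORE */
          ctx.cached_cq_tail = 2;
          ctx.cq_extra = 2;
          printf("after two poll hits: defer=%d\n", model_need_defer(&ctx, drain_seq));  /* 1 */

          /* sqe1 completes and sqe0 posts its final (non-F_MORE) CQE */
          ctx.cached_cq_tail = 4;
          printf("after sqe0/sqe1 done: defer=%d\n", model_need_defer(&ctx, drain_seq)); /* 0 */
          return 0;
  }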