author     Pavel Begunkov <asml.silence@gmail.com>    2021-10-01 18:07:00 +0100
committer  Jens Axboe <axboe@kernel.dk>               2021-10-19 05:49:54 -0600
commit     22b2ca310afcea319c72e051df0371f668192b10
tree       e313a972a13c44af5b026551265bc8b72f26d36e
parent     5e371265ea1d3e0cd02236b1a6d79fe322523ae8
io_uring: extract a helper for drain init
Add a helper io_init_req_drain() for initialising requests that have IOSQE_IO_DRAIN set. Also move bits from the preamble of io_drain_req() in there, because we already modify all the bits needed inside the helper.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dcb412825b35b1cb8891245a387d7d69f8d14cef.1633107393.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  fs/io_uring.c  48
1 file changed, 22 insertions(+), 26 deletions(-)
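
For context on the flag this path handles (a minimal userspace sketch, not part of the patch): IOSQE_IO_DRAIN is set by applications on an SQE to ask that it not start until every previously submitted request has completed; io_init_req() picks that flag up and, with this change, hands the bookkeeping to the new io_init_req_drain() helper. The sketch below assumes liburing is available and that "fd" is an already open, writable descriptor; error handling and SQE NULL checks are trimmed for brevity.

/*
 * Hedged sketch: queue a write, then an fsync marked IOSQE_IO_DRAIN so the
 * fsync only starts after all previously submitted requests complete.
 * Assumes liburing is installed and "fd" is an open, writable descriptor.
 */
#include <liburing.h>
#include <stdio.h>
#include <string.h>

static int write_then_drained_fsync(int fd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static const char buf[] = "hello\n";
	int i, ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* an ordinary write request */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fd, buf, strlen(buf), 0);

	/* fsync that must wait for everything queued before it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);

	ret = io_uring_submit(&ring);
	if (ret < 0)
		goto out;

	/* reap both completions */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		if (cqe->res < 0)
			fprintf(stderr, "request failed: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
out:
	io_uring_queue_exit(&ring);
	return ret < 0 ? ret : 0;
}

Submitting through a drained request like this is what exercises the drain_active/drain_next state that io_init_req_drain() now sets up on the kernel side.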
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fd3dc6432a34..fb073915fa5c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6430,28 +6430,11 @@ static u32 io_get_sequence(struct io_kiocb *req)
 static bool io_drain_req(struct io_kiocb *req)
 {
-	struct io_kiocb *pos;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_defer_entry *de;
 	int ret;
 	u32 seq;
 
-	/* not interested in head, start from the first linked */
-	io_for_each_link(pos, req->link) {
-		/*
-		 * If we need to drain a request in the middle of a link, drain
-		 * the head request and the next request/link after the current
-		 * link. Considering sequential execution of links,
-		 * IOSQE_IO_DRAIN will be maintained for every request of our
-		 * link.
-		 */
-		if (pos->flags & REQ_F_IO_DRAIN) {
-			ctx->drain_next = true;
-			req->flags |= REQ_F_IO_DRAIN;
-			break;
-		}
-	}
-
 	/* Still need defer if there is pending req in defer list. */
 	if (likely(list_empty_careful(&ctx->defer_list) &&
 		!(req->flags & REQ_F_IO_DRAIN))) {
@@ -6992,6 +6975,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
 	return true;
 }
 
+static void io_init_req_drain(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_kiocb *head = ctx->submit_state.link.head;
+
+	ctx->drain_active = true;
+	if (head) {
+		/*
+		 * If we need to drain a request in the middle of a link, drain
+		 * the head request and the next request/link after the current
+		 * link. Considering sequential execution of links,
+		 * IOSQE_IO_DRAIN will be maintained for every request of our
+		 * link.
+		 */
+		head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
+		ctx->drain_next = true;
+	}
+}
+
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		       const struct io_uring_sqe *sqe)
 	__must_hold(&ctx->uring_lock)
@@ -7018,14 +7020,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
 		    !io_op_defs[req->opcode].buffer_select)
 			return -EOPNOTSUPP;
-		if (sqe_flags & IOSQE_IO_DRAIN) {
-			struct io_submit_link *link = &ctx->submit_state.link;
-
-			ctx->drain_active = true;
-			req->flags |= REQ_F_FORCE_ASYNC;
-			if (link->head)
-				link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
-		}
+		if (sqe_flags & IOSQE_IO_DRAIN)
+			io_init_req_drain(req);
 	}
 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
@@ -7037,7 +7033,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
 			ctx->drain_next = false;
 			ctx->drain_active = true;
-			req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN;
+			req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
 		}
 	}