author	Pavel Begunkov <asml.silence@gmail.com>	2021-09-24 21:59:57 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-10-19 05:49:53 -0600
commit	2a56a9bd64dbdff4fe5cbe00b20014da07694a78
tree	8b88d171b8ba8e2fa791465b5c15400d6dd4d863
parent	f15a3431775a598ed89028acee334200160cc2d6
io_uring: remove drain_active check from hot path
req->ctx->drain_active is a bit too expensive, partially because of two
dereferences. Do a trick: if we see it set in io_init_req(), set
REQ_F_FORCE_ASYNC so the request automatically goes through a slower path
where we can catch it. It's nearly free to do in io_init_req() because
there is already a ->restricted check and it's in the same byte of a
bitmask.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d7e7ddc63c15e8a300833132abb3eb8fd3918aef.1632516769.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	53
1 file changed, 29 insertions(+), 24 deletions(-)
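Editor's note on the "same byte of a bitmask" remark: io_uring defines the
low REQ_F_* bits to alias the IOSQE_* SQE flag bits, which is why this very
patch can write "req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN" without
translation. Below is a minimal standalone C sketch of that idea and of the
resulting single hot-path test; the flag names and bit values (SQE_IO_DRAIN,
REQ_FORCE_ASYNC, REQ_FAIL) are simplified stand-ins for IOSQE_IO_DRAIN,
REQ_F_FORCE_ASYNC and REQ_F_FAIL, not the kernel's definitions:

#include <stdio.h>

/* stand-ins for the IOSQE_ and REQ_F_ flags; values are illustrative */
enum {
	SQE_IO_DRAIN	= 1U << 1,	/* plays the role of IOSQE_IO_DRAIN */
};
enum {
	REQ_IO_DRAIN	= SQE_IO_DRAIN,	/* aliased: same bit value */
	REQ_FORCE_ASYNC	= 1U << 4,	/* plays the role of REQ_F_FORCE_ASYNC */
	REQ_FAIL	= 1U << 5,	/* plays the role of REQ_F_FAIL */
};

/* hot path: one mask test on req flags, no ctx dereferences */
static void queue_req(unsigned int req_flags)
{
	if (!(req_flags & (REQ_FORCE_ASYNC | REQ_FAIL))) {
		printf("fast path: issue inline\n");
		return;
	}
	printf("slow path: handle drain/fail/async here\n");
}

int main(void)
{
	unsigned int sqe_flags = SQE_IO_DRAIN;
	/* aliasing means SQE flags transfer without translation */
	unsigned int req_flags = sqe_flags;

	/* the patch's init-time trick: drain rides the existing slow path */
	if (sqe_flags & SQE_IO_DRAIN)
		req_flags |= REQ_FORCE_ASYNC;

	queue_req(req_flags);	/* slow path */
	queue_req(0);		/* fast path */
	return 0;
}

The payoff in the real code is that io_queue_sqe() no longer reads
ctx->drain_active on the fast path at all: draining is detected from
req->flags, which the likely() test there examines anyway.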
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9b99ec46da25..f0fb458e81f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6438,23 +6438,15 @@ static bool io_drain_req(struct io_kiocb *req)
 	int ret;
 	u32 seq;
 
-	if (req->flags & REQ_F_FAIL) {
-		io_req_complete_fail_submit(req);
-		return true;
-	}
-
-	/*
-	 * If we need to drain a request in the middle of a link, drain the
-	 * head request and the next request/link after the current link.
-	 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
-	 * maintained for every request of our link.
-	 */
-	if (ctx->drain_next) {
-		req->flags |= REQ_F_IO_DRAIN;
-		ctx->drain_next = false;
-	}
 	/* not interested in head, start from the first linked */
 	io_for_each_link(pos, req->link) {
+		/*
+		 * If we need to drain a request in the middle of a link, drain
+		 * the head request and the next request/link after the current
+		 * link. Considering sequential execution of links,
+		 * IOSQE_IO_DRAIN will be maintained for every request of our
+		 * link.
+		 */
 		if (pos->flags & REQ_F_IO_DRAIN) {
 			ctx->drain_next = true;
 			req->flags |= REQ_F_IO_DRAIN;
@@ -6946,13 +6938,12 @@ issue_sqe:
 static inline void io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
-	if (unlikely(req->ctx->drain_active) && io_drain_req(req))
-		return;
-
 	if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
 		__io_queue_sqe(req);
 	} else if (req->flags & REQ_F_FAIL) {
 		io_req_complete_fail_submit(req);
+	} else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) {
+		return;
 	} else {
 		int ret = io_req_prep_async(req);
 
@@ -6972,9 +6963,6 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
 					struct io_kiocb *req,
 					unsigned int sqe_flags)
 {
-	if (likely(!ctx->restricted))
-		return true;
-
 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
 		return false;
 
@@ -7015,11 +7003,28 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
 		    !io_op_defs[req->opcode].buffer_select)
 			return -EOPNOTSUPP;
-		if (sqe_flags & IOSQE_IO_DRAIN)
+		if (sqe_flags & IOSQE_IO_DRAIN) {
+			struct io_submit_link *link = &ctx->submit_state.link;
+
 			ctx->drain_active = true;
+			req->flags |= REQ_F_FORCE_ASYNC;
+			if (link->head)
+				link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
+		}
+	}
+	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
+		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
+			return -EACCES;
+		/* knock it to the slow queue path, will be drained there */
+		if (ctx->drain_active)
+			req->flags |= REQ_F_FORCE_ASYNC;
+		/* if there is no link, we're at "next" request and need to drain */
+		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
+			ctx->drain_next = false;
+			ctx->drain_active = true;
+			req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN;
+		}
 	}
-	if (!io_check_restriction(ctx, req, sqe_flags))
-		return -EACCES;
 
 	personality = READ_ONCE(sqe->personality);
 	if (personality) {
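
Editor's note: for context, a userspace sketch of the IOSQE_IO_DRAIN
semantics this patch keeps intact while moving their cost off the hot path.
It assumes liburing is installed and is not part of the patch; the drained
no-op is not started until every earlier submission has completed:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* first request: plain no-op */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, (void *)1);

	/* second request: drained, starts only after the first completes */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
	io_uring_sqe_set_data(sqe, (void *)2);

	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		printf("completed request %lu\n",
		       (unsigned long)io_uring_cqe_get_data(cqe));
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

With this patch, a submission stream that never sets IOSQE_IO_DRAIN never
pays for the drain check: a request is forced onto the slower async path
only when the flag was actually observed at init time.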