author	Pavel Begunkov <asml.silence@gmail.com>	2021-04-25 23:34:45 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-04-26 06:59:25 -0600
commit	28090c133869b461c5366195a856d73469ab87d9 (patch)
tree	5da5a4787a2f03207d74cf1f72d9ed4eb903b774 /fs/io_uring.c
parent	615cee49b3ca55f54d527f7a6a7d0fd4fd6fef6b (diff)
io_uring: fix work_exit sqpoll cancellations
After closing an SQPOLL ring, io_ring_exit_work() kicks in and starts
doing cancellations via io_uring_try_cancel_requests(). It will go
through io_uring_try_cancel_iowq(), which uses ctx->tctx_list, but as
the SQPOLL task doesn't have a ctx node there, its io-wq won't be
reachable and so is left uncancelled.
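A condensed sketch of the walk just described (paraphrased, not verbatim
from the tree; the real io_cancel_ctx_cb() and the head of
io_uring_try_cancel_iowq() appear in the diff below). Tasks only become
visible to this walk by hanging an io_tctx_node off ctx->tctx_list, so an
io-wq owned by a task without such a node is never visited:

/* Paraphrased sketch of io_uring_try_cancel_iowq(); exact field names
 * and locking details may differ, see the diff below for context. */
static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
{
	struct io_tctx_node *node;
	bool ret = false;

	mutex_lock(&ctx->uring_lock);
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (!tctx || !tctx->io_wq)
			continue;
		/* cancel every queued work whose req->ctx matches this ctx */
		ret |= io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb,
				       ctx, true) != IO_WQ_CANCEL_NOTFOUND;
	}
	mutex_unlock(&ctx->uring_lock);
	/* an SQPOLL task has no node on this list, so its io-wq is skipped */
	return ret;
}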
It will eventually be cancelled when one of the tasks dies, but if a
thread group survives for long and changes rings, it can accumulate lots
of unreclaimed resources and live-locked works.
Cancel the SQPOLL task's io-wq separately in io_ring_exit_work().
Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a71a7fe345135d684025bb529d5cb1d8d6b46e10.1619389911.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 577520445fa0..f20622bd963b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8679,6 +8679,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
 	complete(&work->completion);
 }
 
+static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
+{
+	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+	return req->ctx == data;
+}
+
 static void io_ring_exit_work(struct work_struct *work)
 {
 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
@@ -8695,6 +8702,17 @@ static void io_ring_exit_work(struct work_struct *work)
 	 */
 	do {
 		io_uring_try_cancel_requests(ctx, NULL, NULL);
+		if (ctx->sq_data) {
+			struct io_sq_data *sqd = ctx->sq_data;
+			struct task_struct *tsk;
+
+			io_sq_thread_park(sqd);
+			tsk = sqd->thread;
+			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
+				io_wq_cancel_cb(tsk->io_uring->io_wq,
+						io_cancel_ctx_cb, ctx, true);
+			io_sq_thread_unpark(sqd);
+		}
 		WARN_ON_ONCE(time_after(jiffies, timeout));
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 
@@ -8844,13 +8862,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 	return true;
 }
 
-static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
-{
-	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
-	return req->ctx == data;
-}
-
 static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 {
 	struct io_tctx_node *node;
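One note on the shape of the fix (commentary, not part of the commit
message): the io_sq_thread_park()/io_sq_thread_unpark() pair is what makes
dereferencing sqd->thread and its task io_uring state safe, since the
SQPOLL thread could otherwise exit while io_ring_exit_work() is looking at
it. Condensed from the second hunk above, with comments added:

	if (ctx->sq_data) {
		struct io_sq_data *sqd = ctx->sq_data;
		struct task_struct *tsk;

		/* park pins the SQPOLL thread so tsk and tsk->io_uring
		 * can't disappear under us while we cancel its io-wq */
		io_sq_thread_park(sqd);
		tsk = sqd->thread;
		if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
			/* same ctx-matching callback as the tctx_list walk,
			 * aimed directly at the SQPOLL task's io-wq;
			 * the final true means cancel all matches */
			io_wq_cancel_cb(tsk->io_uring->io_wq,
					io_cancel_ctx_cb, ctx, true);
		io_sq_thread_unpark(sqd);
	}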