author     Pavel Begunkov <asml.silence@gmail.com>   2021-04-25 23:34:46 +0100
committer  Jens Axboe <axboe@kernel.dk>              2021-04-26 06:59:25 -0600
commit     9f59a9d88d3bb2708d08e0e1d03899c469c27190 (patch)
tree       73b49f0805107b074112981b1fb40700200e086c /fs/io_uring.c
parent     28090c133869b461c5366195a856d73469ab87d9 (diff)
io_uring: simplify SQPOLL cancellations
All SQPOLL rings (even those sharing an sqpoll task) are currently bound to die with the task that created them; in other words, when the owner task dies it kills all of its SQPOLL rings and their inflight requests via the task_work infrastructure. That is neither the nicest nor the most convenient approach, as it adds extra locking/waiting and dependencies.

Drop the explicit cancellation and rely on SIGKILL being delivered on the sqpoll task's thread group exit, so only two cases are left:

1) The thread group is dying: the sqpoll task gets a signal and exits by itself, cancelling all of its requests.

2) An sqpoll ring is dying: because refs_kill() has been called, the sqpoll task is not going to submit any new requests, which is all we need. io_ring_exit_work() will do all the cancellation itself before actually killing the ctx, so sqpoll doesn't need to worry about it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/3cd7f166b9c326a2c932b70e71a655b03257b366.1619389911.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
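For reference, this is roughly what io_uring_try_cancel() looks like after the patch, pieced together from the hunks below; the two local declarations needed by xa_for_each() are not visible in the diff context and are assumed here. The point is that sqpoll-backed rings are simply skipped: either the sqpoll task cancels its own requests when it gets SIGKILL (case 1), or io_ring_exit_work() cancels them once the ring's refs are killed (case 2).

static void io_uring_try_cancel(struct files_struct *files)
{
        struct io_uring_task *tctx = current->io_uring;
        struct io_tctx_node *node;      /* assumed declaration, elided by the diff context */
        unsigned long index;            /* assumed declaration, elided by the diff context */

        xa_for_each(&tctx->xa, index, node) {
                struct io_ring_ctx *ctx = node->ctx;

                /* sqpoll task will cancel all its requests */
                if (!ctx->sq_data)
                        io_uring_try_cancel_requests(ctx, current, files);
        }
}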
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c   45
1 file changed, 3 insertions(+), 42 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f20622bd963b..6b578c380e73 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9021,41 +9021,6 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
return percpu_counter_sum(&tctx->inflight);
}
-static void io_sqpoll_cancel_cb(struct callback_head *cb)
-{
- struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
- struct io_sq_data *sqd = work->ctx->sq_data;
-
- if (sqd->thread)
- io_uring_cancel_sqpoll(sqd);
- list_del_init(&work->ctx->sqd_list);
- io_sqd_update_thread_idle(sqd);
- complete(&work->completion);
-}
-
-static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
-{
- struct io_sq_data *sqd = ctx->sq_data;
- struct io_tctx_exit work = { .ctx = ctx, };
- struct task_struct *task;
-
- io_sq_thread_park(sqd);
- task = sqd->thread;
- if (task) {
- init_completion(&work.completion);
- init_task_work(&work.task_work, io_sqpoll_cancel_cb);
- io_task_work_add_head(&sqd->park_task_work, &work.task_work);
- wake_up_process(task);
- } else {
- list_del_init(&ctx->sqd_list);
- io_sqd_update_thread_idle(sqd);
- }
- io_sq_thread_unpark(sqd);
-
- if (task)
- wait_for_completion(&work.completion);
-}
-
static void io_uring_try_cancel(struct files_struct *files)
{
struct io_uring_task *tctx = current->io_uring;
@@ -9065,11 +9030,9 @@ static void io_uring_try_cancel(struct files_struct *files)
xa_for_each(&tctx->xa, index, node) {
struct io_ring_ctx *ctx = node->ctx;
- if (ctx->sq_data) {
- io_sqpoll_cancel_sync(ctx);
- continue;
- }
- io_uring_try_cancel_requests(ctx, current, files);
+ /* sqpoll task will cancel all its requests */
+ if (!ctx->sq_data)
+ io_uring_try_cancel_requests(ctx, current, files);
}
}
@@ -9117,8 +9080,6 @@ void __io_uring_cancel(struct files_struct *files)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
- io_uring_try_cancel(files);
-
do {
/* read completions before cancelations */
inflight = tctx_inflight(tctx, !!files);
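The "read completions before cancelations" comment in this last hunk describes the wait pattern __io_uring_cancel() is built around: mark the task as idle, snapshot the inflight count, issue cancellations, and sleep only while nothing has completed since the snapshot, so a racing completion wakes the loop instead of being missed. A minimal user-space analogue of that snapshot-and-wait loop, under the assumption that inflight requests are tracked by a single atomic counter (none of these names are kernel APIs; the kernel uses per-cpu counters and a waitqueue rather than the yield loop shown here):

#include <sched.h>
#include <stdatomic.h>

static atomic_long inflight;    /* incremented on submit, decremented on completion */

static void try_cancel_all(void)
{
        /* ask every pending request to cancel; completions drop the counter */
}

static void cancel_and_wait(void)
{
        for (;;) {
                /* read completions before cancelations */
                long seen = atomic_load(&inflight);

                if (!seen)
                        break;
                try_cancel_all();
                /* sleep only while nothing has completed since the snapshot */
                while (atomic_load(&inflight) == seen)
                        sched_yield();  /* the kernel parks on a waitqueue here instead */
        }
}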