author	Dylan Yudaken <dylany@fb.com>	2022-06-22 06:40:23 -0700
committer	Jens Axboe <axboe@kernel.dk>	2022-07-24 18:39:15 -0600
commit	f88262e60bb9cb5740891672ce9f405e7f9393e5 (patch)
tree	bca01661645c35ef7958fd5b0a5f9438d9b9b477 /include
parent	c34398a8c018e0d3d2d30b718d03c7290c696f51 (diff)
io_uring: lockless task list
With networking use cases we see contention on the spinlock used to
protect the task_list when multiple threads try to add completions at
once. Instead we can use a lockless list, and assume that the first
caller to add to the list is responsible for kicking off task work.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220622134028.2013417-4-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
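To make the pattern concrete, below is a minimal userspace sketch of the
"first adder kicks off the work" scheme the message describes. It is an
illustration, not the kernel code: the names (task_node, task_list,
task_list_add, queue_task_work) are invented, and C11 atomics stand in
for the kernel's <linux/llist.h>; what it preserves is llist_add()'s
actual contract of returning true when the list was empty before the
push.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct task_node {
	struct task_node *next;
	void (*func)(struct task_node *);
};

static _Atomic(struct task_node *) task_list;

/* Lock-free push (Treiber-style). Returns true when the list was empty
 * before the push, i.e. this caller is the first adder -- the same
 * contract as the kernel's llist_add(). */
static bool task_list_add(struct task_node *node)
{
	struct task_node *head = atomic_load_explicit(&task_list,
						      memory_order_relaxed);
	do {
		node->next = head;
	} while (!atomic_compare_exchange_weak_explicit(&task_list, &head,
			node, memory_order_release, memory_order_relaxed));
	return head == NULL;
}

/* Producer side: only the first adder pays for the notification; every
 * other concurrent adder returns after one CAS, with no spinlock to
 * contend on. The kick() callback is hypothetical. */
static void queue_task_work(struct task_node *node, void (*kick)(void))
{
	if (task_list_add(node))
		kick();
}

Later adders chain onto the list with a single compare-and-swap and
return immediately; that is what removes the spinlock contention the
message reports under networking workloads.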
Diffstat (limited to 'include')
-rw-r--r--	include/linux/io_uring_types.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 5987f8acca38..918165a20053 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -428,7 +428,7 @@ typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
 struct io_task_work {
 	union {
-		struct io_wq_work_node	node;
+		struct llist_node	node;
 		struct llist_node	fallback_node;
 	};
 	io_req_tw_func_t		func;
 };
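For completeness, a sketch of the consumer side, continuing the
userspace example above. It models the kernel's llist_del_all(), which
detaches the entire list in one atomic operation; as with the kernel's
llist, the detached chain arrives in reverse insertion order, so a real
consumer may want to reverse it before running the callbacks.

/* Consumer side: detach everything at once, then run each callback.
 * The exchange is the userspace analogue of llist_del_all(). */
static void run_task_work(void)
{
	struct task_node *node = atomic_exchange_explicit(&task_list, NULL,
							  memory_order_acquire);
	while (node) {
		struct task_node *next = node->next; /* func() may free node */
		node->func(node);
		node = next;
	}
}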