Skip to content

Commit 6971253

Browse files
isilence authored and axboe committed
io_uring: revise completion_lock locking
io_kill_timeouts() doesn't post any events but queues everything to task_work. Locking there is needed for protecting linked requests traversing, we should grab completion_lock directly instead of using io_cq_[un]lock helpers. Same goes for __io_req_find_next_prep(). Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/88e75d481a65dc295cb59722bb1cf76402d1c06b.1670002973.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent ea011ee commit 6971253

3 files changed

Lines changed: 20 additions & 15 deletions

File tree

io_uring/io_uring.c

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -597,6 +597,18 @@ static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
597597
spin_unlock(&ctx->completion_lock);
598598
}
599599

600+
static inline void io_cq_lock(struct io_ring_ctx *ctx)
601+
__acquires(ctx->completion_lock)
602+
{
603+
spin_lock(&ctx->completion_lock);
604+
}
605+
606+
static inline void io_cq_unlock(struct io_ring_ctx *ctx)
607+
__releases(ctx->completion_lock)
608+
{
609+
spin_unlock(&ctx->completion_lock);
610+
}
611+
600612
/* keep it inlined for io_submit_flush_completions() */
601613
static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
602614
__releases(ctx->completion_lock)
@@ -1074,9 +1086,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
10741086
{
10751087
struct io_ring_ctx *ctx = req->ctx;
10761088

1077-
io_cq_lock(ctx);
1089+
spin_lock(&ctx->completion_lock);
10781090
io_disarm_next(req);
1079-
io_cq_unlock_post(ctx);
1091+
spin_unlock(&ctx->completion_lock);
10801092
}
10811093

10821094
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)

io_uring/io_uring.h

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -87,17 +87,6 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
8787
#define io_for_each_link(pos, head) \
8888
for (pos = (head); pos; pos = pos->link)
8989

90-
static inline void io_cq_lock(struct io_ring_ctx *ctx)
91-
__acquires(ctx->completion_lock)
92-
{
93-
spin_lock(&ctx->completion_lock);
94-
}
95-
96-
static inline void io_cq_unlock(struct io_ring_ctx *ctx)
97-
{
98-
spin_unlock(&ctx->completion_lock);
99-
}
100-
10190
void io_cq_unlock_post(struct io_ring_ctx *ctx);
10291

10392
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,

io_uring/timeout.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -624,7 +624,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
624624
struct io_timeout *timeout, *tmp;
625625
int canceled = 0;
626626

627-
io_cq_lock(ctx);
627+
/*
628+
* completion_lock is needed for io_match_task(). Take it before
629+
* timeout_lock first to keep locking ordering.
630+
*/
631+
spin_lock(&ctx->completion_lock);
628632
spin_lock_irq(&ctx->timeout_lock);
629633
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
630634
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -634,6 +638,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
634638
canceled++;
635639
}
636640
spin_unlock_irq(&ctx->timeout_lock);
637-
io_cq_unlock_post(ctx);
641+
spin_unlock(&ctx->completion_lock);
638642
return canceled != 0;
639643
}

0 commit comments

Comments (0)