diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 886368cd2c40..887521656bca 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -660,11 +660,16 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
 	__io_cqring_overflow_flush(ctx, true);
 }
 
-static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+static int io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
-	mutex_lock(&ctx->uring_lock);
+	int ret;
+
+	ret = mutex_lock_killable(&ctx->uring_lock);
+	if (unlikely(ret))
+		return ret;
 	__io_cqring_overflow_flush(ctx, false);
 	mutex_unlock(&ctx->uring_lock);
+	return 0;
 }
 
 /* must to be called somewhat shortly after putting a request */
@@ -2612,8 +2617,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 					 max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 	io_run_task_work();
 
-	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
-		io_cqring_do_overflow_flush(ctx);
+	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))) {
+		ret = io_cqring_do_overflow_flush(ctx);
+		if (ret)
+			return ret;
+	}
 
 	if (__io_cqring_events_user(ctx) >= min_events)
 		return 0;
@@ -2698,8 +2706,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 		check_cq = READ_ONCE(ctx->check_cq);
 		if (unlikely(check_cq)) {
 			/* let the caller flush overflows, retry */
-			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-				io_cqring_do_overflow_flush(ctx);
+			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
+				ret = io_cqring_do_overflow_flush(ctx);
+				if (ret)
+					break;
+			}
 			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
 				ret = -EBADR;
 				break;
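
The pattern the patch applies is: take the lock with a primitive that can fail (mutex_lock_killable() instead of mutex_lock()), and propagate that failure up through io_cqring_do_overflow_flush() so io_cqring_wait() stops waiting rather than blocking uninterruptibly. The following is a minimal userspace sketch of that pattern only, not part of the patch: pthread_mutex_timedlock() stands in for mutex_lock_killable() as "a lock attempt that can fail", and the helper names (do_overflow_flush(), flush_overflow(), cqring_wait()) are invented for illustration.

	/*
	 * Illustrative sketch: a lock-taking helper that reports failure
	 * instead of hiding it, and a caller that bails out on that failure.
	 * Build with: cc -pthread sketch.c
	 */
	#define _POSIX_C_SOURCE 200809L
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Analogue of __io_cqring_overflow_flush(): must run under the lock. */
	static void flush_overflow(void)
	{
		puts("flushed overflow");
	}

	/*
	 * Analogue of the patched io_cqring_do_overflow_flush(): the lock
	 * attempt can fail, and the error is returned to the caller.
	 */
	static int do_overflow_flush(void)
	{
		struct timespec deadline;
		int ret;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 1;	/* give up after one second */

		ret = pthread_mutex_timedlock(&uring_lock, &deadline);
		if (ret)
			return ret;	/* e.g. ETIMEDOUT: caller decides */
		flush_overflow();
		pthread_mutex_unlock(&uring_lock);
		return 0;
	}

	/* Analogue of the wait path: stop waiting instead of retrying forever. */
	static int cqring_wait(void)
	{
		int ret = do_overflow_flush();

		if (ret) {
			fprintf(stderr, "overflow flush failed: %s\n", strerror(ret));
			return ret;
		}
		/* ... would go on to wait for completions here ... */
		return 0;
	}

	int main(void)
	{
		return cqring_wait() ? 1 : 0;
	}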