io_uring: always arm linked timeouts prior to issue
Commit b53e523261bf058ea4a518b482222e7a277b186b upstream.

There are a few spots where linked timeouts are armed, and not all of
them adhere to the pre-arm, attempt issue, post-arm pattern. This can be
problematic if the linked request returns that it will trigger a
callback later, and does so before the linked timeout is fully armed.

Consolidate all the linked timeout handling into __io_issue_sqe(),
rather than have it spread throughout the various issue entry points.

Cc: stable@vger.kernel.org
Link: https://github.com/axboe/liburing/issues/1390
Reported-by: Chase Hiltz <chase@path.net>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Committed by: Greg Kroah-Hartman
Parent commit: 00f0dd1a01
This commit: 51f1389b5f
@@ -422,24 +422,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 	return req->link;
 }
 
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
-		return NULL;
-	return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
-	io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
-	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
-		__io_arm_ltimeout(req);
-}
-
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
@@ -493,7 +475,6 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static void io_queue_iowq(struct io_kiocb *req)
 {
-	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->task->io_uring;
 
 	BUG_ON(!tctx);
@@ -518,8 +499,6 @@ static void io_queue_iowq(struct io_kiocb *req)
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
-	if (link)
-		io_queue_linked_timeout(link);
 }
 
 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
@@ -1863,17 +1842,24 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
 	return !!req->file;
 }
 
+#define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
 	const struct cred *creds = NULL;
+	struct io_kiocb *link = NULL;
 	int ret;
 
 	if (unlikely(!io_assign_file(req, def, issue_flags)))
 		return -EBADF;
 
-	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
-		creds = override_creds(req->creds);
+	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+			creds = override_creds(req->creds);
+		if (req->flags & REQ_F_ARM_LTIMEOUT)
+			link = __io_prep_linked_timeout(req);
+	}
 
 	if (!def->audit_skip)
 		audit_uring_entry(req->opcode);
@@ -1883,8 +1869,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	if (!def->audit_skip)
 		audit_uring_exit(!ret, ret);
 
-	if (creds)
-		revert_creds(creds);
+	if (unlikely(creds || link)) {
+		if (creds)
+			revert_creds(creds);
+		if (link)
+			io_queue_linked_timeout(link);
+	}
 
 	if (ret == IOU_OK) {
 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
@@ -1939,8 +1929,6 @@ void io_wq_submit_work(struct io_wq_work *work)
 	else
 		req_ref_get(req);
 
-	io_arm_ltimeout(req);
-
 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
 	if (work->flags & IO_WQ_WORK_CANCEL) {
 fail:
@@ -2036,15 +2024,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 static void io_queue_async(struct io_kiocb *req, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout;
-
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
 		io_req_defer_failed(req, ret);
 		return;
 	}
 
-	linked_timeout = io_prep_linked_timeout(req);
-
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
 		io_kbuf_recycle(req, 0);
@@ -2057,9 +2041,6 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 	case IO_APOLL_OK:
 		break;
 	}
-
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)
@@ -2073,9 +2054,7 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
 	 */
-	if (likely(!ret))
-		io_arm_ltimeout(req);
-	else
+	if (unlikely(ret))
 		io_queue_async(req, ret);
 }
 
Reference in New Issue
Block a user