From 90e11232a62bbd85e18c579c4b6b24030aa23d63 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Sun, 28 Jan 2024 20:08:24 -0700
Subject: [PATCH] io_uring: add io_file_can_poll() helper

Commit 95041b93e90a06bb613ec4bef9cd4d61570f68e4 upstream.

This adds a flag to avoid dereferencing file and then f_op to figure
out if the file has a poll handler defined or not. We generally call
this at least twice for networked workloads, and if using ring
provided buffers, we do it on every buffer selection. Particularly
the latter is troublesome, as it's otherwise a very fast operation.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
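The hot-path saving comes from io_file_can_poll(), added to
io_uring/io_uring.h below: once file_can_poll() has returned true for
a request, the result is cached in REQ_F_CAN_POLL, and later checks
only test a flag word in the request instead of chasing
req->file->f_op. Caching in the request is safe because the file a
request operates on does not change once it is assigned. As a rough
illustration of the same pattern, here is a minimal, self-contained
userspace sketch; the toy_* names are made up for the example and are
not kernel identifiers:

#include <stdbool.h>
#include <stdio.h>

struct toy_file_ops {
	int (*poll)(void);		/* NULL if this file type cannot poll */
};

struct toy_file {
	const struct toy_file_ops *f_op;
};

enum {
	TOY_F_CAN_POLL = 1 << 0,	/* models REQ_F_CAN_POLL */
};

struct toy_req {
	unsigned int flags;
	struct toy_file *file;
};

/* Slow path: two dependent pointer loads, like file_can_poll(). */
static bool toy_file_has_poll(struct toy_req *req)
{
	return req->file->f_op->poll != NULL;
}

/*
 * Fast path after the first hit: test only the request's own flags,
 * mirroring the shape of io_file_can_poll() in the patch below.
 */
static bool toy_io_file_can_poll(struct toy_req *req)
{
	if (req->flags & TOY_F_CAN_POLL)
		return true;
	if (toy_file_has_poll(req)) {
		req->flags |= TOY_F_CAN_POLL;
		return true;
	}
	return false;
}

static int dummy_poll(void) { return 0; }

int main(void)
{
	static const struct toy_file_ops ops = { .poll = dummy_poll };
	struct toy_file f = { .f_op = &ops };
	struct toy_req req = { .flags = 0, .file = &f };

	/* First call dereferences f_op and caches the answer ... */
	printf("first:  %d\n", toy_io_file_can_poll(&req));
	/* ... later calls (e.g. per buffer selection) only test the flag. */
	printf("second: %d\n", toy_io_file_can_poll(&req));
	return 0;
}

Note that, as in the kernel helper, only the positive answer is
cached: a non-pollable file still takes the two dereferences on every
call, which is fine since the flag exists to speed up the common
pollable (networked) case.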
 include/linux/io_uring_types.h |  3 +++
 io_uring/io_uring.c            |  2 +-
 io_uring/io_uring.h            | 12 ++++++++++++
 io_uring/kbuf.c                |  3 +--
 io_uring/poll.c                |  2 +-
 io_uring/rw.c                  |  4 ++--
 6 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 0fadef2983e0..dad2f9e4d53d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -416,6 +416,7 @@ enum {
 	/* keep async read/write and isreg together and in order */
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
+	REQ_F_CAN_POLL_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -483,6 +484,8 @@ enum {
 	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
 	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
 	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
+	/* file is pollable */
+	REQ_F_CAN_POLL		= BIT(REQ_F_CAN_POLL_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 43b46098279a..9c383afeaf8e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1953,7 +1953,7 @@ fail:
 	if (req->flags & REQ_F_FORCE_ASYNC) {
 		bool opcode_poll = def->pollin || def->pollout;
 
-		if (opcode_poll && file_can_poll(req->file)) {
+		if (opcode_poll && io_file_can_poll(req)) {
 			needs_poll = true;
 			issue_flags |= IO_URING_F_NONBLOCK;
 		}
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0ffec66deee7..59f5f71037ff 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -5,6 +5,7 @@
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
 #include <linux/kasan.h>
+#include <linux/poll.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -410,4 +411,15 @@ static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
 		return 2 * sizeof(struct io_uring_sqe);
 	return sizeof(struct io_uring_sqe);
 }
+
+static inline bool io_file_can_poll(struct io_kiocb *req)
+{
+	if (req->flags & REQ_F_CAN_POLL)
+		return true;
+	if (file_can_poll(req->file)) {
+		req->flags |= REQ_F_CAN_POLL;
+		return true;
+	}
+	return false;
+}
 #endif
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 8c6611fe4f46..addd7c973657 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -148,8 +148,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
 
-	if (issue_flags & IO_URING_F_UNLOCKED ||
-	    (req->file && !file_can_poll(req->file))) {
+	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
 		/*
 		 * If we came in unlocked, we have no choice but to consume the
 		 * buffer here, otherwise nothing ensures that the buffer won't
diff --git a/io_uring/poll.c b/io_uring/poll.c
index cf8e86bc96de..2390bf5f1710 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -717,7 +717,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 
 	if (!def->pollin && !def->pollout)
 		return IO_APOLL_ABORTED;
-	if (!file_can_poll(req->file))
+	if (!io_file_can_poll(req))
 		return IO_APOLL_ABORTED;
 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
 		mask |= EPOLLONESHOT;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 75b001febb4d..230f553d1428 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -629,7 +629,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 	 * just use poll if we can, and don't attempt if the fs doesn't
 	 * support callback based unlocks
 	 */
-	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+	if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 		return false;
 
 	wait->wait.func = io_async_buf_func;
@@ -783,7 +783,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
 		/* if we can poll, just do that */
-		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+		if (req->opcode == IORING_OP_READ && io_file_can_poll(req))
 			return -EAGAIN;
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))