io_uring: add wrapper type for io_req_tw_func_t arg
In preparation for uring_cmd implementations to implement functions with the io_req_tw_func_t signature, introduce a wrapper struct io_tw_req to hide the struct io_kiocb * argument. The intention is for only the io_uring core to access the inner struct io_kiocb *. uring_cmd implementations should instead call a helper from io_uring/cmd.h to convert struct io_tw_req to struct io_uring_cmd *.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4531d165ee
commit c33e779aba
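The helper mentioned in the commit message is not added by this commit. As a rough sketch of the intended split, assuming a converter named io_uring_cmd_from_tw_req() eventually lands in io_uring/cmd.h (the helper name and the driver callback name below are hypothetical; only the intent is stated above), uring_cmd code would unwrap the token without ever naming struct io_kiocb:

/*
 * Sketch only, not part of this commit: a possible io_uring/cmd.h helper
 * that keeps struct io_kiocb private to the io_uring core. The helper and
 * the driver callback names are made up for illustration.
 */
static inline struct io_uring_cmd *io_uring_cmd_from_tw_req(struct io_tw_req tw_req)
{
        /* only core io_uring code is meant to reach into tw_req.req */
        return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd);
}

/* A uring_cmd implementation's task-work callback would then look like: */
static void my_driver_cmd_tw(struct io_tw_req tw_req, io_tw_token_t tw)
{
        struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw_req(tw_req);

        /* ... complete the command via the public io_uring_cmd API ... */
}

With this split, the compound-literal wrapping (struct io_tw_req){req} and the unwrapping of tw_req.req stay confined to the core files changed below.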
include/linux/io_uring_types.h
@@ -615,7 +615,11 @@ enum {
 	REQ_F_SQE_COPIED	= IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
 };
 
-typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw);
+struct io_tw_req {
+	struct io_kiocb *req;
+};
+
+typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);
 
 struct io_task_work {
 	struct llist_node node;
io_uring/futex.c
@@ -41,24 +41,26 @@ void io_futex_cache_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->futex_cache, kfree);
 }
 
-static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
+static void __io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
-	hlist_del_init(&req->hash_node);
-	io_req_task_complete(req, tw);
+	hlist_del_init(&tw_req.req->hash_node);
+	io_req_task_complete(tw_req, tw);
 }
 
-static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
+static void io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_tw_lock(ctx, tw);
 	io_cache_free(&ctx->futex_cache, req->async_data);
 	io_req_async_data_clear(req, 0);
-	__io_futex_complete(req, tw);
+	__io_futex_complete(tw_req, tw);
 }
 
-static void io_futexv_complete(struct io_kiocb *req, io_tw_token_t tw)
+static void io_futexv_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
 	struct futex_vector *futexv = req->async_data;
 
@@ -73,7 +75,7 @@ static void io_futexv_complete(struct io_kiocb *req, io_tw_token_t tw)
 	}
 
 	io_req_async_data_free(req);
-	__io_futex_complete(req, tw);
+	__io_futex_complete(tw_req, tw);
 }
 
 static bool io_futexv_claim(struct io_futex *iof)
io_uring/io_uring.c
@@ -291,7 +291,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	mutex_lock(&ctx->uring_lock);
 	ts.cancel = io_should_terminate_tw(ctx);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
-		req->io_task_work.func(req, ts);
+		req->io_task_work.func((struct io_tw_req){req}, ts);
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
@@ -539,9 +539,9 @@ static void io_queue_iowq(struct io_kiocb *req)
 	io_wq_enqueue(tctx->io_wq, &req->work);
 }
 
-static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw)
+static void io_req_queue_iowq_tw(struct io_tw_req tw_req, io_tw_token_t tw)
 {
-	io_queue_iowq(req);
+	io_queue_iowq(tw_req.req);
 }
 
 void io_req_queue_iowq(struct io_kiocb *req)
@@ -1166,7 +1166,7 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
 		}
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
-				req, ts);
+				(struct io_tw_req){req}, ts);
 		node = next;
 		(*count)++;
 		if (unlikely(need_resched())) {
@@ -1389,7 +1389,7 @@ static int __io_run_local_work_loop(struct llist_node **node,
 						    io_task_work.node);
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
-				req, tw);
+				(struct io_tw_req){req}, tw);
 		*node = next;
 		if (++ret >= events)
 			break;
@@ -1459,14 +1459,17 @@ static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
 	return ret;
 }
 
-static void io_req_task_cancel(struct io_kiocb *req, io_tw_token_t tw)
+static void io_req_task_cancel(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
+
 	io_tw_lock(req->ctx, tw);
 	io_req_defer_failed(req, req->cqe.res);
 }
 
-void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw)
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_tw_lock(ctx, tw);
@@ -1702,9 +1705,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
 	return 0;
 }
 
-void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw)
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
-	io_req_complete_defer(req);
+	io_req_complete_defer(tw_req.req);
 }
 
 /*
io_uring/io_uring.h
@@ -149,9 +149,9 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
 void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
 void io_req_task_queue(struct io_kiocb *req);
-void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
-void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
 struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
 void tctx_task_work(struct callback_head *cb);
io_uring/msg_ring.c
@@ -70,8 +70,9 @@ static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
 	return target_ctx->task_complete;
 }
 
-static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
+static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
io_uring/notif.c
@@ -11,8 +11,9 @@
 
 static const struct ubuf_info_ops io_ubuf_ops;
 
-static void io_notif_tw_complete(struct io_kiocb *notif, io_tw_token_t tw)
+static void io_notif_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *notif = tw_req.req;
 	struct io_notif_data *nd = io_notif_to_data(notif);
 	struct io_ring_ctx *ctx = notif->ctx;
 
@@ -34,7 +35,7 @@ static void io_notif_tw_complete(struct io_kiocb *notif, io_tw_token_t tw)
 		}
 
 		nd = nd->next;
-		io_req_task_complete(notif, tw);
+		io_req_task_complete((struct io_tw_req){notif}, tw);
 	} while (nd);
 }
 
io_uring/poll.c
@@ -310,8 +310,9 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
 	return IOU_POLL_NO_ACTION;
 }
 
-void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
+void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	int ret;
 
 	ret = io_poll_check_events(req, tw);
@@ -332,7 +333,7 @@ void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
 		poll = io_kiocb_to_cmd(req, struct io_poll);
 		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
 	} else if (ret == IOU_POLL_REISSUE) {
-		io_req_task_submit(req, tw);
+		io_req_task_submit(tw_req, tw);
 		return;
 	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
 		req->cqe.res = ret;
@@ -340,14 +341,14 @@ void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
 		}
 
 		io_req_set_res(req, req->cqe.res, 0);
-		io_req_task_complete(req, tw);
+		io_req_task_complete(tw_req, tw);
 	} else {
 		io_tw_lock(req->ctx, tw);
 
 		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-			io_req_task_complete(req, tw);
+			io_req_task_complete(tw_req, tw);
 		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
-			io_req_task_submit(req, tw);
+			io_req_task_submit(tw_req, tw);
 		else
 			io_req_defer_failed(req, ret);
 	}
io_uring/poll.h
@@ -46,4 +46,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
 			bool cancel_all);
 
-void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw);
+void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw);
io_uring/rw.c
@@ -564,8 +564,9 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 	return res;
 }
 
-void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct kiocb *kiocb = &rw->kiocb;
 
@@ -581,7 +582,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
 		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
 
 	io_req_rw_cleanup(req, 0);
-	io_req_task_complete(req, tw);
+	io_req_task_complete(tw_req, tw);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res)
io_uring/rw.h
@@ -46,7 +46,7 @@ int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags);
 int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags);
 void io_readv_writev_cleanup(struct io_kiocb *req);
 void io_rw_fail(struct io_kiocb *req);
-void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw);
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw);
 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
 void io_rw_cache_free(const void *entry);
io_uring/timeout.c
@@ -68,8 +68,9 @@ static inline bool io_timeout_finish(struct io_timeout *timeout,
 
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
 
-static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)
+static void io_timeout_complete(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_timeout_data *data = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -85,7 +86,7 @@ static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)
 		}
 	}
 
-	io_req_task_complete(req, tw);
+	io_req_task_complete(tw_req, tw);
 }
 
 static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
@@ -157,8 +158,10 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 	io_flush_killed_timeouts(&list, 0);
 }
 
-static void io_req_tw_fail_links(struct io_kiocb *link, io_tw_token_t tw)
+static void io_req_tw_fail_links(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *link = tw_req.req;
+
 	io_tw_lock(link->ctx, tw);
 	while (link) {
 		struct io_kiocb *nxt = link->link;
@@ -168,7 +171,7 @@ static void io_req_tw_fail_links(struct io_kiocb *link, io_tw_token_t tw)
 			res = link->cqe.res;
 		link->link = NULL;
 		io_req_set_res(link, res, 0);
-		io_req_task_complete(link, tw);
+		io_req_task_complete((struct io_tw_req){link}, tw);
 		link = nxt;
 	}
 }
@@ -317,8 +320,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	return 0;
 }
 
-static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
+static void io_req_task_link_timeout(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_kiocb *prev = timeout->prev;
 	int ret;
@@ -335,11 +339,11 @@ static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
 			ret = -ECANCELED;
 		}
 		io_req_set_res(req, ret ?: -ETIME, 0);
-		io_req_task_complete(req, tw);
+		io_req_task_complete(tw_req, tw);
 		io_put_req(prev);
 	} else {
 		io_req_set_res(req, -ETIME, 0);
-		io_req_task_complete(req, tw);
+		io_req_task_complete(tw_req, tw);
 	}
 }
io_uring/uring_cmd.c
@@ -113,8 +113,9 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
 
-static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
+static void io_uring_cmd_work(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 	unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
io_uring/waitid.c
@@ -16,7 +16,7 @@
 #include "waitid.h"
 #include "../kernel/exit.h"
 
-static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw);
+static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw);
 
 #define IO_WAITID_CANCEL_FLAG	BIT(31)
 #define IO_WAITID_REF_MASK	GENMASK(30, 0)
@@ -194,8 +194,9 @@ static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
 	return true;
 }
 
-static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw)
+static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 {
+	struct io_kiocb *req = tw_req.req;
 	struct io_waitid_async *iwa = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -229,7 +230,7 @@ static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw)
 	}
 
 	io_waitid_complete(req, ret);
-	io_req_task_complete(req, tw);
+	io_req_task_complete(tw_req, tw);
 }
 
 static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,