io_uring: add support for IORING_SETUP_SQE_MIXED
Normal rings support 64b SQEs for posting submissions, while certain
features require the ring to be configured with IORING_SETUP_SQE128, as
they need to convey more information per submission. This, in turn,
makes ALL SQEs 128b in size. This is somewhat wasteful and inefficient,
particularly when only certain SQEs need to be of the bigger variant.
This adds support for setting up a ring with mixed SQE sizes, using
IORING_SETUP_SQE_MIXED. When set up in this mode, SQEs posted to the
ring may be either 64b or 128b in size. If an SQE is 128b in size, then
its opcode will be set to a 128b variant to indicate that this is the
case. Any other non-128b opcode will assume the SQ's default size.
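
As a rough sketch of what this means for an SQ producer: on a mixed
ring, the entry size can be derived from the opcode alone. The helper
below is hypothetical (not part of this patch or of liburing), using
the two 128b opcodes this patch introduces:

#include <linux/io_uring.h>

/*
 * Hypothetical helper: on an IORING_SETUP_SQE_MIXED ring, a 128b
 * opcode marks an SQE that spans two contiguous 64b slots; every
 * other opcode keeps the default 64b size.
 */
static inline unsigned int sqe_slots(unsigned char opcode)
{
	switch (opcode) {
	case IORING_OP_NOP128:
	case IORING_OP_URING_CMD128:
		return 2;	/* one 128b SQE == two 64b ring slots */
	default:
		return 1;
	}
}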
SQEs on these types of mixed rings may also utilize NOP with skip
success set. This can happen if the ring is one (small) SQE entry away
from wrapping and an attempt is made to get a 128b SQE. As SQEs must be
contiguous in the SQ ring, a 128b SQE cannot wrap the ring. For this
case, a single NOP SQE should be inserted with the SKIP_SUCCESS flag
set. The kernel will process this as a normal NOP, without posting a
CQE.
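
A minimal sketch of that wrap handling from userspace. The sq_view
bookkeeping struct and sq_slot() helper are made up for illustration,
and the caller is assumed to have already ensured at least three free
slots; only the opcodes, the IOSQE_CQE_SKIP_SUCCESS flag, and the SQE
layout come from the uapi:

#include <string.h>
#include <linux/io_uring.h>

/* Minimal local view of the SQ; field names are illustrative. */
struct sq_view {
	struct io_uring_sqe *sqes;	/* base of the 64b slot array */
	unsigned int entries;		/* power-of-two ring size */
	unsigned int tail;		/* local, not yet submitted */
};

static struct io_uring_sqe *sq_slot(struct sq_view *sq, unsigned int idx)
{
	return &sq->sqes[idx & (sq->entries - 1)];
}

/*
 * Grab a 128b SQE on a mixed ring. A 128b SQE must occupy two
 * contiguous slots, so if only the last slot before the wrap is
 * left, pad it with a NOP carrying IOSQE_CQE_SKIP_SUCCESS; the
 * kernel consumes that NOP without posting a CQE.
 */
static struct io_uring_sqe *get_sqe128(struct sq_view *sq)
{
	unsigned int mask = sq->entries - 1;
	struct io_uring_sqe *sqe;

	if ((sq->tail & mask) == mask) {
		struct io_uring_sqe *nop = sq_slot(sq, sq->tail++);

		memset(nop, 0, sizeof(*nop));
		nop->opcode = IORING_OP_NOP;
		nop->flags = IOSQE_CQE_SKIP_SUCCESS;
	}
	sqe = sq_slot(sq, sq->tail);
	sq->tail += 2;
	memset(sqe, 0, 2 * sizeof(*sqe));
	return sqe;	/* caller sets a 128b opcode, e.g. IORING_OP_URING_CMD128 */
}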
Signed-off-by: Keith Busch <kbusch@kernel.org>
[axboe: {} style fix and assign sqe before opcode read]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b6d8a032e
commit 1cba30bf9f
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
@@ -231,6 +231,12 @@ enum io_uring_sqe_flags_bit {
  */
 #define IORING_SETUP_CQE_MIXED	(1U << 18)
 
+/*
+ * Allow both 64b and 128b SQEs. If a 128b SQE is posted, it will have
+ * a 128b opcode.
+ */
+#define IORING_SETUP_SQE_MIXED	(1U << 19)
+
 enum io_uring_op {
 	IORING_OP_NOP,
 	IORING_OP_READV,
@@ -295,6 +301,8 @@ enum io_uring_op {
 	IORING_OP_READV_FIXED,
 	IORING_OP_WRITEV_FIXED,
 	IORING_OP_PIPE,
+	IORING_OP_NOP128,
+	IORING_OP_URING_CMD128,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
@@ -14,6 +14,7 @@
 #include "fdinfo.h"
 #include "cancel.h"
 #include "rsrc.h"
+#include "opdef.h"
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
@@ -66,7 +67,6 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	unsigned int cq_head = READ_ONCE(r->cq.head);
 	unsigned int cq_tail = READ_ONCE(r->cq.tail);
 	unsigned int sq_shift = 0;
-	unsigned int sq_entries;
 	int sq_pid = -1, sq_cpu = -1;
 	u64 sq_total_time = 0, sq_work_time = 0;
 	unsigned int i;
@@ -89,26 +89,45 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	seq_printf(m, "CqTail:\t%u\n", cq_tail);
 	seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
 	seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
-	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
-	for (i = 0; i < sq_entries; i++) {
-		unsigned int entry = i + sq_head;
+	while (sq_head < sq_tail) {
 		struct io_uring_sqe *sqe;
 		unsigned int sq_idx;
+		bool sqe128 = false;
+		u8 opcode;
 
 		if (ctx->flags & IORING_SETUP_NO_SQARRAY)
 			break;
-		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
+		sq_idx = READ_ONCE(ctx->sq_array[sq_head & sq_mask]);
 		if (sq_idx > sq_mask)
 			continue;
 		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
+		opcode = READ_ONCE(sqe->opcode);
+		if (sq_shift) {
+			sqe128 = true;
+		} else if (io_issue_defs[opcode].is_128) {
+			if (!(ctx->flags & IORING_SETUP_SQE_MIXED)) {
+				seq_printf(m,
+					   "%5u: invalid sqe, 128B entry on non-mixed sq\n",
+					   sq_idx);
+				break;
+			}
+			if ((++sq_head & sq_mask) == 0) {
+				seq_printf(m,
+					   "%5u: corrupted sqe, wrapping 128B entry\n",
+					   sq_idx);
+				break;
+			}
+			sqe128 = true;
+		}
 		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
 			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
 			      "user_data:%llu",
-			   sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
+			   sq_idx, io_uring_get_opcode(opcode), sqe->fd,
 			   sqe->flags, (unsigned long long) sqe->off,
 			   (unsigned long long) sqe->addr, sqe->rw_flags,
 			   sqe->buf_index, sqe->user_data);
-		if (sq_shift) {
+		if (sqe128) {
 			u64 *sqeb = (void *) (sqe + 1);
 			int size = sizeof(struct io_uring_sqe) / sizeof(u64);
 			int j;
@@ -120,6 +139,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 			}
 		}
 		seq_printf(m, "\n");
+		sq_head++;
 	}
 	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
 	while (cq_head < cq_tail) {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
@@ -2164,7 +2164,7 @@ static __cold int io_init_fail_req(struct io_kiocb *req, int err)
 }
 
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
-		       const struct io_uring_sqe *sqe)
+		       const struct io_uring_sqe *sqe, unsigned int *left)
 	__must_hold(&ctx->uring_lock)
 {
 	const struct io_issue_def *def;
@@ -2190,6 +2190,24 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	opcode = array_index_nospec(opcode, IORING_OP_LAST);
 
 	def = &io_issue_defs[opcode];
+	if (def->is_128 && !(ctx->flags & IORING_SETUP_SQE128)) {
+		/*
+		 * A 128b op on a non-128b SQ requires mixed SQE support as
+		 * well as 2 contiguous entries.
+		 */
+		if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 ||
+		    !(ctx->cached_sq_head & (ctx->sq_entries - 1)))
+			return io_init_fail_req(req, -EINVAL);
+		/*
+		 * A 128b operation on a mixed SQ uses two entries, so we have
+		 * to increment the head and cached refs, and decrement what's
+		 * left.
+		 */
+		current->io_uring->cached_refs++;
+		ctx->cached_sq_head++;
+		(*left)--;
+	}
+
 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
 		/* enforce forwards compatibility on users */
 		if (sqe_flags & ~SQE_VALID_FLAGS)
@@ -2299,13 +2317,13 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
 }
 
 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-				const struct io_uring_sqe *sqe)
+				const struct io_uring_sqe *sqe, unsigned int *left)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_submit_link *link = &ctx->submit_state.link;
 	int ret;
 
-	ret = io_init_req(ctx, req, sqe);
+	ret = io_init_req(ctx, req, sqe, left);
 	if (unlikely(ret))
 		return io_submit_fail_init(sqe, req, ret);
 
@@ -2457,7 +2475,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		 * Continue submitting even for sqe failure if the
 		 * ring was setup with IORING_SETUP_SUBMIT_ALL
 		 */
-		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
+		if (unlikely(io_submit_sqe(ctx, req, sqe, &left)) &&
 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
 			left--;
 			break;
@@ -2802,6 +2820,10 @@ unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
 		if (cq_entries < 2)
 			return SIZE_MAX;
 	}
+	if (flags & IORING_SETUP_SQE_MIXED) {
+		if (sq_entries < 2)
+			return SIZE_MAX;
+	}
 
 #ifdef CONFIG_SMP
 	off = ALIGN(off, SMP_CACHE_BYTES);
@@ -3726,6 +3748,13 @@ static int io_uring_sanitise_params(struct io_uring_params *p)
 	if ((flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)) ==
 			(IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED))
 		return -EINVAL;
+	/*
+	 * Nonsensical to ask for SQE128 and mixed SQE support, it's not
+	 * supported to post 64b SQEs on a ring setup with SQE128.
+	 */
+	if ((flags & (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED)) ==
+			(IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED))
+		return -EINVAL;
 
 	return 0;
 }
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
@@ -54,7 +54,8 @@
 			IORING_SETUP_REGISTERED_FD_ONLY |\
 			IORING_SETUP_NO_SQARRAY |\
 			IORING_SETUP_HYBRID_IOPOLL |\
-			IORING_SETUP_CQE_MIXED)
+			IORING_SETUP_CQE_MIXED |\
+			IORING_SETUP_SQE_MIXED)
 
 #define IORING_ENTER_FLAGS	(IORING_ENTER_GETEVENTS |\
 				 IORING_ENTER_SQ_WAKEUP |\
@@ -565,17 +566,6 @@ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 	io_req_task_work_add(req);
 }
 
-/*
- * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
- * slot.
- */
-static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
-{
-	if (ctx->flags & IORING_SETUP_SQE128)
-		return 2 * sizeof(struct io_uring_sqe);
-	return sizeof(struct io_uring_sqe);
-}
-
 static inline bool io_file_can_poll(struct io_kiocb *req)
 {
 	if (req->flags & REQ_F_CAN_POLL)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
@@ -575,6 +575,24 @@ const struct io_issue_def io_issue_defs[] = {
 		.prep			= io_pipe_prep,
 		.issue			= io_pipe,
 	},
+	[IORING_OP_NOP128] = {
+		.audit_skip		= 1,
+		.iopoll			= 1,
+		.is_128			= 1,
+		.prep			= io_nop_prep,
+		.issue			= io_nop,
+	},
+	[IORING_OP_URING_CMD128] = {
+		.buffer_select		= 1,
+		.needs_file		= 1,
+		.plug			= 1,
+		.iopoll			= 1,
+		.iopoll_queue		= 1,
+		.is_128			= 1,
+		.async_size		= sizeof(struct io_async_cmd),
+		.prep			= io_uring_cmd_prep,
+		.issue			= io_uring_cmd,
+	},
 };
 
 const struct io_cold_def io_cold_defs[] = {
@@ -825,6 +843,14 @@ const struct io_cold_def io_cold_defs[] = {
 	[IORING_OP_PIPE] = {
 		.name			= "PIPE",
 	},
+	[IORING_OP_NOP128] = {
+		.name			= "NOP128",
+	},
+	[IORING_OP_URING_CMD128] = {
+		.name			= "URING_CMD128",
+		.sqe_copy		= io_uring_cmd_sqe_copy,
+		.cleanup		= io_uring_cmd_cleanup,
+	},
 };
 
 const char *io_uring_get_opcode(u8 opcode)
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
@@ -27,6 +27,8 @@ struct io_issue_def {
 	unsigned		iopoll_queue : 1;
 	/* vectored opcode, set if 1) vectored, and 2) handler needs to know */
 	unsigned		vectored : 1;
+	/* set to 1 if this opcode uses 128b sqes in a mixed sq */
+	unsigned		is_128 : 1;
 
 	/* size of async data needed, if any */
 	unsigned short		async_size;
diff --git a/io_uring/register.c b/io_uring/register.c
@@ -394,7 +394,7 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
 #define RESIZE_FLAGS	(IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP)
 #define COPY_FLAGS	(IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQE128 | \
 			 IORING_SETUP_CQE32 | IORING_SETUP_NO_MMAP | \
-			 IORING_SETUP_CQE_MIXED)
+			 IORING_SETUP_CQE_MIXED | IORING_SETUP_SQE_MIXED)
 
 static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 {
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
@@ -216,6 +216,18 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
+/*
+ * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
+ * slot.
+ */
+static inline size_t uring_sqe_size(struct io_kiocb *req)
+{
+	if (req->ctx->flags & IORING_SETUP_SQE128 ||
+	    req->opcode == IORING_OP_URING_CMD128)
+		return 2 * sizeof(struct io_uring_sqe);
+	return sizeof(struct io_uring_sqe);
+}
+
 void io_uring_cmd_sqe_copy(struct io_kiocb *req)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
@@ -224,7 +236,7 @@ void io_uring_cmd_sqe_copy(struct io_kiocb *req)
 	/* Should not happen, as REQ_F_SQE_COPIED covers this */
 	if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
 		return;
-	memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
+	memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req));
 	ioucmd->sqe = ac->sqes;
 }
 
@@ -242,7 +254,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret)
 		return ret;
 
-	if (ctx->flags & IORING_SETUP_SQE128)
+	if (ctx->flags & IORING_SETUP_SQE128 ||
+	    req->opcode == IORING_OP_URING_CMD128)
 		issue_flags |= IO_URING_F_SQE128;
 	if (ctx->flags & (IORING_SETUP_CQE32 | IORING_SETUP_CQE_MIXED))
 		issue_flags |= IO_URING_F_CQE32;