idpf: implement Rx path for AF_XDP

Implement Rx packet processing specific to AF_XDP ZC using the libeth
XSk infra. Initialize queue registers before allocating buffers to
avoid redundant ifs when updating the queue tail.

Co-developed-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Alexander Lobakin, 2025-09-11 18:22:32 +02:00 (committed by Tony Nguyen)
parent 8ff6d62261
commit 9705d6552f
6 changed files with 405 additions and 21 deletions
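
The zero-copy Rx path added below is only exercised once userspace binds an AF_XDP socket to the queue with zero-copy enabled. For context, a rough userspace sketch of that binding using the libxdp xsk helpers follows; the interface name, queue id, frame count and ring sizes are illustrative assumptions and not part of this patch:

/* Minimal AF_XDP zero-copy Rx bind sketch (userspace, libxdp).
 * Illustrative only: "eth0", queue 0 and the sizes below are assumptions.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <linux/if_xdp.h>
#include <xdp/xsk.h>

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE	/* 4096, stride-aligned */

int main(void)
{
	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp, rx;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	void *bufs;
	int ret;

	/* UMEM backing store; with ZC the driver posts these frames to HW */
	bufs = mmap(NULL, NUM_FRAMES * FRAME_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (bufs == MAP_FAILED)
		return 1;

	ret = xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
			       &fill, &comp, NULL);
	if (ret)
		return 1;

	/* Force zero-copy so the driver's XSk Rx path is used */
	const struct xsk_socket_config cfg = {
		.rx_size	= XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size	= XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags	= XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};

	ret = xsk_socket__create(&xsk, "eth0" /* assumed ifname */, 0 /* qid */,
				 umem, &rx, NULL, &cfg);
	if (ret) {
		fprintf(stderr, "zero-copy bind failed: %d\n", ret);
		return 1;
	}

	/* ... populate the fill ring and poll the rx ring here ... */

	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	munmap(bufs, NUM_FRAMES * FRAME_SIZE);
	return 0;
}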


@@ -1424,16 +1424,16 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 		goto queues_rel;
 	}
 
-	err = idpf_rx_bufs_init_all(vport);
+	err = idpf_queue_reg_init(vport);
 	if (err) {
-		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
 			vport->vport_id, err);
 		goto queues_rel;
 	}
 
-	err = idpf_queue_reg_init(vport);
+	err = idpf_rx_bufs_init_all(vport);
 	if (err) {
-		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
 			vport->vport_id, err);
 		goto queues_rel;
 	}


@@ -389,6 +389,11 @@ static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
 	if (!bufq->buf)
 		return;
 
+	if (idpf_queue_has(XSK, bufq)) {
+		idpf_xskfq_rel(bufq);
+		return;
+	}
+
 	/* Free all the bufs allocated and given to hw on Rx queue */
 	for (u32 i = 0; i < bufq->desc_count; i++)
 		idpf_rx_page_rel(&bufq->buf[i]);
@@ -437,11 +442,14 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
 	if (!rxq)
 		return;
 
-	libeth_xdp_return_stash(&rxq->xdp);
+	if (!idpf_queue_has(XSK, rxq))
+		libeth_xdp_return_stash(&rxq->xdp);
 
 	if (!idpf_is_queue_model_split(model))
 		idpf_rx_buf_rel_all(rxq);
 
+	idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
 	rxq->next_to_alloc = 0;
 	rxq->next_to_clean = 0;
 	rxq->next_to_use = 0;
@@ -464,6 +472,7 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
 		return;
 
 	idpf_rx_buf_rel_bufq(bufq);
+	idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
 
 	bufq->next_to_alloc = 0;
 	bufq->next_to_clean = 0;
@@ -751,6 +760,9 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
 	};
 	int ret;
 
+	if (idpf_queue_has(XSK, bufq))
+		return idpf_xskfq_init(bufq);
+
 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
 	if (ret)
 		return ret;
@@ -846,6 +858,8 @@ static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
 	rxq->next_to_use = 0;
 	idpf_queue_set(GEN_CHK, rxq);
 
+	idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
 	return 0;
 }
@@ -871,9 +885,10 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
 	bufq->next_to_alloc = 0;
 	bufq->next_to_clean = 0;
 	bufq->next_to_use = 0;
-
 	idpf_queue_set(GEN_CHK, bufq);
 
+	idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+
 	return 0;
 }
@@ -3381,9 +3396,9 @@ __idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
 	return 0;
 }
 
-static bool idpf_rx_process_skb_fields(struct sk_buff *skb,
-				       const struct libeth_xdp_buff *xdp,
-				       struct libeth_rq_napi_stats *rs)
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+				const struct libeth_xdp_buff *xdp,
+				struct libeth_rq_napi_stats *rs)
 {
 	struct idpf_rx_queue *rxq;
@@ -4242,7 +4257,9 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
 		struct idpf_rx_queue *rxq = q_vec->rx[i];
 		int pkts_cleaned_per_q;
 
-		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+		pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
+				     idpf_xskrq_poll(rxq, budget_per_q) :
+				     idpf_rx_splitq_clean(rxq, budget_per_q);
 		/* if we clean as many as budgeted, we must not be done */
 		if (pkts_cleaned_per_q >= budget_per_q)
 			clean_complete = false;
@@ -4252,8 +4269,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
 
 	nid = numa_mem_id();
 
-	for (i = 0; i < q_vec->num_bufq; i++)
-		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+	for (i = 0; i < q_vec->num_bufq; i++) {
+		if (!idpf_queue_has(XSK, q_vec->bufq[i]))
+			idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+	}
 
 	return clean_complete;
 }


@@ -141,6 +141,8 @@ do { \
 #define IDPF_TX_FLAGS_TUNNEL	BIT(3)
 #define IDPF_TX_FLAGS_TSYN	BIT(4)
 
+struct libeth_rq_napi_stats;
+
 union idpf_tx_flex_desc {
 	struct idpf_flex_tx_desc q; /* queue based scheduling */
 	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
@@ -491,6 +493,8 @@ struct idpf_tx_queue_stats {
  * @next_to_clean: Next descriptor to clean
  * @next_to_alloc: RX buffer to allocate at
  * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
  * @cached_phc_time: Cached PHC time for the Rx queue
  * @stats_sync: See struct u64_stats_sync
  * @q_stats: See union idpf_rx_queue_stats
@@ -546,7 +550,13 @@ struct idpf_rx_queue {
 	u32 next_to_clean;
 	u32 next_to_alloc;
 
-	struct libeth_xdp_buff_stash xdp;
+	union {
+		struct libeth_xdp_buff_stash xdp;
+		struct {
+			struct libeth_xdp_buff *xsk;
+			struct xsk_buff_pool *pool;
+		};
+	};
 
 	u64 cached_phc_time;
 	struct u64_stats_sync stats_sync;
@@ -711,16 +721,20 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
 /**
  * struct idpf_buf_queue - software structure representing a buffer queue
  * @split_buf: buffer descriptor array
- * @hdr_buf: &libeth_fqe for header buffers
- * @hdr_pp: &page_pool for header buffers
  * @buf: &libeth_fqe for data buffers
 * @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
  * @tail: Tail offset
  * @flags: See enum idpf_queue_flags_t
  * @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
  * @next_to_use: Next descriptor to use
  * @next_to_clean: Next descriptor to clean
  * @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (Xsk)
 * @hdr_truesize: truesize for buffer headers
 * @truesize: truesize for data buffers
 * @q_id: Queue id
@@ -734,14 +748,24 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
 struct idpf_buf_queue {
 	__cacheline_group_begin_aligned(read_mostly);
 	struct virtchnl2_splitq_rx_buf_desc *split_buf;
+	union {
+		struct {
+			struct libeth_fqe *buf;
+			struct page_pool *pp;
+		};
+		struct {
+			struct libeth_xdp_buff **xsk_buf;
+			struct xsk_buff_pool *pool;
+		};
+	};
 	struct libeth_fqe *hdr_buf;
 	struct page_pool *hdr_pp;
-	struct libeth_fqe *buf;
-	struct page_pool *pp;
 	void __iomem *tail;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 	u32 desc_count;
+
+	u32 thresh;
 	__cacheline_group_end_aligned(read_mostly);
 
 	__cacheline_group_begin_aligned(read_write);
@@ -749,6 +773,7 @@ struct idpf_buf_queue {
 	u32 next_to_clean;
 	u32 next_to_alloc;
 
+	u32 pending;
 	u32 hdr_truesize;
 	u32 truesize;
 	__cacheline_group_end_aligned(read_write);
@@ -1079,6 +1104,9 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+				const struct libeth_xdp_buff *xdp,
+				struct libeth_rq_napi_stats *rs);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);


@@ -46,7 +46,6 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
 {
 	const struct idpf_vport *vport = rxq->q_vector->vport;
 	bool split = idpf_is_queue_model_split(vport->rxq_model);
-	const struct page_pool *pp;
 	int err;
 
 	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
@@ -55,8 +54,18 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
 	if (err)
 		return err;
 
-	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
-	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+	if (idpf_queue_has(XSK, rxq)) {
+		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+						 MEM_TYPE_XSK_BUFF_POOL,
+						 rxq->pool);
+		if (err)
+			goto unreg;
+	} else {
+		const struct page_pool *pp;
+
+		pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
+		xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+	}
 
 	if (!split)
 		return 0;
@@ -65,6 +74,11 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
 	rxq->num_xdp_txq = vport->num_xdp_txq;
 
 	return 0;
+
+unreg:
+	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+	return err;
 }
 
 int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
@@ -84,7 +98,9 @@ static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
 		rxq->num_xdp_txq = 0;
 	}
 
-	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+	if (!idpf_queue_has(XSK, rxq))
+		xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+
 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 
 	return 0;


@@ -9,6 +9,47 @@
 
 static void idpf_xsk_tx_timer(struct work_struct *work);
 
+static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
+			       struct idpf_rx_queue *rxq)
+{
+	struct xsk_buff_pool *pool;
+
+	pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
+	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+		return;
+
+	rxq->pool = pool;
+
+	idpf_queue_set(XSK, rxq);
+}
+
+static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
+				struct idpf_buf_queue *bufq)
+{
+	struct xsk_buff_pool *pool;
+	u32 qid = U32_MAX;
+
+	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+		const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+		for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+			if (&grp->splitq.bufq_sets[j].bufq == bufq) {
+				qid = grp->splitq.rxq_sets[0]->rxq.idx;
+				goto setup;
+			}
+		}
+	}
+
+setup:
+	pool = xsk_get_pool_from_qid(vport->netdev, qid);
+	if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+		return;
+
+	bufq->pool = pool;
+
+	idpf_queue_set(XSK, bufq);
+}
+
 static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
 			       struct idpf_tx_queue *txq)
 {
@@ -61,6 +102,12 @@ void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
 		return;
 
 	switch (type) {
+	case VIRTCHNL2_QUEUE_TYPE_RX:
+		idpf_xsk_setup_rxq(vport, q);
+		break;
+	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+		idpf_xsk_setup_bufq(vport, q);
+		break;
 	case VIRTCHNL2_QUEUE_TYPE_TX:
 		idpf_xsk_setup_txq(vport, q);
 		break;
@@ -75,9 +122,25 @@
 void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
 {
 	struct idpf_compl_queue *complq;
+	struct idpf_buf_queue *bufq;
+	struct idpf_rx_queue *rxq;
 	struct idpf_tx_queue *txq;
 
 	switch (type) {
+	case VIRTCHNL2_QUEUE_TYPE_RX:
+		rxq = q;
+		if (!idpf_queue_has_clear(XSK, rxq))
+			return;
+
+		rxq->pool = NULL;
+		break;
+	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+		bufq = q;
+		if (!idpf_queue_has_clear(XSK, bufq))
+			return;
+
+		bufq->pool = NULL;
+		break;
 	case VIRTCHNL2_QUEUE_TYPE_TX:
 		txq = q;
 		if (!idpf_queue_has_clear(XSK, txq))
@@ -197,6 +260,31 @@ static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
 	return done_frames;
 }
 
+static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+	struct idpf_tx_queue *xdpsq = _xdpsq;
+	u32 free;
+
+	libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+	free = xdpsq->desc_count - xdpsq->pending;
+	if (free < xdpsq->thresh)
+		free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);
+
+	*sq = (struct libeth_xdpsq){
+		.pool		= xdpsq->pool,
+		.sqes		= xdpsq->tx_buf,
+		.descs		= xdpsq->desc_ring,
+		.count		= xdpsq->desc_count,
+		.lock		= &xdpsq->xdp_lock,
+		.ntu		= &xdpsq->next_to_use,
+		.pending	= &xdpsq->pending,
+		.xdp_tx		= &xdpsq->xdp_tx,
+	};
+
+	return free;
+}
+
 static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
 {
 	struct idpf_tx_queue *xdpsq = _xdpsq;
@@ -236,8 +324,225 @@ bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
 
 LIBETH_XDP_DEFINE_START();
 LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
+LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
+			   idpf_xdp_tx_xmit);
+LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
+		      idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
+			   idpf_xdp_tx_finalize);
 LIBETH_XDP_DEFINE_END();
 
+static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
+{
+	struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;
+
+	desc = &desc[i];
+#ifdef __LIBETH_WORD_ACCESS
+	*(u64 *)&desc->qword0 = i;
+#else
+	desc->qword0.buf_id = cpu_to_le16(i);
+#endif
+	desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+}
+
+static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
+{
+	struct libeth_xskfq_fp fq = {
+		.pool	= bufq->pool,
+		.fqes	= bufq->xsk_buf,
+		.descs	= bufq->split_buf,
+		.ntu	= bufq->next_to_use,
+		.count	= bufq->desc_count,
+	};
+	u32 done;
+
+	done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
+	writel(fq.ntu, bufq->tail);
+
+	bufq->next_to_use = fq.ntu;
+	bufq->pending -= done;
+
+	return done == count;
+}
+
+static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
+{
+	u32 count, rx_thresh = bufq->thresh;
+
+	count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);
+
+	for (u32 i = 0; i < count; i += rx_thresh) {
+		if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
+			return false;
+	}
+
+	return true;
+}
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq)
+{
+	struct libeth_xskfq fq = {
+		.pool	= bufq->pool,
+		.count	= bufq->desc_count,
+		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
+	};
+	int ret;
+
+	ret = libeth_xskfq_create(&fq);
+	if (ret)
+		return ret;
+
+	bufq->xsk_buf = fq.fqes;
+	bufq->pending = fq.pending;
+	bufq->thresh = fq.thresh;
+	bufq->rx_buf_size = fq.buf_len;
+
+	if (!idpf_xskfq_refill(bufq))
+		netdev_err(bufq->pool->netdev,
+			   "failed to allocate XSk buffers for qid %d\n",
+			   bufq->pool->queue_id);
+
+	bufq->next_to_alloc = bufq->next_to_use;
+
+	idpf_queue_clear(HSPLIT_EN, bufq);
+	bufq->rx_hbuf_size = 0;
+
+	return 0;
+}
+
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
+{
+	struct libeth_xskfq fq = {
+		.fqes	= bufq->xsk_buf,
+	};
+
+	libeth_xskfq_destroy(&fq);
+
+	bufq->rx_buf_size = fq.buf_len;
+	bufq->thresh = fq.thresh;
+	bufq->pending = fq.pending;
+}
+
+struct idpf_xskfq_refill_set {
+	struct {
+		struct idpf_buf_queue	*q;
+		u32			buf_id;
+		u32			pending;
+	} bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+};
+
+static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
+{
+	bool ret = true;
+
+	for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
+		struct idpf_buf_queue *bufq = set->bufqs[i].q;
+		u32 ntc;
+
+		if (!bufq)
+			continue;
+
+		ntc = set->bufqs[i].buf_id;
+		if (unlikely(++ntc == bufq->desc_count))
+			ntc = 0;
+
+		bufq->next_to_clean = ntc;
+		bufq->pending += set->bufqs[i].pending;
+
+		if (bufq->pending > bufq->thresh)
+			ret &= idpf_xskfq_refill(bufq);
+	}
+
+	return ret;
+}
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
+{
+	struct idpf_xskfq_refill_set set = { };
+	struct libeth_rq_napi_stats rs = { };
+	bool wake, gen, fail = false;
+	u32 ntc = rxq->next_to_clean;
+	struct libeth_xdp_buff *xdp;
+	LIBETH_XDP_ONSTACK_BULK(bq);
+	u32 cnt = rxq->desc_count;
+
+	wake = xsk_uses_need_wakeup(rxq->pool);
+	if (wake)
+		xsk_clear_rx_need_wakeup(rxq->pool);
+
+	gen = idpf_queue_has(GEN_CHK, rxq);
+
+	libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+				rxq->xdpsqs, rxq->num_xdp_txq);
+	xdp = rxq->xsk;
+
+	while (likely(rs.packets < budget)) {
+		const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+		struct idpf_xdp_rx_desc desc __uninitialized;
+		struct idpf_buf_queue *bufq;
+		u32 bufq_id, buf_id;
+
+		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+
+		idpf_xdp_get_qw0(&desc, rx_desc);
+		if (idpf_xdp_rx_gen(&desc) != gen)
+			break;
+
+		dma_rmb();
+
+		bufq_id = idpf_xdp_rx_bufq(&desc);
+		bufq = set.bufqs[bufq_id].q;
+		if (!bufq) {
+			bufq = &rxq->bufq_sets[bufq_id].bufq;
+			set.bufqs[bufq_id].q = bufq;
+		}
+
+		idpf_xdp_get_qw1(&desc, rx_desc);
+		buf_id = idpf_xdp_rx_buf(&desc);
+
+		set.bufqs[bufq_id].buf_id = buf_id;
+		set.bufqs[bufq_id].pending++;
+
+		xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
+					      idpf_xdp_rx_len(&desc));
+
+		if (unlikely(++ntc == cnt)) {
+			ntc = 0;
+			gen = !gen;
+			idpf_queue_change(GEN_CHK, rxq);
+		}
+
+		if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
+			continue;
+
+		fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
+		xdp = NULL;
+
+		if (fail)
+			break;
+	}
+
+	idpf_xsk_finalize_rx(&bq);
+
+	rxq->next_to_clean = ntc;
+	rxq->xsk = xdp;
+
+	fail |= !idpf_xskfq_refill_set(&set);
+
+	u64_stats_update_begin(&rxq->stats_sync);
+	u64_stats_add(&rxq->q_stats.packets, rs.packets);
+	u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+	u64_stats_update_end(&rxq->stats_sync);
+
+	if (!wake)
+		return unlikely(fail) ? budget : rs.packets;
+
+	if (unlikely(fail))
+		xsk_set_rx_need_wakeup(rxq->pool);
+
+	return rs.packets;
+}
+
 int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
 {
 	struct xsk_buff_pool *pool = bpf->xsk.pool;
@@ -245,6 +550,16 @@ int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
 	bool restart;
 	int ret;
 
+	if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
+				LIBETH_RX_BUF_STRIDE)) {
+		NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+				       "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
+				       netdev_name(vport->netdev),
+				       LIBETH_RX_BUF_STRIDE, qid,
+				       xsk_pool_get_rx_frame_size(pool));
+		return -EINVAL;
+	}
+
 	restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
 	if (!restart)
 		goto pool;


@@ -7,6 +7,8 @@
 #include <linux/types.h>
 
 enum virtchnl2_queue_type;
+struct idpf_buf_queue;
+struct idpf_rx_queue;
 struct idpf_tx_queue;
 struct idpf_vport;
 struct netdev_bpf;
@@ -15,7 +17,11 @@ void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
 			  enum virtchnl2_queue_type type);
 void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+int idpf_xskfq_init(struct idpf_buf_queue *bufq);
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
 void idpf_xsksq_clean(struct idpf_tx_queue *xdpq);
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
 bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
 int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
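
For completeness, the need_wakeup handshake that idpf_xskrq_poll() participates in (it sets the Rx need_wakeup flag when processing or refilling fails) is driven from userspace roughly as in the loop below. This is again an illustrative libxdp sketch reusing the socket and rings from the earlier example, not part of the patch:

/* Illustrative Rx consumer loop (userspace, libxdp). When the driver
 * flags need_wakeup on the fill ring, userspace kicks it with a syscall.
 */
#include <sys/socket.h>
#include <xdp/xsk.h>

static void rx_loop(struct xsk_socket *xsk, struct xsk_ring_cons *rx,
		    struct xsk_ring_prod *fill)
{
	for (;;) {
		__u32 idx_rx = 0, idx_fq = 0;
		__u32 rcvd, i;

		rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);
		if (!rcvd) {
			/* Driver asked to be woken up, e.g. it ran dry */
			if (xsk_ring_prod__needs_wakeup(fill))
				recvfrom(xsk_socket__fd(xsk), NULL, 0,
					 MSG_DONTWAIT, NULL, NULL);
			continue;
		}

		/* Return every received frame to the fill ring
		 * (a real application would retry a short reserve).
		 */
		if (xsk_ring_prod__reserve(fill, rcvd, &idx_fq) == rcvd) {
			for (i = 0; i < rcvd; i++)
				*xsk_ring_prod__fill_addr(fill, idx_fq++) =
					xsk_ring_cons__rx_desc(rx, idx_rx + i)->addr;
			xsk_ring_prod__submit(fill, rcvd);
		}

		xsk_ring_cons__release(rx, rcvd);
	}
}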