bnxt_en: Refactor bnxt_free_tx_skbs() to free per TX ring

Modify bnxt_free_tx_skbs() to free the skbs per TX ring.
This will be useful later in the series.

Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://patch.msgid.link/20250213011240.1640031-6-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Somnath Kotur 2025-02-12 17:12:33 -08:00 committed by Jakub Kicinski
parent f33a508c23
commit 09cc58d594
1 changed file with 62 additions and 55 deletions
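
In outline, the change splits the old bnxt_free_tx_skbs() into a per-ring helper plus a thin loop over the rings. A condensed sketch of the resulting structure, with the helper body elided (the full diff follows):

static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
				       struct bnxt_tx_ring_info *txr, int idx)
{
	/* Unmap and free every pending skb or XDP_REDIRECT frame on this
	 * one ring, then reset its netdev TX queue (see the diff below).
	 */
}

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];

		if (!txr->tx_buf_ring)
			continue;

		bnxt_free_one_tx_ring_skbs(bp, txr, i);
	}
}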


@@ -3314,74 +3314,81 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+				       struct bnxt_tx_ring_info *txr, int idx)
 {
 	int i, max_idx;
 	struct pci_dev *pdev = bp->pdev;
 
+	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+	for (i = 0; i < max_idx;) {
+		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+		struct sk_buff *skb;
+		int j, last;
+
+		if (idx < bp->tx_nr_rings_xdp &&
+		    tx_buf->action == XDP_REDIRECT) {
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 dma_unmap_len(tx_buf, len),
+					 DMA_TO_DEVICE);
+			xdp_return_frame(tx_buf->xdpf);
+			tx_buf->action = 0;
+			tx_buf->xdpf = NULL;
+			i++;
+			continue;
+		}
+
+		skb = tx_buf->skb;
+		if (!skb) {
+			i++;
+			continue;
+		}
+
+		tx_buf->skb = NULL;
+
+		if (tx_buf->is_push) {
+			dev_kfree_skb(skb);
+			i += 2;
+			continue;
+		}
+
+		dma_unmap_single(&pdev->dev,
+				 dma_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb),
+				 DMA_TO_DEVICE);
+
+		last = tx_buf->nr_frags;
+		i += 2;
+		for (j = 0; j < last; j++, i++) {
+			int ring_idx = i & bp->tx_ring_mask;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+
+			tx_buf = &txr->tx_buf_ring[ring_idx];
+			dma_unmap_page(&pdev->dev,
+				       dma_unmap_addr(tx_buf, mapping),
+				       skb_frag_size(frag), DMA_TO_DEVICE);
+		}
+		dev_kfree_skb(skb);
+	}
+
+	netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+	int i;
+
 	if (!bp->tx_ring)
 		return;
 
-	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-		int j;
 
 		if (!txr->tx_buf_ring)
 			continue;
 
-		for (j = 0; j < max_idx;) {
-			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
-			struct sk_buff *skb;
-			int k, last;
-
-			if (i < bp->tx_nr_rings_xdp &&
-			    tx_buf->action == XDP_REDIRECT) {
-				dma_unmap_single(&pdev->dev,
-					dma_unmap_addr(tx_buf, mapping),
-					dma_unmap_len(tx_buf, len),
-					DMA_TO_DEVICE);
-				xdp_return_frame(tx_buf->xdpf);
-				tx_buf->action = 0;
-				tx_buf->xdpf = NULL;
-				j++;
-				continue;
-			}
-
-			skb = tx_buf->skb;
-			if (!skb) {
-				j++;
-				continue;
-			}
-
-			tx_buf->skb = NULL;
-
-			if (tx_buf->is_push) {
-				dev_kfree_skb(skb);
-				j += 2;
-				continue;
-			}
-
-			dma_unmap_single(&pdev->dev,
-					 dma_unmap_addr(tx_buf, mapping),
-					 skb_headlen(skb),
-					 DMA_TO_DEVICE);
-
-			last = tx_buf->nr_frags;
-			j += 2;
-			for (k = 0; k < last; k++, j++) {
-				int ring_idx = j & bp->tx_ring_mask;
-				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
-				tx_buf = &txr->tx_buf_ring[ring_idx];
-				dma_unmap_page(
-					&pdev->dev,
-					dma_unmap_addr(tx_buf, mapping),
-					skb_frag_size(frag), DMA_TO_DEVICE);
-			}
-			dev_kfree_skb(skb);
-		}
-		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+		bnxt_free_one_tx_ring_skbs(bp, txr, i);
 	}
 }
 
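
Why a per-ring helper is "useful later in the series": it lets a caller drain a single TX ring without walking all rings. The sketch below is a hypothetical illustration only; bnxt_reset_one_tx_ring() and the re-init step are not part of this patch, and only bnxt_free_one_tx_ring_skbs(), bp->tx_ring and txr->tx_buf_ring come from the diff above.

/* Hypothetical caller (not part of this patch): drain just one TX ring,
 * e.g. around a per-queue restart, instead of freeing skbs on all rings.
 */
static void bnxt_reset_one_tx_ring(struct bnxt *bp, int idx)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[idx];

	if (!txr->tx_buf_ring)
		return;

	/* Free this ring's pending skbs/XDP frames only. */
	bnxt_free_one_tx_ring_skbs(bp, txr, idx);

	/* A real caller would re-initialize the ring state and restart
	 * the queue here.
	 */
}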