Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc8).

Conflicts:

tools/testing/selftests/drivers/net/bonding/Makefile
  87951b5664 selftests: bonding: add test for passive LACP mode
  c2377f1763 selftests: bonding: add test for LACP actor port priority

Adjacent changes:

drivers/net/ethernet/cadence/macb.h
  fca3dc859b net: macb: remove illusion about TBQPH/RBQPH being per-queue
  89934dbf16 net: macb: Add TAPRIO traffic scheduling support

drivers/net/ethernet/cadence/macb_main.c
  fca3dc859b net: macb: remove illusion about TBQPH/RBQPH being per-queue
  89934dbf16 net: macb: Add TAPRIO traffic scheduling support

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit f1455695d2
Author: Paolo Abeni <pabeni@redhat.com>
Date:   2025-10-01 10:10:50 +02:00

25 changed files with 401 additions and 146 deletions

Documentation/devicetree/bindings/net/cdns,macb.yaml

@@ -86,7 +86,7 @@ properties:
       items:
         - enum: [ ether_clk, hclk, pclk ]
         - enum: [ hclk, pclk ]
        - const: tx_clk
-        - enum: [ tx_clk, tsu_clk ]
+        - enum: [ rx_clk, tsu_clk ]
         - const: tsu_clk

drivers/net/bonding/bond_main.c

@@ -4313,7 +4313,7 @@ void bond_work_init_all(struct bonding *bond)
 	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
 }
 
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
 {
 	cancel_delayed_work_sync(&bond->mii_work);
 	cancel_delayed_work_sync(&bond->arp_work);

drivers/net/bonding/bond_netlink.c

@@ -593,20 +593,22 @@ static int bond_newlink(struct net_device *bond_dev,
 			struct rtnl_newlink_params *params,
 			struct netlink_ext_ack *extack)
 {
+	struct bonding *bond = netdev_priv(bond_dev);
 	struct nlattr **data = params->data;
 	struct nlattr **tb = params->tb;
 	int err;
 
-	err = bond_changelink(bond_dev, tb, data, extack);
-	if (err < 0)
+	err = register_netdevice(bond_dev);
+	if (err)
 		return err;
 
-	err = register_netdevice(bond_dev);
-	if (!err) {
-		struct bonding *bond = netdev_priv(bond_dev);
-
-		netif_carrier_off(bond_dev);
-		bond_work_init_all(bond);
+	netif_carrier_off(bond_dev);
+	bond_work_init_all(bond);
+
+	err = bond_changelink(bond_dev, tb, data, extack);
+	if (err) {
+		bond_work_cancel_all(bond);
+		unregister_netdevice(bond_dev);
 	}
 
 	return err;
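
The reordering above follows the usual commit-then-configure shape: make the device live first, then apply its attributes, and undo the registration if configuration fails. A minimal sketch of that shape in plain C, with hypothetical names rather than the bonding APIs:

struct obj { int configured; };

static int register_obj(struct obj *o)    { (void)o; return 0; }
static int configure_obj(struct obj *o)   { o->configured = 1; return 0; }
static void unregister_obj(struct obj *o) { o->configured = 0; }

int create_and_configure(struct obj *o)
{
	int err;

	err = register_obj(o);		/* step 1: publish the object */
	if (err)
		return err;

	err = configure_obj(o);		/* step 2: apply requested settings */
	if (err)
		unregister_obj(o);	/* roll back step 1 on failure */

	return err;
}

Registering first means the configuration step runs against a fully initialized object, at the cost of needing an explicit rollback path; that rollback is why bond_work_cancel_all() is made non-static in this merge.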

drivers/net/ethernet/amazon/ena/ena_ethtool.c

@@ -865,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
 
 static u32 ena_get_rxfh_key_size(struct net_device *netdev)
 {
-	return ENA_HASH_KEY_SIZE;
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	struct ena_rss *rss = &adapter->ena_dev->rss;
+
+	return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
 }
 
 static int ena_indirection_table_set(struct ena_adapter *adapter,

drivers/net/ethernet/cadence/macb.h

@@ -220,10 +220,8 @@
 #define GEM_ISR(hw_q)	(0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)	(0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q)	(0x04C8)
 #define GEM_RBQP(hw_q)	(0x0480 + ((hw_q) << 2))
 #define GEM_RBQS(hw_q)	(0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q)	(0x04D4)
 #define GEM_IER(hw_q)	(0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)	(0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)	(0x0640 + ((hw_q) << 2))
@@ -1246,10 +1244,8 @@ struct macb_queue {
 	unsigned int		IDR;
 	unsigned int		IMR;
 	unsigned int		TBQP;
-	unsigned int		TBQPH;
 	unsigned int		RBQS;
 	unsigned int		RBQP;
-	unsigned int		RBQPH;
 
 	/* ENST register offsets for this queue */
 	unsigned int		ENST_START_TIME;

drivers/net/ethernet/cadence/macb_main.c

@@ -52,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
-				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
-				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
@@ -279,9 +275,9 @@ static void macb_set_hwaddr(struct macb *bp)
 	u32 bottom;
 	u16 top;
 
-	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	bottom = get_unaligned_le32(bp->dev->dev_addr);
 	macb_or_gem_writel(bp, SA1B, bottom);
-	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	top = get_unaligned_le16(bp->dev->dev_addr + 4);
 	macb_or_gem_writel(bp, SA1T, top);
 
 	if (gem_has_ptp(bp)) {
@@ -496,19 +492,19 @@ static void macb_init_buffers(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Single register for all queues' high 32 bits. */
+	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+		macb_writel(bp, RBQPH,
+			    upper_32_bits(bp->queues[0].rx_ring_dma));
+		macb_writel(bp, TBQPH,
+			    upper_32_bits(bp->queues[0].tx_ring_dma));
+	}
+#endif
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, RBQPH,
-				     upper_32_bits(queue->rx_ring_dma));
-#endif
 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-			queue_writel(queue, TBQPH,
-				     upper_32_bits(queue->tx_ring_dma));
-#endif
 	}
 }
@@ -1167,10 +1163,6 @@ static void macb_tx_error_task(struct work_struct *work)
 	/* Reinitialize the TX desc queue */
 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
 	queue->tx_tail = 0;
@@ -2475,35 +2467,42 @@ static void macb_free_rx_buffers(struct macb *bp)
 	}
 }
 
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+	return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+	return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
 static void macb_free_consistent(struct macb *bp)
 {
+	struct device *dev = &bp->pdev->dev;
 	struct macb_queue *queue;
 	unsigned int q;
-	int size;
+	size_t size;
 
 	if (bp->rx_ring_tieoff) {
-		dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+		dma_free_coherent(dev, macb_dma_desc_get_size(bp),
 				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
 		bp->rx_ring_tieoff = NULL;
 	}
 
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 
+	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+	dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+	dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
-		if (queue->tx_ring) {
-			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->tx_ring, queue->tx_ring_dma);
-			queue->tx_ring = NULL;
-		}
-		if (queue->rx_ring) {
-			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-			dma_free_coherent(&bp->pdev->dev, size,
-					  queue->rx_ring, queue->rx_ring_dma);
-			queue->rx_ring = NULL;
-		}
+		queue->tx_ring = NULL;
+		queue->rx_ring = NULL;
 	}
 }
@@ -2545,35 +2544,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 static int macb_alloc_consistent(struct macb *bp)
 {
+	struct device *dev = &bp->pdev->dev;
+	dma_addr_t tx_dma, rx_dma;
 	struct macb_queue *queue;
 	unsigned int q;
-	int size;
+	void *tx, *rx;
+	size_t size;
+
+	/*
+	 * Upper 32 bits of the Tx/Rx DMA descriptor addresses for all queues
+	 * must match! We cannot enforce this guarantee, the best we can do is
+	 * do a single allocation and hope it will land into alloc_pages()
+	 * that guarantees natural alignment of physical addresses.
+	 */
+	size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+	tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+	if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+	size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+	rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+	if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+		goto out_err;
+	netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+		   size, bp->num_queues, (unsigned long)rx_dma, rx);
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
-		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->tx_ring_dma,
-						    GFP_KERNEL);
-		if (!queue->tx_ring)
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
-			   q, size, (unsigned long)queue->tx_ring_dma,
-			   queue->tx_ring);
+		queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+		queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+		queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+		queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
 
 		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
-
-		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
-		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-						    &queue->rx_ring_dma, GFP_KERNEL);
-		if (!queue->rx_ring)
-			goto out_err;
-		netdev_dbg(bp->dev,
-			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
 	}
 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;
@@ -4528,12 +4537,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 			queue->RBQP = GEM_RBQP(hw_q - 1);
 			queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = GEM_TBQPH(hw_q - 1);
-				queue->RBQPH = GEM_RBQPH(hw_q - 1);
-			}
-#endif
 		} else {
 			/* queue0 uses legacy registers */
 			queue->ISR = MACB_ISR;
@@ -4542,12 +4545,6 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 			queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
-				queue->TBQPH = MACB_TBQPH;
-				queue->RBQPH = MACB_RBQPH;
-			}
-#endif
 		}
 
 		queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
@@ -5701,6 +5698,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
 		 */
 		tmp = macb_readl(bp, NCR);
 		macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+			macb_writel(bp, RBQPH,
+				    upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
 		for (q = 0, queue = bp->queues; q < bp->num_queues;
 		     ++q, ++queue) {
 			/* Disable RX queues */
@@ -5710,10 +5712,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
 				/* Tie off RX queues */
 				queue_writel(queue, RBQP,
 					     lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-				queue_writel(queue, RBQPH,
-					     upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
 			}
 			/* Disable all interrupts */
 			queue_writel(queue, IDR, -1);
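
Background for the TBQPH/RBQPH rework above: the controller has only one pair of upper-address registers shared by all queues, so every queue's descriptor ring must sit under the same upper 32 bits of bus address. macb_alloc_consistent() therefore makes a single allocation per direction and slices it per queue. A rough, self-contained sketch of that slicing and the boundary check, with hypothetical names (not the driver code):

#include <stddef.h>
#include <stdint.h>

struct ring_slice {
	void *cpu;	/* CPU mapping of this queue's ring */
	uint64_t dma;	/* bus address of this queue's ring */
};

/* Split one contiguous DMA block into per-queue rings. Fails when the block
 * crosses a 4 GiB boundary, i.e. when the slices would not share the same
 * upper 32 address bits (mirrors the driver's upper_32_bits() comparison). */
int carve_rings(void *base, uint64_t base_dma, size_t per_queue,
		unsigned int num_queues, struct ring_slice *out)
{
	size_t total = per_queue * num_queues;
	unsigned int q;

	if ((base_dma >> 32) != ((base_dma + total - 1) >> 32))
		return -1;

	for (q = 0; q < num_queues; q++) {
		out[q].cpu = (char *)base + per_queue * q;
		out[q].dma = base_dma + per_queue * q;
	}
	return 0;
}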

drivers/net/ethernet/dlink/dl2k.c

@@ -964,15 +964,18 @@ receive_packet (struct net_device *dev)
 		} else {
 			struct sk_buff *skb;
 
+			skb = NULL;
 			/* Small skbuffs for short packets */
-			if (pkt_len > copy_thresh) {
+			if (pkt_len <= copy_thresh)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (!skb) {
 				dma_unmap_single(&np->pdev->dev,
 						 desc_to_dma(desc),
 						 np->rx_buf_sz,
 						 DMA_FROM_DEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
-			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+			} else {
 				dma_sync_single_for_cpu(&np->pdev->dev,
 							desc_to_dma(desc),
 							np->rx_buf_sz,

drivers/net/ethernet/freescale/enetc/enetc4_pf.c

@@ -1032,7 +1032,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
 	err = enetc_get_driver_data(si);
 	if (err)
 		return dev_err_probe(dev, err,
-				     "Could not get VF driver data\n");
+				     "Could not get PF driver data\n");
 
 	err = enetc4_pf_struct_init(si);
 	if (err)

drivers/net/ethernet/freescale/enetc/ntmp.c

@@ -52,24 +52,19 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
 	cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
 					  NTMP_BASE_ADDR_ALIGN);
 
-	cbdr->next_to_clean = 0;
-	cbdr->next_to_use = 0;
 	spin_lock_init(&cbdr->ring_lock);
 
+	cbdr->next_to_use = netc_read(cbdr->regs.pir);
+	cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
 	/* Step 1: Configure the base address of the Control BD Ring */
 	netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
 	netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
 
-	/* Step 2: Configure the producer index register */
-	netc_write(cbdr->regs.pir, cbdr->next_to_clean);
-
-	/* Step 3: Configure the consumer index register */
-	netc_write(cbdr->regs.cir, cbdr->next_to_use);
-
-	/* Step4: Configure the number of BDs of the Control BD Ring */
+	/* Step 2: Configure the number of BDs of the Control BD Ring */
 	netc_write(cbdr->regs.lenr, cbdr->bd_num);
 
-	/* Step 5: Enable the Control BD Ring */
+	/* Step 3: Enable the Control BD Ring */
 	netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
 
 	return 0;

drivers/net/ethernet/intel/idpf/idpf_virtchnl.c

@@ -702,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
 		/* If post failed clear the only buffer we supplied */
 		if (post_err) {
 			if (dma_mem)
-				dmam_free_coherent(&adapter->pdev->dev,
-						   dma_mem->size, dma_mem->va,
-						   dma_mem->pa);
+				dma_free_coherent(&adapter->pdev->dev,
+						  dma_mem->size, dma_mem->va,
+						  dma_mem->pa);
 			break;
 		}

drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c

@@ -3542,6 +3542,7 @@ static void otx2_remove(struct pci_dev *pdev)
 	otx2_disable_mbox_intr(pf);
 	otx2_pfaf_mbox_destroy(pf);
 	pci_free_irq_vectors(pf->pdev);
+	bitmap_free(pf->af_xdp_zc_qidx);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 }

drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c

@@ -854,6 +854,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
 	qmem_free(vf->dev, vf->dync_lmt);
 	otx2vf_vfaf_mbox_destroy(vf);
 	pci_free_irq_vectors(vf->pdev);
+	bitmap_free(vf->af_xdp_zc_qidx);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 }

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -294,6 +294,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 			return;
 		}
 		cond_resched();
+		if (mlx5_cmd_is_down(dev)) {
+			ent->ret = -ENXIO;
+			return;
+		}
 	} while (time_before(jiffies, poll_end));
 
 	ent->ret = -ETIMEDOUT;
@@ -1070,7 +1074,7 @@ static void cmd_work_handler(struct work_struct *work)
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
-		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
 	}
 }

drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c

@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
 	struct work_struct reset_reload_work;
 	struct work_struct reset_now_work;
 	struct work_struct reset_abort_work;
+	struct delayed_work reset_timeout_work;
 	unsigned long reset_flags;
 	u8 reset_method;
 	struct timer_list timer;
@@ -259,6 +260,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
 		return -EALREADY;
 	}
 
+	if (current_work() != &fw_reset->reset_timeout_work.work)
+		cancel_delayed_work(&fw_reset->reset_timeout_work);
 	mlx5_stop_sync_reset_poll(dev);
 	if (poll_health)
 		mlx5_start_health_poll(dev);
@@ -330,6 +333,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
 	}
 	mlx5_stop_health_poll(dev, true);
 	mlx5_start_sync_reset_poll(dev);
+	if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+		      &fw_reset->reset_flags))
+		schedule_delayed_work(&fw_reset->reset_timeout_work,
+				      msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
+
 	return 0;
 }
@@ -739,6 +747,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
 	}
 }
 
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+						  work);
+	struct mlx5_fw_reset *fw_reset =
+		container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+	struct mlx5_core_dev *dev = fw_reset->dev;
+
+	if (mlx5_sync_reset_clear_reset_requested(dev, true))
+		return;
+	mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
 static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -822,6 +843,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
 	cancel_work_sync(&fw_reset->reset_reload_work);
 	cancel_work_sync(&fw_reset->reset_now_work);
 	cancel_work_sync(&fw_reset->reset_abort_work);
+	cancel_delayed_work(&fw_reset->reset_timeout_work);
 }
 
 static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -865,6 +887,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 	INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
 	INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
 	INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+	INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+			  mlx5_sync_reset_timeout_work);
 
 	init_completion(&fw_reset->done);
 	return 0;

drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c

@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
+	int err;
 
-	if (!mlx5_cmd_is_down(dev))
-		return mlx5_cmd_do(dev, in, in_size, out, out_size);
+	err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+	/* If FW is gone (-ENXIO), proceed to forceful reclaim */
+	if (err != -ENXIO)
+		return err;
 
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);

drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c

@@ -1788,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
 	struct nfp_net *nn = netdev_priv(netdev);
 
 	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
-		return -EOPNOTSUPP;
+		return 0;
 
 	return nfp_net_rss_key_sz(nn);
 }

drivers/net/usb/rtl8150.c

@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
 	rtl8150_t *dev = netdev_priv(netdev);
 	u16 rx_creg = 0x9e;
 
-	netif_stop_queue(netdev);
 	if (netdev->flags & IFF_PROMISC) {
 		rx_creg |= 0x0001;
 		dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
 		rx_creg &= 0x00fc;
 	}
 	async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
-	netif_wake_queue(netdev);
 }
 
 static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,

drivers/ptp/ptp_private.h

@@ -22,6 +22,7 @@
 #define PTP_MAX_TIMESTAMPS	128
 #define PTP_BUF_TIMESTAMPS	30
 #define PTP_DEFAULT_MAX_VCLOCKS	20
+#define PTP_MAX_VCLOCKS_LIMIT	(KMALLOC_MAX_SIZE/(sizeof(int)))
 #define PTP_MAX_CHANNELS	2048
 
 enum {

drivers/ptp/ptp_sysfs.c

@@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev,
 	size_t size;
 	u32 max;
 
-	if (kstrtou32(buf, 0, &max) || max == 0)
+	if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT)
 		return -EINVAL;
 
 	if (max == ptp->max_vclocks)

drivers/vhost/vringh.c

@@ -1161,6 +1161,7 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
 	struct iov_iter iter;
 	u64 translated;
 	int ret;
+	size_t size;
 
 	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
 			      len - total_translated, &translated,
@@ -1178,9 +1179,9 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
 				      translated);
 	}
 
-	ret = copy_to_iter(src, translated, &iter);
-	if (ret < 0)
-		return ret;
+	size = copy_to_iter(src, translated, &iter);
+	if (size != translated)
+		return -EFAULT;
 
 	src += translated;
 	dst += translated;
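
The vringh fix above hinges on copy_to_iter()'s return convention: it returns the number of bytes actually copied as a size_t and never a negative errno, so the old `ret < 0` test could not detect failure; a short count is the error signal. A toy sketch of that convention and the correct check (illustrative names, not the kernel API):

#include <stddef.h>
#include <string.h>

struct toy_iter {
	char *buf;
	size_t remaining;
};

/* Like copy_to_iter(): returns bytes copied, possibly short, never < 0. */
size_t toy_copy(struct toy_iter *it, const void *src, size_t len)
{
	size_t n = len < it->remaining ? len : it->remaining;

	memcpy(it->buf, src, n);
	it->buf += n;
	it->remaining -= n;
	return n;
}

/* Correct caller: compare against the requested length. */
int toy_copy_all(struct toy_iter *it, const void *src, size_t len)
{
	return toy_copy(it, src, len) == len ? 0 : -1 /* e.g. -EFAULT */;
}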

include/net/bonding.h

@@ -710,6 +710,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
 void bond_work_init_all(struct bonding *bond);
+void bond_work_cancel_all(struct bonding *bond);
 
 #ifdef CONFIG_PROC_FS
 void bond_create_proc_entry(struct bonding *bond);

net/nfc/nci/ntf.c

@@ -27,11 +27,16 @@
 
 /* Handle NCI Notification packets */
 
-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
-				      const struct sk_buff *skb)
+static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
+				     const struct sk_buff *skb)
 {
 	/* Handle NCI 2.x core reset notification */
-	const struct nci_core_reset_ntf *ntf = (void *)skb->data;
+	const struct nci_core_reset_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_core_reset_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_reset_ntf *)skb->data;
 
 	ndev->nci_ver = ntf->nci_ver;
 	pr_debug("nci_ver 0x%x, config_status 0x%x\n",
@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
 		__le32_to_cpu(ntf->manufact_specific_info);
 
 	nci_req_complete(ndev, NCI_STATUS_OK);
+
+	return 0;
 }
 
-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
-					     struct sk_buff *skb)
+static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+					    struct sk_buff *skb)
 {
-	struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+	struct nci_core_conn_credit_ntf *ntf;
 	struct nci_conn_info *conn_info;
 	int i;
 
+	if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_conn_credit_ntf *)skb->data;
+
 	pr_debug("num_entries %d\n", ntf->num_entries);
 
 	if (ntf->num_entries > NCI_MAX_NUM_CONN)
@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
 		conn_info = nci_get_conn_info_by_conn_id(ndev,
 							 ntf->conn_entries[i].conn_id);
 		if (!conn_info)
-			return;
+			return 0;
 
 		atomic_add(ntf->conn_entries[i].credits,
 			   &conn_info->credits_cnt);
@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
 	/* trigger the next tx */
 	if (!skb_queue_empty(&ndev->tx_q))
 		queue_work(ndev->tx_wq, &ndev->tx_work);
+
+	return 0;
 }
 
-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
-					      const struct sk_buff *skb)
+static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+					     const struct sk_buff *skb)
 {
-	__u8 status = skb->data[0];
+	__u8 status;
+
+	if (skb->len < 1)
+		return -EINVAL;
+
+	status = skb->data[0];
 
 	pr_debug("status 0x%x\n", status);
@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
 		   (the state remains the same) */
 		nci_req_complete(ndev, status);
 	}
+
+	return 0;
 }
 
-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
-						struct sk_buff *skb)
+static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+					       struct sk_buff *skb)
 {
-	struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
+	struct nci_core_intf_error_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_core_intf_error_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_core_intf_error_ntf *)skb->data;
 
 	ntf->conn_id = nci_conn_id(&ntf->conn_id);
@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
 	/* complete the data exchange transaction, if exists */
 	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
 		nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
+
+	return 0;
 }
 
 static const __u8 *
@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_dev *ndev)
 	ndev->n_targets = 0;
 }
 
-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
-				       const struct sk_buff *skb)
+static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+				      const struct sk_buff *skb)
 {
 	struct nci_rf_discover_ntf ntf;
-	const __u8 *data = skb->data;
+	const __u8 *data;
 	bool add_target = true;
 
+	if (skb->len < sizeof(struct nci_rf_discover_ntf))
+		return -EINVAL;
+
+	data = skb->data;
 	ntf.rf_discovery_id = *data++;
 	ntf.rf_protocol = *data++;
 	ntf.rf_tech_and_mode = *data++;
@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
 		nfc_targets_found(ndev->nfc_dev, ndev->targets,
 				  ndev->n_targets);
 	}
+
+	return 0;
 }
 
 static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
@@ -553,14 +588,19 @@ static int nci_store_ats_nfc_iso_dep(struct nci_dev *ndev,
 	return NCI_STATUS_OK;
 }
 
-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
-					     const struct sk_buff *skb)
+static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+					    const struct sk_buff *skb)
 {
 	struct nci_conn_info *conn_info;
 	struct nci_rf_intf_activated_ntf ntf;
-	const __u8 *data = skb->data;
+	const __u8 *data;
 	int err = NCI_STATUS_OK;
 
+	if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
+		return -EINVAL;
+
+	data = skb->data;
 	ntf.rf_discovery_id = *data++;
 	ntf.rf_interface = *data++;
 	ntf.rf_protocol = *data++;
@@ -667,7 +707,7 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
 	if (err == NCI_STATUS_OK) {
 		conn_info = ndev->rf_conn_info;
 		if (!conn_info)
-			return;
+			return 0;
 
 		conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
 		conn_info->initial_num_credits = ntf.initial_num_credits;
@@ -721,19 +761,26 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
 			pr_err("error when signaling tm activation\n");
 		}
 	}
+
+	return 0;
 }
 
-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
-					 const struct sk_buff *skb)
+static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+					const struct sk_buff *skb)
 {
 	const struct nci_conn_info *conn_info;
-	const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
+	const struct nci_rf_deactivate_ntf *ntf;
+
+	if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
+		return -EINVAL;
+
+	ntf = (struct nci_rf_deactivate_ntf *)skb->data;
 
 	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
 	conn_info = ndev->rf_conn_info;
 	if (!conn_info)
-		return;
+		return 0;
 
 	/* drop tx data queue */
 	skb_queue_purge(&ndev->tx_q);
@@ -765,14 +812,20 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
 	}
 
 	nci_req_complete(ndev, NCI_STATUS_OK);
+
+	return 0;
 }
 
-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
-					  const struct sk_buff *skb)
+static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+					 const struct sk_buff *skb)
 {
 	u8 status = NCI_STATUS_OK;
-	const struct nci_nfcee_discover_ntf *nfcee_ntf =
-		(struct nci_nfcee_discover_ntf *)skb->data;
+	const struct nci_nfcee_discover_ntf *nfcee_ntf;
+
+	if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
+		return -EINVAL;
+
+	nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
 
 	/* NFCForum NCI 9.2.1 HCI Network Specific Handling
 	 * If the NFCC supports the HCI Network, it SHALL return one,
@@ -783,6 +836,8 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
 	ndev->cur_params.id = nfcee_ntf->nfcee_id;
 
 	nci_req_complete(ndev, status);
+
+	return 0;
 }
 
 void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -809,35 +864,43 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
 	switch (ntf_opcode) {
 	case NCI_OP_CORE_RESET_NTF:
-		nci_core_reset_ntf_packet(ndev, skb);
+		if (nci_core_reset_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_CORE_CONN_CREDITS_NTF:
-		nci_core_conn_credits_ntf_packet(ndev, skb);
+		if (nci_core_conn_credits_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_CORE_GENERIC_ERROR_NTF:
-		nci_core_generic_error_ntf_packet(ndev, skb);
+		if (nci_core_generic_error_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_CORE_INTF_ERROR_NTF:
-		nci_core_conn_intf_error_ntf_packet(ndev, skb);
+		if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_RF_DISCOVER_NTF:
-		nci_rf_discover_ntf_packet(ndev, skb);
+		if (nci_rf_discover_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_RF_INTF_ACTIVATED_NTF:
-		nci_rf_intf_activated_ntf_packet(ndev, skb);
+		if (nci_rf_intf_activated_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_RF_DEACTIVATE_NTF:
-		nci_rf_deactivate_ntf_packet(ndev, skb);
+		if (nci_rf_deactivate_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_NFCEE_DISCOVER_NTF:
-		nci_nfcee_discover_ntf_packet(ndev, skb);
+		if (nci_nfcee_discover_ntf_packet(ndev, skb))
+			goto end;
 		break;
 
 	case NCI_OP_RF_NFCEE_ACTION_NTF:
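
All of the ntf.c changes above apply one hardening pattern: verify the packet is at least as long as the fixed-size notification structure before casting the payload, and propagate -EINVAL so the dispatcher can bail out instead of processing a truncated packet. A stripped-down sketch of the pattern, with a hypothetical structure rather than the NCI definitions:

#include <stddef.h>
#include <stdint.h>

struct toy_ntf {
	uint8_t ver;
	uint8_t status;
};

/* Check the length first, cast second; a truncated packet is rejected
 * instead of being read past its end. */
int parse_toy_ntf(const uint8_t *data, size_t len, struct toy_ntf *out)
{
	const struct toy_ntf *ntf;

	if (len < sizeof(struct toy_ntf))
		return -1;	/* cf. the -EINVAL returns above */

	ntf = (const struct toy_ntf *)data;
	*out = *ntf;
	return 0;
}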

tools/testing/selftests/drivers/net/bonding/Makefile

@@ -13,6 +13,7 @@ TEST_PROGS := \
 	bond_macvlan_ipvlan.sh \
 	bond_passive_lacp.sh \
-	bond_lacp_prio.sh
+	bond_lacp_prio.sh \
+	bond_ipsec_offload.sh
 
 TEST_FILES := \
 	lag_lib.sh \

tools/testing/selftests/drivers/net/bonding/bond_ipsec_offload.sh

@@ -0,0 +1,156 @@ (new file)
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# IPsec over bonding offload test:
#
# +----------------+
# | bond0 |
# | | |
# | eth0 eth1 |
# +---+-------+----+
#
# We use netdevsim instead of physical interfaces
#-------------------------------------------------------------------
# Example commands
# ip x s add proto esp src 192.0.2.1 dst 192.0.2.2 \
# spi 0x07 mode transport reqid 0x07 replay-window 32 \
# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
# sel src 192.0.2.1/24 dst 192.0.2.2/24
# offload dev bond0 dir out
# ip x p add dir out src 192.0.2.1/24 dst 192.0.2.2/24 \
# tmpl proto esp src 192.0.2.1 dst 192.0.2.2 \
# spi 0x07 mode transport reqid 0x07
#
#-------------------------------------------------------------------
lib_dir=$(dirname "$0")
# shellcheck disable=SC1091
source "$lib_dir"/../../../net/lib.sh
srcip=192.0.2.1
dstip=192.0.2.2
ipsec0=/sys/kernel/debug/netdevsim/netdevsim0/ports/0/ipsec
ipsec1=/sys/kernel/debug/netdevsim/netdevsim0/ports/1/ipsec
active_slave=""
# shellcheck disable=SC2317
active_slave_changed()
{
local old_active_slave=$1
local new_active_slave
# shellcheck disable=SC2154
new_active_slave=$(ip -n "${ns}" -d -j link show bond0 | \
jq -r ".[].linkinfo.info_data.active_slave")
[ "$new_active_slave" != "$old_active_slave" ] && [ "$new_active_slave" != "null" ]
}
test_offload()
{
# use ping to exercise the Tx path
ip netns exec "$ns" ping -I bond0 -c 3 -W 1 -i 0 "$dstip" >/dev/null
active_slave=$(ip -n "${ns}" -d -j link show bond0 | \
jq -r ".[].linkinfo.info_data.active_slave")
if [ "$active_slave" = "$nic0" ]; then
sysfs=$ipsec0
elif [ "$active_slave" = "$nic1" ]; then
sysfs=$ipsec1
else
check_err 1 "bond_ipsec_offload invalid active_slave $active_slave"
fi
# The tx/rx order in sysfs may change after failover
grep -q "SA count=2 tx=3" "$sysfs" && grep -q "tx ipaddr=$dstip" "$sysfs"
check_err $? "incorrect tx count with link ${active_slave}"
log_test bond_ipsec_offload "active_slave ${active_slave}"
}
setup_env()
{
if ! mount | grep -q debugfs; then
mount -t debugfs none /sys/kernel/debug/ &> /dev/null
defer umount /sys/kernel/debug/
fi
# set up netdevsim since dummy/veth devices don't have offload support
if [ ! -w /sys/bus/netdevsim/new_device ] ; then
if ! modprobe -q netdevsim; then
echo "SKIP: can't load netdevsim for ipsec offload"
# shellcheck disable=SC2154
exit "$ksft_skip"
fi
defer modprobe -r netdevsim
fi
setup_ns ns
defer cleanup_ns "$ns"
}
setup_bond()
{
ip -n "$ns" link add bond0 type bond mode active-backup miimon 100
ip -n "$ns" addr add "$srcip/24" dev bond0
ip -n "$ns" link set bond0 up
echo "0 2" | ip netns exec "$ns" tee /sys/bus/netdevsim/new_device >/dev/null
nic0=$(ip netns exec "$ns" ls /sys/bus/netdevsim/devices/netdevsim0/net | head -n 1)
nic1=$(ip netns exec "$ns" ls /sys/bus/netdevsim/devices/netdevsim0/net | tail -n 1)
ip -n "$ns" link set "$nic0" master bond0
ip -n "$ns" link set "$nic1" master bond0
# We didn't create a peer, so make sure we can Tx by adding a permanent
# neighbour. This needs to be added after enslaving.
ip -n "$ns" neigh add "$dstip" dev bond0 lladdr 00:11:22:33:44:55
# create offloaded SAs, both in and out
ip -n "$ns" x p add dir out src "$srcip/24" dst "$dstip/24" \
tmpl proto esp src "$srcip" dst "$dstip" spi 9 \
mode transport reqid 42
ip -n "$ns" x p add dir in src "$dstip/24" dst "$srcip/24" \
tmpl proto esp src "$dstip" dst "$srcip" spi 9 \
mode transport reqid 42
ip -n "$ns" x s add proto esp src "$srcip" dst "$dstip" spi 9 \
mode transport reqid 42 aead "rfc4106(gcm(aes))" \
0x3132333435363738393031323334353664636261 128 \
sel src "$srcip/24" dst "$dstip/24" \
offload dev bond0 dir out
ip -n "$ns" x s add proto esp src "$dstip" dst "$srcip" spi 9 \
mode transport reqid 42 aead "rfc4106(gcm(aes))" \
0x3132333435363738393031323334353664636261 128 \
sel src "$dstip/24" dst "$srcip/24" \
offload dev bond0 dir in
# does offload show up in ip output
lines=$(ip -n "$ns" x s list | grep -c "crypto offload parameters: dev bond0 dir")
if [ "$lines" -ne 2 ] ; then
check_err 1 "bond_ipsec_offload SA offload missing from list output"
fi
}
trap defer_scopes_cleanup EXIT
setup_env
setup_bond
# start Offload testing
test_offload
# do failover and re-test
ip -n "$ns" link set "$active_slave" down
slowwait 5 active_slave_changed "$active_slave"
test_offload
# make sure offload get removed from driver
ip -n "$ns" x s flush
ip -n "$ns" x p flush
line0=$(grep -c "SA count=0" "$ipsec0")
line1=$(grep -c "SA count=0" "$ipsec1")
[ "$line0" -ne 1 ] || [ "$line1" -ne 1 ]
check_fail $? "bond_ipsec_offload SA not removed from driver"
exit "$EXIT_STATUS"

tools/testing/selftests/drivers/net/bonding/config

@@ -11,3 +11,7 @@ CONFIG_NET_SCH_INGRESS=y
 CONFIG_NLMON=y
 CONFIG_VETH=y
 CONFIG_VLAN_8021Q=m
+CONFIG_INET_ESP=y
+CONFIG_INET_ESP_OFFLOAD=y
+CONFIG_XFRM_USER=m
+CONFIG_NETDEVSIM=m