Including fixes from bluetooth and wireless.

Current release - new code bugs:
 
  - ptp: expose raw cycles only for clocks with free-running counter
 
  - bonding: fix null-deref in actor_port_prio setting
 
  - mdio: ERR_PTR-check regmap pointer returned by device_node_to_regmap()
 
  - eth: libie: depend on DEBUG_FS when building LIBIE_FWLOG
 
 Previous releases - regressions:
 
  - virtio_net: fix perf regression due to bad alignment of
    virtio_net_hdr_v1_hash
 
  - Revert "wifi: ath10k: avoid unnecessary wait for service ready message"
    caused regressions for QCA988x and QCA9984
 
  - Revert "wifi: ath12k: Fix missing station power save configuration"
    caused regressions for WCN7850
 
  - eth: bnxt_en: shutdown FW DMA in bnxt_shutdown(), fix memory
    corruptions after kexec
 
 Previous releases - always broken:
 
  - virtio-net: fix received packet length check for big packets
 
  - sctp: fix races in socket diag handling
 
  - wifi: add an hrtimer-based delayed work item to avoid low granularity
    of timers set relatively far in the future, and use it where it matters
    (e.g. when performing AP-scheduled channel switch)
 
  - eth: mlx5e:
    - correctly propagate error in case of module EEPROM read failure
    - fix HW-GRO on systems with PAGE_SIZE == 64kB
 
  - dsa: b53: fixes for tagging, link configuration / RMII, FDB, multicast
 
  - phy: lan8842: implement latest errata
 
 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmkMyx0ACgkQMUZtbf5S
 Irs4gA//fRGPHfEksbVO0lAR9QVUKq/Wvokt6DnLXQsi7js7kun7ymVFyU8tVacP
 sGoYbTTXO34XpijKVMTFe5WnnnSF3cri1/e+PYNXndKGHOheLvz0aq+CniiLexaT
 ag/opHX9+Io6x8DyOVkkRJYByvEcXgy1vFjhk5O04wn7tGPBNzvaKQJzjfVIiiWP
 thma/e77jVUqsNptS/VzJNCDwPB3Qi0fqylRovu7A59Kmr7GLZxaqZQpvM5/XjU3
 s3NhNjNDawmiwxN2AIztXo+vMReqFaiCr+z36OcruE04CFslkQGmE/5g3FdDGg+Q
 RE7Wfgk2UJ+Q5Y1jnQWNYOkGaMd6bMgPQD/8zZvwEf163Gh1YBh0rtDY07DWV5FR
 wp5cmeNDo2Mf+nnCM2UaoUQS2AAmkub2aAIjnlvrefeJMKciyMqtQe+V02aIxuvK
 BPmeSCN1IOyAIDCRE3Fd6JFyk+4Z4YC6Hdm/WOG517eGrDh3yV9JE/+C3jdh3JwT
 lvaScKhCdyq/9mFqEcU/yR5Kc4Od6Rw0K0s5DM4LAbKSsxI6hp6SkuoFHUsqFsRR
 HL3ucy4c4hmLxz6AA5/+UVIPzFsY5cwOnhs3AU2UVuA95fH0QWqX2u2ll0rC9mAW
 xDXa/ai0/KAGvVYqjC+MDyF8zrtzSG7C40ZsxhdcU84s94ZNVFI=
 =a/Ml
 -----END PGP SIGNATURE-----

Merge tag 'net-6.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
  "Including fixes from bluetooth and wireless.

  Current release - new code bugs:

   - ptp: expose raw cycles only for clocks with free-running counter

   - bonding: fix null-deref in actor_port_prio setting

   - mdio: ERR_PTR-check regmap pointer returned by
     device_node_to_regmap()

   - eth: libie: depend on DEBUG_FS when building LIBIE_FWLOG

  Previous releases - regressions:

   - virtio_net: fix perf regression due to bad alignment of
     virtio_net_hdr_v1_hash

   - Revert "wifi: ath10k: avoid unnecessary wait for service ready
     message" caused regressions for QCA988x and QCA9984

   - Revert "wifi: ath12k: Fix missing station power save configuration"
     caused regressions for WCN7850

   - eth: bnxt_en: shutdown FW DMA in bnxt_shutdown(), fix memory
     corruptions after kexec

  Previous releases - always broken:

   - virtio-net: fix received packet length check for big packets

   - sctp: fix races in socket diag handling

   - wifi: add an hrtimer-based delayed work item to avoid low
     granularity of timers set relatively far in the future, and use it
     where it matters (e.g. when performing AP-scheduled channel switch)

   - eth: mlx5e:
       - correctly propagate error in case of module EEPROM read failure
       - fix HW-GRO on systems with PAGE_SIZE == 64kB

   - dsa: b53: fixes for tagging, link configuration / RMII, FDB,
     multicast

   - phy: lan8842: implement latest errata"

* tag 'net-6.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (63 commits)
  selftests/vsock: avoid false-positives when checking dmesg
  net: bridge: fix MST static key usage
  net: bridge: fix use-after-free due to MST port state bypass
  lan966x: Fix sleeping in atomic context
  bonding: fix NULL pointer dereference in actor_port_prio setting
  net: dsa: microchip: Fix reserved multicast address table programming
  net: wan: framer: pef2256: Switch to devm_mfd_add_devices()
  net: libwx: fix device bus LAN ID
  net/mlx5e: SHAMPO, Fix header formulas for higher MTUs and 64K pages
  net/mlx5e: SHAMPO, Fix skb size check for 64K pages
  net/mlx5e: SHAMPO, Fix header mapping for 64K pages
  net: ti: icssg-prueth: Fix fdb hash size configuration
  net/mlx5e: Fix return value in case of module EEPROM read error
  net: gro_cells: Reduce lock scope in gro_cell_poll
  libie: depend on DEBUG_FS when building LIBIE_FWLOG
  wifi: mac80211_hwsim: Limit destroy_on_close radio removal to netgroup
  netpoll: Fix deadlock in memory allocation under spinlock
  net: ethernet: ti: netcp: Standardize knav_dma_open_channel to return NULL on error
  virtio-net: fix received length check in big packets
  bnxt_en: Fix warning in bnxt_dl_reload_down()
  ...
This commit is contained in:
Linus Torvalds 2025-11-06 08:52:30 -08:00
commit c2c2ccfd4b
72 changed files with 877 additions and 334 deletions

View File

@ -4818,6 +4818,7 @@ F: drivers/net/dsa/b53/*
F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
F: net/dsa/tag_brcm.c
BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Florian Fainelli <florian.fainelli@broadcom.com>

View File

@ -625,8 +625,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
if (!len)
if (!len) {
kvfree(ptr);
return -EPERM;
}
*_buf = ptr;
return len;

View File

@ -1904,13 +1904,13 @@ setup_instance(struct hfcsusb *hw, struct device *parent)
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
kfree(hw);
return err;
}
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int err;
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
kfree(hw);
return -ENOMEM;
err = -ENOMEM;
goto err_free_hw;
}
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);
if (setup_instance(hw, dev->dev.parent))
return -EIO;
if (setup_instance(hw, dev->dev.parent)) {
err = -EIO;
goto err_free_urb;
}
hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;
err_free_urb:
usb_free_urb(hw->ctrl_urb);
err_free_hw:
kfree(hw);
return err;
}
/* function called when an active device is removed */

View File

@ -225,13 +225,6 @@ static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_actor_port_prio_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN},
{ "maxval", 65535, BOND_VALFLAG_MAX},
{ "default", 255, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0},
};
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
@ -497,7 +490,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_ACTOR_PORT_PRIO,
.name = "actor_port_prio",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
.values = bond_actor_port_prio_tbl,
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_actor_port_prio_set,
},
[BOND_OPT_AD_ACTOR_SYSTEM] = {

View File

@ -371,11 +371,11 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
* frames should be flooded or not.
*/
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
} else {
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
mgmt |= B53_IP_MCAST_25;
mgmt |= B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
}
@ -1372,6 +1372,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
reg &= ~(0x3 << GMII_PO_SPEED_S);
if (is5301x(dev) || is58xx(dev))
reg &= ~PORT_OVERRIDE_SPEED_2000M;
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@ -1390,6 +1394,11 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
if (is5325(dev))
reg &= ~PORT_OVERRIDE_LP_FLOW_25;
else
reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
if (rx_pause) {
if (is5325(dev))
reg |= PORT_OVERRIDE_LP_FLOW_25;
@ -1593,8 +1602,11 @@ static void b53_phylink_mac_link_down(struct phylink_config *config,
struct b53_device *dev = dp->ds->priv;
int port = dp->index;
if (mode == MLO_AN_PHY)
if (mode == MLO_AN_PHY) {
if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
b53_force_link(dev, port, false);
return;
}
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@ -1622,6 +1634,13 @@ static void b53_phylink_mac_link_up(struct phylink_config *config,
if (mode == MLO_AN_PHY) {
/* Re-negotiate EEE if it was enabled already */
p->eee_enabled = b53_eee_init(ds, port, phydev);
if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
}
return;
}
@ -2018,7 +2037,7 @@ static int b53_arl_search_wait(struct b53_device *dev)
do {
b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
if (!(reg & ARL_SRCH_STDN))
return 0;
return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@ -2068,13 +2087,16 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
unsigned int count = 0;
u8 offset;
int ret;
u8 reg;
if (priv->num_arl_bins > 2)
results_per_hit = 2;
mutex_lock(&priv->arl_mutex);
if (is5325(priv) || is5365(priv))
@ -2096,7 +2118,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
if (ret)
break;
if (priv->num_arl_bins > 2) {
if (results_per_hit == 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
@ -2106,7 +2128,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
} while (count++ < b53_max_arl_entries(priv) / 2);
} while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);

View File

@ -111,8 +111,7 @@
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
#define B53_IP_MCAST_25 BIT(0)
#define B53_IPMC_FWD_EN BIT(1)
#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)

View File

@ -1355,9 +1355,15 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
#define RESV_MCAST_CNT 8
static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
u8 i, ports, update;
const u32 *masks;
bool override;
u32 data;
int ret;
@ -1366,23 +1372,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
/* Set the Override bit for forwarding BPDU packet to CPU */
ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
ALU_V_OVERRIDE | BIT(dev->cpu_port));
if (ret < 0)
return ret;
data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
/* The reserved multicast address table has 8 entries. Each entry has
* a default value of which port to forward. It is assumed the host
* port is the last port in most of the switches, but that is not the
* case for KSZ9477 or maybe KSZ9897. For LAN937X family the default
* port is port 5, the first RGMII port. It is okay for LAN9370, a
* 5-port switch, but may not be correct for the other 8-port
* versions. It is necessary to update the whole table to forward to
* the right ports.
* Furthermore PTP messages can use a reserved multicast address and
* the host will not receive them if this table is not correct.
*/
for (i = 0; i < RESV_MCAST_CNT; i++) {
data = reserved_mcast_map[i] <<
dev->info->shifts[ALU_STAT_INDEX];
data |= ALU_STAT_START |
masks[ALU_STAT_DIRECT] |
masks[ALU_RESV_MCAST_ADDR] |
masks[ALU_STAT_READ];
ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
if (ret < 0)
return ret;
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready(dev);
if (ret < 0) {
dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
if (ret < 0)
return ret;
ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
if (ret < 0)
return ret;
override = false;
ports = data & dev->port_mask;
switch (i) {
case 0:
case 6:
/* Change the host port. */
update = BIT(dev->cpu_port);
override = true;
break;
case 2:
/* Change the host port. */
update = BIT(dev->cpu_port);
break;
case 4:
case 5:
case 7:
/* Skip the host port. */
update = dev->port_mask & ~BIT(dev->cpu_port);
break;
default:
update = ports;
break;
}
if (update != ports || override) {
data &= ~dev->port_mask;
data |= update;
/* Set Override bit to receive frame even when port is
* closed.
*/
if (override)
data |= ALU_V_OVERRIDE;
ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
if (ret < 0)
return ret;
data = reserved_mcast_map[i] <<
dev->info->shifts[ALU_STAT_INDEX];
data |= ALU_STAT_START |
masks[ALU_STAT_DIRECT] |
masks[ALU_RESV_MCAST_ADDR] |
masks[ALU_STAT_WRITE];
ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
if (ret < 0)
return ret;
/* wait to be finished */
ret = ksz9477_wait_alu_sta_ready(dev);
if (ret < 0)
return ret;
}
}
return 0;

View File

@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2024 Microchip Technology Inc.
* Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@ -397,7 +397,6 @@
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
#define ALU_RESV_MCAST_ADDR BIT(1)
#define REG_SW_ALU_VAL_A 0x0420

View File

@ -808,6 +808,8 @@ static const u16 ksz9477_regs[] = {
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
[ALU_STAT_DIRECT] = 0,
[ALU_RESV_MCAST_ADDR] = BIT(1),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@ -835,6 +837,8 @@ static const u8 ksz9477_xmii_ctrl1[] = {
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
[ALU_STAT_DIRECT] = BIT(3),
[ALU_RESV_MCAST_ADDR] = BIT(2),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};

View File

@ -294,6 +294,8 @@ enum ksz_masks {
DYNAMIC_MAC_TABLE_TIMESTAMP,
ALU_STAT_WRITE,
ALU_STAT_READ,
ALU_STAT_DIRECT,
ALU_RESV_MCAST_ADDR,
P_MII_TX_FLOW_CTRL,
P_MII_RX_FLOW_CTRL,
};

View File

@ -12439,7 +12439,7 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@ -16892,6 +16892,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
netif_close(dev);
if (bnxt_hwrm_func_drv_unrgtr(bp)) {
pcie_flr(pdev);
goto shutdown_exit;
}
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);

View File

@ -2149,7 +2149,7 @@ struct bnxt_bs_trace_info {
static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
u32 offset)
{
if (!bs_trace->wrapped &&
if (!bs_trace->wrapped && bs_trace->magic_byte &&
*bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
bs_trace->wrapped = 1;
bs_trace->last_offset = offset;
@ -2941,6 +2941,7 @@ void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);

View File

@ -333,13 +333,14 @@ static void bnxt_fill_drv_seg_record(struct bnxt *bp,
u32 offset = 0;
int rc = 0;
record->max_entries = cpu_to_le32(ctxm->max_entries);
record->entry_size = cpu_to_le32(ctxm->entry_size);
rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
if (rc)
return;
bnxt_bs_trace_check_wrap(bs_trace, offset);
record->max_entries = cpu_to_le32(ctxm->max_entries);
record->entry_size = cpu_to_le32(ctxm->entry_size);
record->offset = cpu_to_le32(bs_trace->last_offset);
record->wrapped = bs_trace->wrapped;
}

View File

@ -461,7 +461,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
rtnl_unlock();
break;
}
bnxt_cancel_reservations(bp, false);
bnxt_clear_reservations(bp, false);
bnxt_free_ctx_mem(bp, false);
break;
}

View File

@ -1051,9 +1051,9 @@ static void bnxt_ptp_free(struct bnxt *bp)
if (ptp->ptp_clock) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
}
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
}
}
int bnxt_ptp_init(struct bnxt *bp)

View File

@ -26,6 +26,19 @@ int gve_clock_nic_ts_read(struct gve_priv *priv)
return 0;
}
static int gve_ptp_gettimex64(struct ptp_clock_info *info,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
return -EOPNOTSUPP;
}
static int gve_ptp_settime64(struct ptp_clock_info *info,
const struct timespec64 *ts)
{
return -EOPNOTSUPP;
}
static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
{
const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
@ -47,6 +60,8 @@ static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
static const struct ptp_clock_info gve_ptp_caps = {
.owner = THIS_MODULE,
.name = "gve clock",
.gettimex64 = gve_ptp_gettimex64,
.settime64 = gve_ptp_settime64,
.do_aux_work = gve_ptp_do_aux_work,
};

View File

@ -146,7 +146,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select LIBIE_FWLOG
select LIBIE_FWLOG if DEBUG_FS
select MDIO
select NET_DEVLINK
select PLDMFW
@ -298,7 +298,7 @@ config ICE
select DIMLIB
select LIBIE
select LIBIE_ADMINQ
select LIBIE_FWLOG
select LIBIE_FWLOG if DEBUG_FS
select NET_DEVLINK
select PACKING
select PLDMFW

View File

@ -821,9 +821,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
/* Bitmask indicating in use pools */

View File

@ -1516,10 +1516,8 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->xdp_cnt = numptrs;
pool->xdp = devm_kcalloc(pfvf->dev,
numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
if (IS_ERR(pool->xdp)) {
netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
return PTR_ERR(pool->xdp);
}
if (!pool->xdp)
return -ENOMEM;
}
return 0;

View File

@ -634,7 +634,10 @@ struct mlx5e_dma_info {
struct mlx5e_shampo_hd {
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
u32 hd_per_page;
u16 hd_per_wqe;
u8 log_hd_per_page;
u8 log_hd_entry_size;
unsigned long *bitmap;
u16 pi;
u16 ci;

View File

@ -2125,14 +2125,12 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (!size_read)
return i;
if (size_read == -EINVAL)
return -EINVAL;
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
"Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
return i;
return size_read;
}
i += size_read;

View File

@ -791,8 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
u8 log_hd_per_page, log_hd_entry_size;
u16 hd_per_wq, hd_per_wqe;
u32 hd_pool_size;
u16 hd_per_wq;
int wq_size;
int err;
@ -815,11 +816,24 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
if (err)
goto err_umr_mkey;
rq->mpwqe.shampo->hd_per_wqe =
mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
} else {
log_hd_per_page = order_base_2(hd_per_wqe);
log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
}
rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */

View File

@ -648,17 +648,20 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
int header_index)
{
BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
return &shampo->pages[header_index >> shampo->log_hd_per_page];
}
static u64 mlx5e_shampo_hd_offset(int header_index)
static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
{
return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u32 hd_per_page = shampo->hd_per_page;
return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
}
static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
@ -671,7 +674,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct mlx5e_umr_wqe *umr_wqe;
int headroom, i = 0;
int headroom, i;
headroom = rq->buff.headroom;
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
@ -679,26 +682,25 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
while (i < ksm_entries) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
for (i = 0; i < ksm_entries; i++, index++) {
struct mlx5e_frag_page *frag_page;
u64 addr;
err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
if (unlikely(err))
frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
header_offset = mlx5e_shampo_hd_offset(rq, index);
if (!header_offset) {
err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
frag_page);
if (err)
goto err_unmap;
}
addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
header_offset = mlx5e_shampo_hd_offset(index++);
umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(lkey),
.va = cpu_to_be64(addr + header_offset + headroom),
};
}
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
@ -713,9 +715,9 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
return 0;
err_unmap:
while (--i) {
while (--i >= 0) {
--index;
header_offset = mlx5e_shampo_hd_offset(index);
header_offset = mlx5e_shampo_hd_offset(rq, index);
if (!header_offset) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
@ -735,12 +737,11 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_ksm_entries, len;
max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev),
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
if (!ksm_entries)
return 0;
@ -858,7 +859,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
@ -1225,9 +1226,10 @@ static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
void *addr = netmem_address(frag_page->netmem);
return netmem_address(frag_page->netmem) + head_offset;
return addr + head_offset + rq->buff.headroom;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@ -2267,7 +2269,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
u16 head_offset = mlx5e_shampo_hd_offset(header_index);
u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
@ -2283,7 +2286,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
@ -2356,6 +2359,9 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
else
return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}

View File

@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
}
}
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
@ -416,7 +416,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
@ -442,7 +442,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
@ -500,7 +500,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
@ -603,7 +603,7 @@ void lan966x_stats_get(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
@ -685,7 +685,7 @@ void lan966x_stats_get(struct net_device *dev,
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
@ -701,7 +701,7 @@ int lan966x_stats_init(struct lan966x *lan966x)
return -ENOMEM;
/* Init stats worker */
mutex_init(&lan966x->stats_lock);
spin_lock_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);

View File

@ -1261,7 +1261,6 @@ static int lan966x_probe(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
debugfs_remove_recursive(lan966x->debugfs_root);
@ -1279,7 +1278,6 @@ static void lan966x_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);

View File

@ -295,8 +295,8 @@ struct lan966x {
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
/* workqueue for reading stats */
struct mutex stats_lock;
/* lock for reading stats */
spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;

View File

@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
u32 counter;
id = id & 0xff; /* counter limit */
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
if (counter)
admin->cache.counter = counter;
}
@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
{
id = id & 0xff; /* counter limit */
mutex_lock(&lan966x->stats_lock);
spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
lan_wr(admin->cache.counter, lan966x,
SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
mutex_unlock(&lan966x->stats_lock);
spin_unlock(&lan966x->stats_lock);
}
static void lan966x_vcap_cache_write(struct net_device *dev,

View File

@ -29,6 +29,10 @@ static void ionic_tx_clean(struct ionic_queue *q,
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
/* Ensure TX descriptor writes reach memory before NIC reads them.
* Prevents device from fetching stale descriptors.
*/
dma_wmb();
ionic_q_post(q, ring_dbell);
}
@ -1444,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap;
int err;
desc_info = &q->tx_info[q->head_idx];
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
len = skb->len;
mss = skb_shinfo(skb)->gso_size;
outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPXIP4 |
SKB_GSO_IPXIP6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
@ -1470,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
if (unlikely(err)) {
/* clean up mapping from ionic_tx_map_skb */
ionic_tx_desc_unmap_bufs(q, desc_info);
if (unlikely(err))
return err;
}
desc_info = &q->tx_info[q->head_idx];
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
len = skb->len;
mss = skb_shinfo(skb)->gso_size;
outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPXIP4 |
SKB_GSO_IPXIP6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM));
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else

View File

@ -1441,6 +1441,9 @@ static int emac_set_pauseparam(struct net_device *dev,
struct emac_priv *priv = netdev_priv(dev);
u8 fc = 0;
if (!netif_running(dev))
return -ENETDOWN;
priv->flow_control_autoneg = pause->autoneg;
if (pause->autoneg) {

View File

@ -66,6 +66,9 @@
#define FDB_GEN_CFG1 0x60
#define SMEM_VLAN_OFFSET 8
#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
#define FDB_HASH_SIZE_MASK GENMASK(6, 3)
#define FDB_HASH_SIZE_SHIFT 3
#define FDB_HASH_SIZE 3
#define FDB_GEN_CFG2 0x64
#define FDB_VLAN_EN BIT(6)
@ -463,6 +466,8 @@ void icssg_init_emac_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
@ -484,6 +489,8 @@ void icssg_init_fw_offload_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +

View File

@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_channel = knav_dma_open_channel(dev,
tx_pipe->dma_chan_name, &config);
if (IS_ERR(tx_pipe->dma_channel)) {
if (!tx_pipe->dma_channel) {
dev_err(dev, "failed opening tx chan(%s)\n",
tx_pipe->dma_chan_name);
ret = PTR_ERR(tx_pipe->dma_channel);
ret = -EINVAL;
goto err;
}
@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
return 0;
err:
if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
if (tx_pipe->dma_channel)
knav_dma_close_channel(tx_pipe->dma_channel);
tx_pipe->dma_channel = NULL;
return ret;
@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
netcp->dma_chan_name, &config);
if (IS_ERR(netcp->rx_channel)) {
if (!netcp->rx_channel) {
dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
netcp->dma_chan_name);
ret = PTR_ERR(netcp->rx_channel);
ret = -EINVAL;
goto fail;
}

View File

@ -2427,7 +2427,8 @@ int wx_sw_init(struct wx *wx)
wx->oem_svid = pdev->subsystem_vendor;
wx->oem_ssid = pdev->subsystem_device;
wx->bus.device = PCI_SLOT(pdev->devfn);
wx->bus.func = PCI_FUNC(pdev->devfn);
wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
rd32(wx, WX_CFG_PORT_ST));
if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
pdev->is_virtfn) {

View File

@ -97,6 +97,8 @@
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
#define WX_CFG_PORT_ST 0x14404
#define WX_CFG_PORT_ST_LANID GENMASK(9, 8)
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
@ -557,8 +559,6 @@ enum WX_MSCA_CMD_value {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define WX_CFG_PORT_ST 0x14404
/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD BIT(0) /* Done */
#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */

View File

@ -219,6 +219,8 @@ static int airoha_mdio_probe(struct platform_device *pdev)
priv = bus->priv;
priv->base_addr = addr;
priv->regmap = device_node_to_regmap(dev->parent->of_node);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk))

View File

@ -936,6 +936,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
ret = strscpy(udm->value, buf, sizeof(udm->value));
@ -949,6 +950,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@ -974,6 +976,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
if (ret)
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
if (msgid_enabled == curr)
@ -994,6 +997,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@ -1008,6 +1012,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
if (ret)
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
if (release_enabled == curr)
@ -1028,6 +1033,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@ -1042,6 +1048,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
if (ret)
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
if (taskname_enabled == curr)
@ -1062,6 +1069,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@ -1077,6 +1085,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
if (ret)
return ret;
mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
if (cpu_nr_enabled == curr)
@ -1105,6 +1114,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}

View File

@ -466,6 +466,12 @@ struct lan8842_priv {
u16 rev;
};
struct lanphy_reg_data {
int page;
u16 addr;
u16 val;
};
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
.cable_diag_reg = LAN8814_CABLE_DIAG,
@ -2835,6 +2841,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
*/
#define LAN8814_PAGE_PCS_DIGITAL 2
/**
* LAN8814_PAGE_EEE - Selects Extended Page 3.
*
* This page contains EEE registers
*/
#define LAN8814_PAGE_EEE 3
/**
* LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
*
@ -2853,6 +2866,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
*/
#define LAN8814_PAGE_PORT_REGS 5
/**
* LAN8814_PAGE_POWER_REGS - Selects Extended Page 28.
*
* This page contains analog control registers and power mode registers.
*/
#define LAN8814_PAGE_POWER_REGS 28
/**
* LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
*
@ -5884,6 +5904,144 @@ static int lan8842_probe(struct phy_device *phydev)
return 0;
}
#define LAN8814_POWER_MGMT_MODE_3_ANEG_MDI 0x13
#define LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX 0x14
#define LAN8814_POWER_MGMT_MODE_5_10BT_MDI 0x15
#define LAN8814_POWER_MGMT_MODE_6_10BT_MDIX 0x16
#define LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN 0x17
#define LAN8814_POWER_MGMT_MODE_8_100BT_MDI 0x18
#define LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX 0x19
#define LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX 0x1a
#define LAN8814_POWER_MGMT_MODE_11_100BT_MDIX 0x1b
#define LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX 0x1c
#define LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX 0x1d
#define LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX 0x1e
#define LAN8814_POWER_MGMT_DLLPD_D BIT(0)
#define LAN8814_POWER_MGMT_ADCPD_D BIT(1)
#define LAN8814_POWER_MGMT_PGAPD_D BIT(2)
#define LAN8814_POWER_MGMT_TXPD_D BIT(3)
#define LAN8814_POWER_MGMT_DLLPD_C BIT(4)
#define LAN8814_POWER_MGMT_ADCPD_C BIT(5)
#define LAN8814_POWER_MGMT_PGAPD_C BIT(6)
#define LAN8814_POWER_MGMT_TXPD_C BIT(7)
#define LAN8814_POWER_MGMT_DLLPD_B BIT(8)
#define LAN8814_POWER_MGMT_ADCPD_B BIT(9)
#define LAN8814_POWER_MGMT_PGAPD_B BIT(10)
#define LAN8814_POWER_MGMT_TXPD_B BIT(11)
#define LAN8814_POWER_MGMT_DLLPD_A BIT(12)
#define LAN8814_POWER_MGMT_ADCPD_A BIT(13)
#define LAN8814_POWER_MGMT_PGAPD_A BIT(14)
#define LAN8814_POWER_MGMT_TXPD_A BIT(15)
#define LAN8814_POWER_MGMT_C_D (LAN8814_POWER_MGMT_DLLPD_D | \
LAN8814_POWER_MGMT_ADCPD_D | \
LAN8814_POWER_MGMT_PGAPD_D | \
LAN8814_POWER_MGMT_DLLPD_C | \
LAN8814_POWER_MGMT_ADCPD_C | \
LAN8814_POWER_MGMT_PGAPD_C)
#define LAN8814_POWER_MGMT_B_C_D (LAN8814_POWER_MGMT_C_D | \
LAN8814_POWER_MGMT_DLLPD_B | \
LAN8814_POWER_MGMT_ADCPD_B | \
LAN8814_POWER_MGMT_PGAPD_B)
#define LAN8814_POWER_MGMT_VAL1 (LAN8814_POWER_MGMT_C_D | \
LAN8814_POWER_MGMT_ADCPD_B | \
LAN8814_POWER_MGMT_PGAPD_B | \
LAN8814_POWER_MGMT_ADCPD_A | \
LAN8814_POWER_MGMT_PGAPD_A)
#define LAN8814_POWER_MGMT_VAL2 LAN8814_POWER_MGMT_C_D
#define LAN8814_POWER_MGMT_VAL3 (LAN8814_POWER_MGMT_C_D | \
LAN8814_POWER_MGMT_DLLPD_B | \
LAN8814_POWER_MGMT_ADCPD_B | \
LAN8814_POWER_MGMT_PGAPD_A)
#define LAN8814_POWER_MGMT_VAL4 (LAN8814_POWER_MGMT_B_C_D | \
LAN8814_POWER_MGMT_ADCPD_A | \
LAN8814_POWER_MGMT_PGAPD_A)
#define LAN8814_POWER_MGMT_VAL5 LAN8814_POWER_MGMT_B_C_D
#define LAN8814_EEE_WAKE_TX_TIMER 0x0e
#define LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL 0x1f
static const struct lanphy_reg_data short_center_tap_errata[] = {
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_3_ANEG_MDI,
LAN8814_POWER_MGMT_VAL1 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX,
LAN8814_POWER_MGMT_VAL1 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_5_10BT_MDI,
LAN8814_POWER_MGMT_VAL1 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_6_10BT_MDIX,
LAN8814_POWER_MGMT_VAL1 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN,
LAN8814_POWER_MGMT_VAL2 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_8_100BT_MDI,
LAN8814_POWER_MGMT_VAL3 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX,
LAN8814_POWER_MGMT_VAL3 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX,
LAN8814_POWER_MGMT_VAL4 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_11_100BT_MDIX,
LAN8814_POWER_MGMT_VAL5 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX,
LAN8814_POWER_MGMT_VAL5 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX,
LAN8814_POWER_MGMT_VAL4 },
{ LAN8814_PAGE_POWER_REGS,
LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX,
LAN8814_POWER_MGMT_VAL4 },
};
static const struct lanphy_reg_data waketx_timer_errata[] = {
{ LAN8814_PAGE_EEE,
LAN8814_EEE_WAKE_TX_TIMER,
LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL },
};
static int lanphy_write_reg_data(struct phy_device *phydev,
const struct lanphy_reg_data *data,
size_t num)
{
int ret = 0;
while (num--) {
ret = lanphy_write_page_reg(phydev, data->page, data->addr,
data->val);
if (ret)
break;
}
return ret;
}
static int lan8842_erratas(struct phy_device *phydev)
{
int ret;
ret = lanphy_write_reg_data(phydev, short_center_tap_errata,
ARRAY_SIZE(short_center_tap_errata));
if (ret)
return ret;
return lanphy_write_reg_data(phydev, waketx_timer_errata,
ARRAY_SIZE(waketx_timer_errata));
}
static int lan8842_config_init(struct phy_device *phydev)
{
int ret;
@ -5896,6 +6054,11 @@ static int lan8842_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
/* Apply the erratas for this device */
ret = lan8842_erratas(phydev);
if (ret < 0)
return ret;
/* Even if the GPIOs are set to control the LEDs the behaviour of the
* LEDs is wrong, they are not blinking when there is traffic.
* To fix this it is required to set extended LED mode

View File

@ -192,6 +192,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (!skbn)
return 0;
/* Raw IP packets don't have a MAC header, but other subsystems
* (like xfrm) may still access MAC header offsets, so they must
* be initialized.
*/
skb_reset_mac_header(skbn);
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);

View File

@ -910,17 +910,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
goto ok;
}
/*
* Verify that we can indeed put this data into a skb.
* This is here to handle cases when the device erroneously
* tries to receive more than is possible. This is usually
* the case of a broken device.
*/
if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
dev_kfree_skb(skb);
return NULL;
}
BUG_ON(offset >= PAGE_SIZE);
while (len) {
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
@ -2112,9 +2101,19 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
struct sk_buff *skb =
page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
struct sk_buff *skb;
/* Make sure that len does not exceed the size allocated in
* add_recvbuf_big.
*/
if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
dev->name, len,
(vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
goto err;
}
skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
@ -2539,6 +2538,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
return NULL;
}
static inline u32
virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
{
return __le16_to_cpu(hdr_hash->hash_value_lo) |
(__le16_to_cpu(hdr_hash->hash_value_hi) << 16);
}
static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
struct sk_buff *skb)
{
@ -2565,7 +2571,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type);
}
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
@ -3311,6 +3317,10 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
/* Make sure it's safe to cast between formats */
BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr));
BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr));
can_push = vi->any_header_sg &&
!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
@ -6750,7 +6760,7 @@ static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
hash_report = VIRTIO_NET_HASH_REPORT_NONE;
*rss_type = virtnet_xdp_rss_type[hash_report];
*hash = __le32_to_cpu(hdr_hash->hash_value);
*hash = virtio_net_hash_value(hdr_hash);
return 0;
}

View File

@ -648,7 +648,8 @@ static int pef2256_add_audio_devices(struct pef2256 *pef2256)
audio_devs[i].id = i;
}
ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL);
ret = devm_mfd_add_devices(pef2256->dev, 0, audio_devs, count,
NULL, 0, NULL);
kfree(audio_devs);
return ret;
}
@ -822,7 +823,7 @@ static int pef2256_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pef2256);
ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs,
ret = devm_mfd_add_devices(pef2256->dev, 0, pef2256_devs,
ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
if (ret) {
dev_err(pef2256->dev, "add devices failed (%d)\n", ret);

View File

@ -1764,32 +1764,33 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left) {
/* Sometimes the PCI HIF doesn't receive interrupt
* for the service ready message even if the buffer
* was completed. PCIe sniffer shows that it's
* because the corresponding CE ring doesn't fires
* it. Workaround here by polling CE rings. Since
* the message could arrive at any time, continue
* polling until timeout.
* it. Workaround here by polling CE rings once.
*/
do {
ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
/* The 100 ms granularity is a tradeoff considering scheduler
* overhead and response latency
*/
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
msecs_to_jiffies(100));
if (time_left)
return 0;
} while (time_before(jiffies, timeout));
ath10k_warn(ar, "failed to receive service ready completion\n");
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left) {
ath10k_warn(ar, "polling timed out\n");
return -ETIMEDOUT;
}
ath10k_warn(ar, "service ready completion received, continuing normally\n");
}
return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)

View File

@ -4064,68 +4064,12 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (vif->type != NL80211_IFTYPE_STATION)
return;
enable_ps = arvif->ahvif->ps;
if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
info = ath12k_mac_get_link_bss_conf(arvif);
if (!info) {
ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
vif->addr, arvif->link_id);
return;
}
/* firmware doesn't like 0 */
timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
timeout);
if (ret) {
ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return;
}
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
if (ret)
ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
}
static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@ -4189,24 +4133,61 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
}
if (changed & BSS_CHANGED_PS) {
links = ahvif->links_map;
vif_cfg = &vif->cfg;
static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
bool enable_ps;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
if (!arvif || !arvif->ar)
continue;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ar = arvif->ar;
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (ar->ab->hw_params->supports_sta_ps) {
ahvif->ps = vif_cfg->ps;
ath12k_mac_vif_setup_ps(arvif);
enable_ps = arvif->ahvif->ps;
if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
info = ath12k_mac_get_link_bss_conf(arvif);
if (!info) {
ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
vif->addr, arvif->link_id);
return;
}
/* firmware doesn't like 0 */
timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
timeout);
if (ret) {
ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return;
}
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
if (ret)
ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
@ -4228,6 +4209,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@ -4514,6 +4496,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_PS &&
ar->ab->hw_params->supports_sta_ps) {
ahvif->ps = vif_cfg->ps;
ath12k_mac_vif_setup_ps(arvif);
}
}
static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,

View File

@ -6698,14 +6698,15 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
static void remove_user_radios(u32 portid)
static void remove_user_radios(u32 portid, int netgroup)
{
struct mac80211_hwsim_data *entry, *tmp;
LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
if (entry->destroy_on_close && entry->portid == portid &&
entry->netgroup == netgroup) {
list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
@ -6730,7 +6731,7 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
remove_user_radios(notify->portid);
remove_user_radios(notify->portid, hwsim_net_get_netgroup(notify->net));
if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"

View File

@ -791,6 +791,7 @@ static int __zd_usb_enable_rx(struct zd_usb *usb)
if (urbs) {
for (i = 0; i < RX_URBS_COUNT; i++)
free_rx_urb(urbs[i]);
kfree(urbs);
}
return r;
}

View File

@ -561,10 +561,14 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
return ptp_mask_en_single(pccontext->private_clkdata, argptr);
case PTP_SYS_OFFSET_PRECISE_CYCLES:
if (!ptp->has_cycles)
return -EOPNOTSUPP;
return ptp_sys_offset_precise(ptp, argptr,
ptp->info->getcrosscycles);
case PTP_SYS_OFFSET_EXTENDED_CYCLES:
if (!ptp->has_cycles)
return -EOPNOTSUPP;
return ptp_sys_offset_extended(ptp, argptr,
ptp->info->getcyclesx64);
default:

View File

@ -402,7 +402,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
* @name: slave channel name
* @config: dma configuration parameters
*
* Returns pointer to appropriate DMA channel on success or error.
* Return: Pointer to appropriate DMA channel on success or NULL on error.
*/
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
@ -414,13 +414,13 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!kdev) {
pr_err("keystone-navigator-dma driver not registered\n");
return (void *)-EINVAL;
return NULL;
}
chan_num = of_channel_match_helper(dev->of_node, name, &instance);
if (chan_num < 0) {
dev_err(kdev->dev, "No DMA instance with name %s\n", name);
return (void *)-EINVAL;
return NULL;
}
dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
@ -431,7 +431,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (config->direction != DMA_MEM_TO_DEV &&
config->direction != DMA_DEV_TO_MEM) {
dev_err(kdev->dev, "bad direction\n");
return (void *)-EINVAL;
return NULL;
}
/* Look for correct dma instance */
@ -443,7 +443,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
return (void *)-EINVAL;
return NULL;
}
/* Look for correct dma channel from dma instance */
@ -463,14 +463,14 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
return (void *)-EINVAL;
return NULL;
}
if (atomic_read(&chan->ref_count) >= 1) {
if (!check_config(chan, config)) {
dev_err(kdev->dev, "channel %d config miss-match\n",
chan_num);
return (void *)-EINVAL;
return NULL;
}
}

View File

@ -78,8 +78,20 @@ struct libie_fwlog {
);
};
#if IS_ENABLED(CONFIG_LIBIE_FWLOG)
int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api);
void libie_fwlog_deinit(struct libie_fwlog *fwlog);
void libie_fwlog_reregister(struct libie_fwlog *fwlog);
void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len);
#else
static inline int libie_fwlog_init(struct libie_fwlog *fwlog,
struct libie_fwlog_api *api)
{
return -EOPNOTSUPP;
}
static inline void libie_fwlog_deinit(struct libie_fwlog *fwlog) { }
static inline void libie_fwlog_reregister(struct libie_fwlog *fwlog) { }
static inline void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf,
u16 len) { }
#endif /* CONFIG_LIBIE_FWLOG */
#endif /* _LIBIE_FWLOG_H_ */

View File

@ -401,7 +401,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
if (!tnl_hdr_negotiated)
return -EINVAL;
vhdr->hash_hdr.hash_value = 0;
vhdr->hash_hdr.hash_value_lo = 0;
vhdr->hash_hdr.hash_value_hi = 0;
vhdr->hash_hdr.hash_report = 0;
vhdr->hash_hdr.padding = 0;

View File

@ -780,7 +780,7 @@ struct mgmt_adv_pattern {
__u8 ad_type;
__u8 offset;
__u8 length;
__u8 value[31];
__u8 value[HCI_MAX_AD_LENGTH];
} __packed;
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052

View File

@ -6435,6 +6435,11 @@ static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
* after wiphy_lock() was called. Therefore, wiphy_cancel_work() can
* use just cancel_work() instead of cancel_work_sync(), it requires
* being in a section protected by wiphy_lock().
*
* Note that these are scheduled with a timer where the accuracy
* becomes less the longer in the future the scheduled timer is. Use
* wiphy_hrtimer_work_queue() if the timer must be not be late by more
* than approximately 10 percent.
*/
void wiphy_delayed_work_queue(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork,
@ -6506,6 +6511,79 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
bool wiphy_delayed_work_pending(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
struct wiphy_hrtimer_work {
struct wiphy_work work;
struct wiphy *wiphy;
struct hrtimer timer;
};
enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t);
static inline void wiphy_hrtimer_work_init(struct wiphy_hrtimer_work *hrwork,
wiphy_work_func_t func)
{
hrtimer_setup(&hrwork->timer, wiphy_hrtimer_work_timer,
CLOCK_BOOTTIME, HRTIMER_MODE_REL);
wiphy_work_init(&hrwork->work, func);
}
/**
* wiphy_hrtimer_work_queue - queue hrtimer work for the wiphy
* @wiphy: the wiphy to queue for
* @hrwork: the high resolution timer worker
* @delay: the delay given as a ktime_t
*
* Please refer to wiphy_delayed_work_queue(). The difference is that
* the hrtimer work uses a high resolution timer for scheduling. This
* may be needed if timeouts might be scheduled further in the future
* and the accuracy of the normal timer is not sufficient.
*
* Expect a delay of a few milliseconds as the timer is scheduled
* with some slack and some more time may pass between queueing the
* work and its start.
*/
void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork,
ktime_t delay);
/**
* wiphy_hrtimer_work_cancel - cancel previously queued hrtimer work
* @wiphy: the wiphy, for debug purposes
* @hrtimer: the hrtimer work to cancel
*
* Cancel the work *without* waiting for it, this assumes being
* called under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrtimer);
/**
* wiphy_hrtimer_work_flush - flush previously queued hrtimer work
* @wiphy: the wiphy, for debug purposes
* @hrwork: the hrtimer work to flush
*
* Flush the work (i.e. run it if pending). This must be called
* under the wiphy mutex acquired by wiphy_lock().
*/
void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork);
/**
* wiphy_hrtimer_work_pending - Find out whether a wiphy hrtimer
* work item is currently pending.
*
* @wiphy: the wiphy, for debug purposes
* @hrwork: the hrtimer work in question
*
* Return: true if timer is pending, false otherwise
*
* Please refer to the wiphy_delayed_work_pending() documentation as
* this is the equivalent function for hrtimer based delayed work
* items.
*/
bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork);
/**
* enum ieee80211_ap_reg_power - regulatory power for an Access Point
*

View File

@ -193,7 +193,8 @@ struct virtio_net_hdr_v1 {
struct virtio_net_hdr_v1_hash {
struct virtio_net_hdr_v1 hdr;
__le32 hash_value;
__le16 hash_value_lo;
__le16 hash_value_hi;
#define VIRTIO_NET_HASH_REPORT_NONE 0
#define VIRTIO_NET_HASH_REPORT_IPv4 1
#define VIRTIO_NET_HASH_REPORT_TCPv4 2

View File

@ -193,6 +193,8 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
grp->nr_vlan_devs++;
netdev_update_features(dev);
return 0;
out_unregister_netdev:

View File

@ -4218,6 +4218,13 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
}
if (i == ARRAY_SIZE(hci_cc_table)) {
if (!skb->len) {
bt_dev_err(hdev, "Unexpected cc 0x%4.4x with no status",
*opcode);
*status = HCI_ERROR_UNSPECIFIED;
return;
}
/* Unknown opcode, assume byte 0 contains the status, so
* that e.g. __hci_cmd_sync() properly returns errors
* for vendor specific commands send by HCI drivers.

View File

@ -5395,9 +5395,9 @@ static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
for (i = 0; i < pattern_count; i++) {
offset = patterns[i].offset;
length = patterns[i].length;
if (offset >= HCI_MAX_EXT_AD_LENGTH ||
length > HCI_MAX_EXT_AD_LENGTH ||
(offset + length) > HCI_MAX_EXT_AD_LENGTH)
if (offset >= HCI_MAX_AD_LENGTH ||
length > HCI_MAX_AD_LENGTH ||
(offset + length) > HCI_MAX_AD_LENGTH)
return MGMT_STATUS_INVALID_PARAMS;
p = kmalloc(sizeof(*p), GFP_KERNEL);

View File

@ -25,7 +25,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
vg = nbp_vlan_group_rcu(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
(br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
(br_mst_is_enabled(p) || p->state == BR_STATE_FORWARDING) &&
br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) &&
!br_skb_isolated(p, skb);
}

View File

@ -386,6 +386,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
del_nbp(p);
}
br_mst_uninit(br);
br_recalculate_neigh_suppress_enabled(br);
br_fdb_delete_by_port(br, NULL, 0, 1);

View File

@ -94,7 +94,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
br = p->br;
if (br_mst_is_enabled(br)) {
if (br_mst_is_enabled(p)) {
state = BR_STATE_FORWARDING;
} else {
if (p->state == BR_STATE_DISABLED) {
@ -429,7 +429,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
forward:
if (br_mst_is_enabled(p->br))
if (br_mst_is_enabled(p))
goto defer_stp_filtering;
switch (p->state) {

View File

@ -22,6 +22,12 @@ bool br_mst_enabled(const struct net_device *dev)
}
EXPORT_SYMBOL_GPL(br_mst_enabled);
/* Bridge teardown hook: drop the br_mst_used static-branch reference that
 * was taken (via static_branch_inc) when MST was enabled on this bridge,
 * so the per-bridge inc/dec refcounting stays balanced across deletion.
 */
void br_mst_uninit(struct net_bridge *br)
{
/* Only decrement if this bridge actually held a reference. */
if (br_opt_get(br, BROPT_MST_ENABLED))
static_branch_dec(&br_mst_used);
}
int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids)
{
const struct net_bridge_vlan_group *vg;
@ -225,9 +231,9 @@ int br_mst_set_enabled(struct net_bridge *br, bool on,
return err;
if (on)
static_branch_enable(&br_mst_used);
static_branch_inc(&br_mst_used);
else
static_branch_disable(&br_mst_used);
static_branch_dec(&br_mst_used);
br_opt_toggle(br, BROPT_MST_ENABLED, on);
return 0;

View File

@ -1935,10 +1935,12 @@ static inline bool br_vlan_state_allowed(u8 state, bool learn_allow)
/* br_mst.c */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
DECLARE_STATIC_KEY_FALSE(br_mst_used);
static inline bool br_mst_is_enabled(struct net_bridge *br)
static inline bool br_mst_is_enabled(const struct net_bridge_port *p)
{
/* check the port's vlan group to avoid racing with port deletion */
return static_branch_unlikely(&br_mst_used) &&
br_opt_get(br, BROPT_MST_ENABLED);
br_opt_get(p->br, BROPT_MST_ENABLED) &&
rcu_access_pointer(p->vlgrp);
}
int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
@ -1952,8 +1954,9 @@ int br_mst_fill_info(struct sk_buff *skb,
const struct net_bridge_vlan_group *vg);
int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr,
struct netlink_ext_ack *extack);
void br_mst_uninit(struct net_bridge *br);
#else
static inline bool br_mst_is_enabled(struct net_bridge *br)
static inline bool br_mst_is_enabled(const struct net_bridge_port *p)
{
return false;
}
@ -1987,6 +1990,10 @@ static inline int br_mst_process(struct net_bridge_port *p,
{
return -EOPNOTSUPP;
}
static inline void br_mst_uninit(struct net_bridge *br)
{
}
#endif
struct nf_br_ops {

View File

@ -60,9 +60,10 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
struct sk_buff *skb;
int work_done = 0;
__local_lock_nested_bh(&cell->bh_lock);
while (work_done < budget) {
__local_lock_nested_bh(&cell->bh_lock);
skb = __skb_dequeue(&cell->napi_skbs);
__local_unlock_nested_bh(&cell->bh_lock);
if (!skb)
break;
napi_gro_receive(napi, skb);
@ -71,7 +72,6 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
if (work_done < budget)
napi_complete_done(napi, work_done);
__local_unlock_nested_bh(&cell->bh_lock);
return work_done;
}

View File

@ -228,19 +228,16 @@ static void refill_skbs(struct netpoll *np)
{
struct sk_buff_head *skb_pool;
struct sk_buff *skb;
unsigned long flags;
skb_pool = &np->skb_pool;
spin_lock_irqsave(&skb_pool->lock, flags);
while (skb_pool->qlen < MAX_SKBS) {
while (READ_ONCE(skb_pool->qlen) < MAX_SKBS) {
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
if (!skb)
break;
__skb_queue_tail(skb_pool, skb);
skb_queue_tail(skb_pool, skb);
}
spin_unlock_irqrestore(&skb_pool->lock, flags);
}
static void zap_completion_queue(void)

View File

@ -224,12 +224,14 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
{
int len = BRCM_LEG_TAG_LEN;
int source_port;
__be16 *proto;
u8 *brcm_tag;
if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
return NULL;
brcm_tag = dsa_etype_header_pos_rx(skb);
proto = (__be16 *)(brcm_tag + BRCM_LEG_TAG_LEN);
source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
@ -237,8 +239,12 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;
/* VLAN tag is added by BCM63xx internal switch */
if (netdev_uses_dsa(skb->dev))
/* The internal switch in BCM63XX SoCs always tags on egress on the CPU
* port. We use VID 0 internally for untagged traffic, so strip the tag
* if the TCI field is all 0, and keep it otherwise to also retain
* e.g. 802.1p tagged packets.
*/
if (proto[0] == htons(ETH_P_8021Q) && proto[1] == 0)
len += VLAN_HLEN;
/* Remove Broadcom tag and update checksum */

View File

@ -1290,7 +1290,7 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
&link->csa.finalize_work);
break;
case NL80211_IFTYPE_STATION:
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
&link->u.mgd.csa.switch_work, 0);
break;
case NL80211_IFTYPE_UNSPECIFIED:

View File

@ -612,11 +612,11 @@ struct ieee80211_if_managed {
u8 *assoc_req_ies;
size_t assoc_req_ies_len;
struct wiphy_delayed_work ml_reconf_work;
struct wiphy_hrtimer_work ml_reconf_work;
u16 removed_links;
/* TID-to-link mapping support */
struct wiphy_delayed_work ttlm_work;
struct wiphy_hrtimer_work ttlm_work;
struct ieee80211_adv_ttlm_info ttlm_info;
struct wiphy_work teardown_ttlm_work;
@ -1017,10 +1017,10 @@ struct ieee80211_link_data_managed {
bool operating_11g_mode;
struct {
struct wiphy_delayed_work switch_work;
struct wiphy_hrtimer_work switch_work;
struct cfg80211_chan_def ap_chandef;
struct ieee80211_parsed_tpe tpe;
unsigned long time;
ktime_t time;
bool waiting_bcn;
bool ignored_same_chan;
bool blocked_tx;

View File

@ -472,10 +472,10 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
* from there.
*/
if (link->conf->csa_active)
wiphy_delayed_work_queue(local->hw.wiphy,
wiphy_hrtimer_work_queue(local->hw.wiphy,
&link->u.mgd.csa.switch_work,
link->u.mgd.csa.time -
jiffies);
ktime_get_boottime());
}
for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {

View File

@ -45,7 +45,7 @@
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS (100 * USEC_PER_MSEC)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00
#define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5)
@ -2594,7 +2594,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
return;
}
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
&link->u.mgd.csa.switch_work, 0);
}
@ -2753,7 +2753,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
.timestamp = timestamp,
.device_timestamp = device_timestamp,
};
unsigned long now;
u32 csa_time_tu;
ktime_t now;
int res;
lockdep_assert_wiphy(local->hw.wiphy);
@ -2983,10 +2984,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
csa_ie.mode);
/* we may have to handle timeout for deactivated link in software */
now = jiffies;
link->u.mgd.csa.time = now +
TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) *
link->conf->beacon_int);
now = ktime_get_boottime();
csa_time_tu = (max_t(int, csa_ie.count, 1) - 1) * link->conf->beacon_int;
link->u.mgd.csa.time = now + us_to_ktime(ieee80211_tu_to_usec(csa_time_tu));
if (ieee80211_vif_link_active(&sdata->vif, link->link_id) &&
local->ops->channel_switch) {
@ -3001,7 +3001,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
/* channel switch handled in software */
wiphy_delayed_work_queue(local->hw.wiphy,
wiphy_hrtimer_work_queue(local->hw.wiphy,
&link->u.mgd.csa.switch_work,
link->u.mgd.csa.time - now);
return;
@ -4242,14 +4242,14 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(&sdata->u.mgd.ttlm_info, 0,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->neg_ttlm_timeout_work);
sdata->u.mgd.removed_links = 0;
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy,
&sdata->u.mgd.ml_reconf_work);
wiphy_work_cancel(sdata->local->hw.wiphy,
@ -6876,7 +6876,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
/* In case the removal was cancelled, abort it */
if (sdata->u.mgd.removed_links) {
sdata->u.mgd.removed_links = 0;
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy,
&sdata->u.mgd.ml_reconf_work);
}
return;
@ -6906,9 +6906,9 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
}
sdata->u.mgd.removed_links = removed_links;
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.ml_reconf_work,
TU_TO_JIFFIES(delay));
us_to_ktime(ieee80211_tu_to_usec(delay)));
}
static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
@ -7095,7 +7095,7 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
/* if a planned TID-to-link mapping was cancelled -
* abort it
*/
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy,
&sdata->u.mgd.ttlm_work);
} else if (sdata->u.mgd.ttlm_info.active) {
/* if no TID-to-link element, set to default mapping in
@ -7130,7 +7130,7 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
if (ttlm_info.switch_time) {
u16 beacon_ts_tu, st_tu, delay;
u32 delay_jiffies;
u64 delay_usec;
u64 mask;
/* The t2l map switch time is indicated with a partial
@ -7152,23 +7152,23 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata,
if (delay > IEEE80211_ADV_TTLM_ST_UNDERFLOW)
return;
delay_jiffies = TU_TO_JIFFIES(delay);
delay_usec = ieee80211_tu_to_usec(delay);
/* Link switching can take time, so schedule it
* 100ms before to be ready on time
*/
if (delay_jiffies > IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS)
delay_jiffies -=
if (delay_usec > IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS)
delay_usec -=
IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS;
else
delay_jiffies = 0;
delay_usec = 0;
sdata->u.mgd.ttlm_info = ttlm_info;
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
wiphy_hrtimer_work_cancel(sdata->local->hw.wiphy,
&sdata->u.mgd.ttlm_work);
wiphy_delayed_work_queue(sdata->local->hw.wiphy,
wiphy_hrtimer_work_queue(sdata->local->hw.wiphy,
&sdata->u.mgd.ttlm_work,
delay_jiffies);
us_to_ktime(delay_usec));
return;
}
}
@ -8793,7 +8793,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_csa_connection_drop_work);
wiphy_delayed_work_init(&ifmgd->tdls_peer_del_work,
ieee80211_tdls_peer_del_work);
wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
wiphy_hrtimer_work_init(&ifmgd->ml_reconf_work,
ieee80211_ml_reconf_work);
wiphy_delayed_work_init(&ifmgd->reconf.wk,
ieee80211_ml_sta_reconf_timeout);
@ -8802,7 +8802,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
wiphy_delayed_work_init(&ifmgd->tx_tspec_wk,
ieee80211_sta_handle_tspec_ac_params_wk);
wiphy_delayed_work_init(&ifmgd->ttlm_work,
wiphy_hrtimer_work_init(&ifmgd->ttlm_work,
ieee80211_tid_to_link_map_work);
wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work,
ieee80211_neg_ttlm_timeout_work);
@ -8849,7 +8849,7 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
else
link->u.mgd.req_smps = IEEE80211_SMPS_OFF;
wiphy_delayed_work_init(&link->u.mgd.csa.switch_work,
wiphy_hrtimer_work_init(&link->u.mgd.csa.switch_work,
ieee80211_csa_switch_work);
ieee80211_clear_tpe(&link->conf->tpe);
@ -10064,7 +10064,7 @@ void ieee80211_mgd_stop_link(struct ieee80211_link_data *link)
&link->u.mgd.request_smps_work);
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.recalc_smps);
wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
wiphy_hrtimer_work_cancel(link->sdata->local->hw.wiphy,
&link->u.mgd.csa.switch_work);
}

View File

@ -73,19 +73,26 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
struct nlattr *attr;
void *info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(laddr, address_list, list)
addrcnt++;
rcu_read_unlock();
attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
if (!attr)
return -EMSGSIZE;
info = nla_data(attr);
rcu_read_lock();
list_for_each_entry_rcu(laddr, address_list, list) {
memcpy(info, &laddr->a, sizeof(laddr->a));
memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
info += addrlen;
if (!--addrcnt)
break;
}
rcu_read_unlock();
return 0;
}
@ -223,14 +230,15 @@ struct sctp_comm_param {
bool net_admin;
};
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
static size_t inet_assoc_attr_size(struct sock *sk,
struct sctp_association *asoc)
{
int addrlen = sizeof(struct sockaddr_storage);
int addrcnt = 0;
struct sctp_sockaddr_entry *laddr;
list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
list)
list, lockdep_sock_is_held(sk))
addrcnt++;
return nla_total_size(sizeof(struct sctp_info))
@ -256,11 +264,14 @@ static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *t
if (err)
return err;
rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
if (!rep)
return -ENOMEM;
lock_sock(sk);
rep = nlmsg_new(inet_assoc_attr_size(sk, assoc), GFP_KERNEL);
if (!rep) {
release_sock(sk);
return -ENOMEM;
}
if (ep != assoc->ep) {
err = -EAGAIN;
goto out;

View File

@ -37,7 +37,7 @@
/* 1st Level Abstractions. */
/* Initialize a new transport from provided memory. */
static struct sctp_transport *sctp_transport_init(struct net *net,
static void sctp_transport_init(struct net *net,
struct sctp_transport *peer,
const union sctp_addr *addr,
gfp_t gfp)
@ -83,8 +83,6 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
refcount_set(&peer->refcnt, 1);
return peer;
}
/* Allocate and initialize a new transport. */
@ -96,20 +94,13 @@ struct sctp_transport *sctp_transport_new(struct net *net,
transport = kzalloc(sizeof(*transport), gfp);
if (!transport)
goto fail;
return NULL;
if (!sctp_transport_init(net, transport, addr, gfp))
goto fail_init;
sctp_transport_init(net, transport, addr, gfp);
SCTP_DBG_OBJCNT_INC(transport);
return transport;
fail_init:
kfree(transport);
fail:
return NULL;
}
/* This transport is no longer needed. Free up if possible, or

View File

@ -1787,6 +1787,62 @@ bool wiphy_delayed_work_pending(struct wiphy *wiphy,
}
EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending);
/* hrtimer expiry callback for a wiphy hrtimer work item: recover the
 * containing wiphy_hrtimer_work and hand its regular work item to the
 * wiphy work queue. The timer is one-shot and never re-arms itself.
 * Note: hrwork->wiphy is set by wiphy_hrtimer_work_queue() on the
 * delayed path before the timer is started.
 */
enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t)
{
struct wiphy_hrtimer_work *hrwork =
container_of(t, struct wiphy_hrtimer_work, timer);
wiphy_work_queue(hrwork->wiphy, &hrwork->work);
return HRTIMER_NORESTART;
}
EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_timer);
/* Queue an hrtimer-based delayed work item on the wiphy.
 *
 * @wiphy: the wiphy to run the work on
 * @hrwork: the hrtimer work item
 * @delay: relative delay; 0 means run as soon as possible
 *
 * With no delay, any pending timer is cancelled and the work is queued
 * immediately. Otherwise the hrtimer is armed relative to now with a
 * 1 ms slack window (1000 * NSEC_PER_USEC), trading a little precision
 * for fewer wakeups while still being far more accurate than jiffies
 * for far-in-the-future deadlines.
 */
void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork,
ktime_t delay)
{
trace_wiphy_hrtimer_work_queue(wiphy, &hrwork->work, delay);
if (!delay) {
/* Immediate path: no timer needed, queue directly. */
hrtimer_cancel(&hrwork->timer);
wiphy_work_queue(wiphy, &hrwork->work);
return;
}
/* Record the wiphy for the timer callback to use on expiry. */
hrwork->wiphy = wiphy;
hrtimer_start_range_ns(&hrwork->timer, delay,
1000 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}
EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_queue);
/* Cancel an hrtimer work item: stop the timer (waiting for a running
 * expiry callback to finish) and cancel the underlying work item.
 * Must be called with the wiphy mutex held, as asserted below.
 */
void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork)
{
lockdep_assert_held(&wiphy->mtx);
hrtimer_cancel(&hrwork->timer);
wiphy_work_cancel(wiphy, &hrwork->work);
}
EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_cancel);
/* Flush an hrtimer work item: stop any pending timer, then run/flush the
 * work item synchronously so it has completed when this returns.
 * Must be called with the wiphy mutex held.
 */
void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork)
{
lockdep_assert_held(&wiphy->mtx);
hrtimer_cancel(&hrwork->timer);
wiphy_work_flush(wiphy, &hrwork->work);
}
EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_flush);
/* Report whether the hrtimer of this work item is currently queued.
 * NOTE(review): this only checks the timer, not whether the underlying
 * work item is already queued after timer expiry — matches the kernel-doc
 * contract for this helper, but callers should be aware of the window.
 * @wiphy is unused here and kept for API symmetry/debug purposes.
 */
bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
struct wiphy_hrtimer_work *hrwork)
{
return hrtimer_is_queued(&hrwork->timer);
}
EXPORT_SYMBOL_GPL(wiphy_hrtimer_work_pending);
static int __init cfg80211_init(void)
{
int err;

View File

@ -304,6 +304,27 @@ TRACE_EVENT(wiphy_delayed_work_queue,
__entry->delay)
);
TRACE_EVENT(wiphy_hrtimer_work_queue,
TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work,
ktime_t delay),
TP_ARGS(wiphy, work, delay),
TP_STRUCT__entry(
WIPHY_ENTRY
__field(void *, instance)
__field(void *, func)
__field(ktime_t, delay)
),
TP_fast_assign(
WIPHY_ASSIGN;
__entry->instance = work;
__entry->func = work->func;
__entry->delay = delay;
),
TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%llu",
WIPHY_PR_ARG, __entry->instance, __entry->func,
__entry->delay)
);
TRACE_EVENT(wiphy_work_worker_start,
TP_PROTO(struct wiphy *wiphy),
TP_ARGS(wiphy),

View File

@ -20,4 +20,8 @@ TEST_PROGS := \
udp_tunnel_nic.sh \
# end of TEST_PROGS
TEST_FILES := \
ethtool-common.sh
# end of TEST_FILES
include ../../../lib.mk

View File

@ -754,11 +754,11 @@ static void send_ipv6_exthdr(int fd, struct sockaddr_ll *daddr, char *ext_data1,
static char exthdr_pck[sizeof(buf) + MIN_EXTHDR_SIZE];
create_packet(buf, 0, 0, PAYLOAD_LEN, 0);
add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data1);
add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data1);
write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr);
create_packet(buf, PAYLOAD_LEN * 1, 0, PAYLOAD_LEN, 0);
add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_HOPOPTS, ext_data2);
add_ipv6_exthdr(buf, exthdr_pck, IPPROTO_DSTOPTS, ext_data2);
write_packet(fd, exthdr_pck, total_hdr_len + PAYLOAD_LEN + MIN_EXTHDR_SIZE, daddr);
}
@ -989,6 +989,7 @@ static void check_recv_pkts(int fd, int *correct_payload,
static void gro_sender(void)
{
const int fin_delay_us = 100 * 1000;
static char fin_pkt[MAX_HDR_LEN];
struct sockaddr_ll daddr = {};
int txfd = -1;
@ -1032,15 +1033,22 @@ static void gro_sender(void)
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
} else if (strcmp(testname, "tcp") == 0) {
send_changed_checksum(txfd, &daddr);
/* Adding sleep before sending FIN so that it is not
* received prior to other packets.
*/
usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_changed_seq(txfd, &daddr);
usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_changed_ts(txfd, &daddr);
usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
send_diff_opt(txfd, &daddr);
usleep(fin_delay_us);
write_packet(txfd, fin_pkt, total_hdr_len, &daddr);
} else if (strcmp(testname, "ip") == 0) {
send_changed_ECN(txfd, &daddr);

View File

@ -389,9 +389,9 @@ run_test() {
local rc
host_oops_cnt_before=$(dmesg | grep -c -i 'Oops')
host_warn_cnt_before=$(dmesg --level=warn | wc -l)
host_warn_cnt_before=$(dmesg --level=warn | grep -c -i 'vsock')
vm_oops_cnt_before=$(vm_ssh -- dmesg | grep -c -i 'Oops')
vm_warn_cnt_before=$(vm_ssh -- dmesg --level=warn | wc -l)
vm_warn_cnt_before=$(vm_ssh -- dmesg --level=warn | grep -c -i 'vsock')
name=$(echo "${1}" | awk '{ print $1 }')
eval test_"${name}"
@ -403,7 +403,7 @@ run_test() {
rc=$KSFT_FAIL
fi
host_warn_cnt_after=$(dmesg --level=warn | wc -l)
host_warn_cnt_after=$(dmesg --level=warn | grep -c -i 'vsock')
if [[ ${host_warn_cnt_after} -gt ${host_warn_cnt_before} ]]; then
echo "FAIL: kernel warning detected on host" | log_host "${name}"
rc=$KSFT_FAIL
@ -415,7 +415,7 @@ run_test() {
rc=$KSFT_FAIL
fi
vm_warn_cnt_after=$(vm_ssh -- dmesg --level=warn | wc -l)
vm_warn_cnt_after=$(vm_ssh -- dmesg --level=warn | grep -c -i 'vsock')
if [[ ${vm_warn_cnt_after} -gt ${vm_warn_cnt_before} ]]; then
echo "FAIL: kernel warning detected on vm" | log_host "${name}"
rc=$KSFT_FAIL