mirror of https://github.com/torvalds/linux.git
gve: Consolidate and persist ethtool ring changes
Refactor the ethtool ring parameter configuration logic to address two
issues: unnecessary queue resets and lost configuration changes when
the interface is down.
Previously, `gve_set_ringparam` could trigger multiple queue
destructions and recreations for a single command, as different settings
(e.g., header split, ring sizes) were applied one by one. Furthermore,
if the interface was down, some settings changed via ethtool (notably
header split) were discarded instead of being saved for the next time
the interface was brought up.
This patch centralizes the configuration logic. Individual functions
like `gve_set_hsplit_config` are modified to only validate and stage
changes in a temporary config struct.
The main `gve_set_ringparam` function now gathers all staged changes
and applies them as a single, combined configuration (a condensed C
sketch follows the sign-offs below):
1. If the interface is up, it calls `gve_adjust_config` once.
2. If the interface is down, it saves the settings directly to the
driver's private struct, ensuring they persist and are used when
the interface is brought back up.
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Link: https://patch.msgid.link/20251017012614.3631351-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
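To make the consolidated flow concrete, here is a minimal, self-contained C sketch of the stage-then-apply pattern described above. All names in it (`ring_cfg`, `drv_priv`, `stage_ring_sizes`, `set_ringparam`) are simplified stand-ins invented for illustration, not the driver's real structs or helpers; the authoritative implementation is the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's alloc-config and private structs;
 * the real types are gve_tx/rx_alloc_rings_cfg and gve_priv. */
struct ring_cfg {
	unsigned short tx_ring_size;
	unsigned short rx_ring_size;
	bool header_split;
};

struct drv_priv {
	bool if_running;        /* models netif_running() */
	struct ring_cfg saved;  /* settings used at the next interface up */
};

/* Stage one setting: validate it and write it into the temporary config
 * only; nothing is applied to hardware here. */
static int stage_ring_sizes(struct ring_cfg *staged,
			    unsigned short tx, unsigned short rx)
{
	if (!tx || !rx)
		return -1;      /* invalid request, nothing was touched */
	staged->tx_ring_size = tx;
	staged->rx_ring_size = rx;
	return 0;
}

/* Gather all staged changes, then apply them once or persist them. */
static int set_ringparam(struct drv_priv *priv, unsigned short tx,
			 unsigned short rx, bool header_split)
{
	struct ring_cfg staged = priv->saved;   /* start from current config */

	staged.header_split = header_split;     /* stage header-split change */
	if (stage_ring_sizes(&staged, tx, rx))  /* stage ring-size change */
		return -1;

	if (priv->if_running) {
		/* One combined reconfiguration; the real driver calls
		 * gve_adjust_config() exactly once here. */
		printf("applying combined config in one queue reset\n");
	} else {
		/* Interface down: persist for the next up instead of
		 * discarding the request. */
		priv->saved = staged;
	}
	return 0;
}

int main(void)
{
	struct drv_priv priv = { .if_running = false };

	/* Interface down: the request is staged and persisted, not lost. */
	set_ringparam(&priv, 2048, 2048, true);
	printf("saved: tx=%hu rx=%hu hsplit=%d\n", priv.saved.tx_ring_size,
	       priv.saved.rx_ring_size, (int)priv.saved.header_split);
	return 0;
}

Run with the "interface" down, the sketch persists both staged settings, mirroring step 2 of the list above.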
parent a5cd3a60aa
commit c30fd916c4
drivers/net/ethernet/google/gve/gve.h

@@ -1251,7 +1251,8 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
 bool gve_header_split_supported(const struct gve_priv *priv);
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 /* rx buffer handling */
 int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
 void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
drivers/net/ethernet/google/gve/gve_ethtool.c

@@ -537,34 +537,6 @@ static void gve_get_ringparam(struct net_device *netdev,
 		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 }
 
-static int gve_adjust_ring_sizes(struct gve_priv *priv,
-				 u16 new_tx_desc_cnt,
-				 u16 new_rx_desc_cnt)
-{
-	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
-	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
-	int err;
-
-	/* get current queue configuration */
-	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
-	/* copy over the new ring_size from ethtool */
-	tx_alloc_cfg.ring_size = new_tx_desc_cnt;
-	rx_alloc_cfg.ring_size = new_rx_desc_cnt;
-
-	if (netif_running(priv->dev)) {
-		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-		if (err)
-			return err;
-	}
-
-	/* Set new ring_size for the next up */
-	priv->tx_desc_cnt = new_tx_desc_cnt;
-	priv->rx_desc_cnt = new_rx_desc_cnt;
-
-	return 0;
-}
-
 static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
 				      u16 new_rx_desc_cnt)
 {
@@ -584,20 +556,13 @@ static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt
 	return 0;
 }
 
-static int gve_set_ringparam(struct net_device *netdev,
-			     struct ethtool_ringparam *cmd,
-			     struct kernel_ethtool_ringparam *kernel_cmd,
-			     struct netlink_ext_ack *extack)
+static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
+				     u16 new_rx_desc_cnt,
+				     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
-	struct gve_priv *priv = netdev_priv(netdev);
-	u16 new_tx_cnt, new_rx_cnt;
-	int err;
-
-	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
-	if (err)
-		return err;
-
-	if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+	if (new_tx_desc_cnt == priv->tx_desc_cnt &&
+	    new_rx_desc_cnt == priv->rx_desc_cnt)
 		return 0;
 
 	if (!priv->modify_ring_size_enabled) {
@@ -605,13 +570,48 @@ static int gve_set_ringparam(struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
-	new_tx_cnt = cmd->tx_pending;
-	new_rx_cnt = cmd->rx_pending;
-
-	if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+	if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
 		return -EINVAL;
 
-	return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
+	tx_alloc_cfg->ring_size = new_tx_desc_cnt;
+	rx_alloc_cfg->ring_size = new_rx_desc_cnt;
+	return 0;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+			     struct ethtool_ringparam *cmd,
+			     struct kernel_ethtool_ringparam *kernel_cmd,
+			     struct netlink_ext_ack *extack)
+{
+	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+	struct gve_priv *priv = netdev_priv(netdev);
+	int err;
+
+	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
+				    &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
+					&tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	if (netif_running(priv->dev)) {
+		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+		if (err)
+			return err;
+	} else {
+		/* Set ring params for the next up */
+		priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
+		priv->rx_cfg.packet_buffer_size =
+			rx_alloc_cfg.packet_buffer_size;
+		priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
+		priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
+	}
+	return 0;
 }
 
 static int gve_user_reset(struct net_device *netdev, u32 *flags)
drivers/net/ethernet/google/gve/gve_main.c

@@ -2058,12 +2058,10 @@ bool gve_header_split_supported(const struct gve_priv *priv)
 	       priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
 }
 
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
-	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
-	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	bool enable_hdr_split;
-	int err = 0;
 
 	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
 		return 0;
@@ -2081,14 +2079,11 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
 	if (enable_hdr_split == priv->header_split_enabled)
 		return 0;
 
-	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
-	rx_alloc_cfg.enable_header_split = enable_hdr_split;
-	rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+	rx_alloc_cfg->enable_header_split = enable_hdr_split;
+	rx_alloc_cfg->packet_buffer_size =
+		gve_get_pkt_buf_size(priv, enable_hdr_split);
 
-	if (netif_running(priv->dev))
-		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	return err;
+	return 0;
 }
 
 static int gve_set_features(struct net_device *netdev,
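As a quick way to exercise the persistence path (assuming a recent ethtool that supports ring parameters and `tcp-data-split`), one could run `ethtool -G eth1 rx 2048 tx 2048 tcp-data-split on` while the interface is down and then bring it up with `ip link set eth1 up`; with this patch the header-split setting should now survive the down/up cycle alongside the ring sizes. The device name `eth1` is a placeholder.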