net: gso: restore ids of outer ip headers correctly

Currently, NETIF_F_TSO_MANGLEID indicates that the inner-most ID can
be mangled. Outer IDs can always be mangled.

Make GSO preserve outer IDs by default, with NETIF_F_TSO_MANGLEID allowing
both inner and outer IDs to be mangled.

This commit also modifies a few drivers that use SKB_GSO_TCP_FIXEDID directly.

Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
Reviewed-by: Edward Cree <ecree.xilinx@gmail.com> # for sfc
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20250923085908.4687-4-richardbgobert@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Documentation/networking/segmentation-offloads.rst

@@ -43,10 +43,19 @@ also point to the TCP header of the packet.
 For IPv4 segmentation we support one of two types in terms of the IP ID.
 The default behavior is to increment the IP ID with every segment. If the
 GSO type SKB_GSO_TCP_FIXEDID is specified then we will not increment the IP
-ID and all segments will use the same IP ID. If a device has
-NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO
-and we will either increment the IP ID for all frames, or leave it at a
-static value based on driver preference.
+ID and all segments will use the same IP ID.
+
+For encapsulated packets, SKB_GSO_TCP_FIXEDID refers only to the outer header.
+SKB_GSO_TCP_FIXEDID_INNER can be used to specify the same for the inner header.
+Any combination of these two GSO types is allowed.
+
+If a device has NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when
+performing TSO and we will either increment the IP ID for all frames, or leave
+it at a static value based on driver preference. For encapsulated packets,
+NETIF_F_TSO_MANGLEID is relevant for both outer and inner headers, unless the
+DF bit is not set on the outer header, in which case the device driver must
+guarantee that the IP ID field is incremented in the outer header with every
+segment.
 
 UDP Fragmentation Offload
 =========================
@@ -124,10 +133,7 @@ Generic Receive Offload
 Generic receive offload is the complement to GSO. Ideally any frame
 assembled by GRO should be segmented to create an identical sequence of
 frames using GSO, and any sequence of frames segmented by GSO should be
-able to be reassembled back to the original by GRO. The only exception to
-this is IPv4 ID in the case that the DF bit is set for a given IP header.
-If the value of the IPv4 ID is not sequentially incrementing it will be
-altered so that it is when a frame assembled via GRO is segmented via GSO.
+able to be reassembled back to the original by GRO.
 
 Partial Generic Segmentation Offload
 ====================================
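
As a concrete illustration of the new doc text (not part of the patch): a
minimal userspace sketch that walks the four allowed flag combinations for an
encapsulated packet. The two flag values are the ones introduced in the
skbuff.h hunk below; everything else here is made up for the demo.

/* Illustrative only: enumerate the four allowed FIXEDID combinations for an
 * encapsulated packet.  Flag values are from the skbuff.h change; nothing
 * here is kernel code.
 */
#include <stdio.h>

#define SKB_GSO_TCP_FIXEDID		(1u << 30)	/* outer header ID fixed */
#define SKB_GSO_TCP_FIXEDID_INNER	(1u << 31)	/* inner header ID fixed */

int main(void)
{
	const unsigned int combos[] = {
		0,
		SKB_GSO_TCP_FIXEDID,
		SKB_GSO_TCP_FIXEDID_INNER,
		SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER,
	};

	for (unsigned int i = 0; i < 4; i++)
		printf("outer ID %s, inner ID %s\n",
		       combos[i] & SKB_GSO_TCP_FIXEDID ? "fixed" : "incrementing",
		       combos[i] & SKB_GSO_TCP_FIXEDID_INNER ? "fixed" : "incrementing");
	return 0;
}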

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -1290,8 +1290,12 @@ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *
 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
 				   ipv4->daddr, 0);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
-	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
-		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
+		bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
+
+		skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
+						     SKB_GSO_TCP_FIXEDID;
+	}
 
 	skb->csum_start = (unsigned char *)tcp - skb->head;
 	skb->csum_offset = offsetof(struct tcphdr, check);

drivers/net/ethernet/sfc/ef100_tx.c

@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
 {
 	bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
 	unsigned int len, ip_offset, tcp_offset, payload_segs;
+	u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
 	u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
 	unsigned int outer_ip_offset, outer_l4_offset;
 	u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
 	bool outer_csum;
 	u32 paylen;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
-		mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+	if (encap) {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+			mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+			mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+	} else {
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+			mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+
+		mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+	}
 
 	if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
 		vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
 				  ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
 				  ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
 				  ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
-				  ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
-								 ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+				  ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
 				  ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
 				  ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
 		);

include/linux/netdevice.h

@@ -5320,13 +5320,18 @@ void skb_warn_bad_offload(const struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
+	netdev_features_t feature;
+
+	if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
+		gso_type |= __SKB_GSO_TCP_FIXEDID;
+
+	feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
 
 	/* check flags correspondence */
 	BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
-	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
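
A rough userspace model of what the new net_gso_ok() does, assuming (for
simplicity only) that feature bits and gso_type bits share one bit space; in
the kernel they differ by NETIF_F_GSO_SHIFT. The point it shows: either
user-visible FIXEDID bit is folded into the single internal
__SKB_GSO_TCP_FIXEDID bit, which is the one that lines up with
NETIF_F_TSO_MANGLEID, so one device feature covers both flags.

/* Userspace model of the net_gso_ok() change; bit positions are real but the
 * shared feature/gso_type bit space is a simplification.  Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define __SKB_GSO_TCP_FIXEDID		(1u << 3)	/* maps to NETIF_F_TSO_MANGLEID */
#define SKB_GSO_TCP_FIXEDID		(1u << 30)
#define SKB_GSO_TCP_FIXEDID_INNER	(1u << 31)

static bool gso_ok(unsigned int features, unsigned int gso_type)
{
	if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
		gso_type |= __SKB_GSO_TCP_FIXEDID;

	/* drop the "virtual" high bits, keep only real feature bits
	 * (the kernel does this with "& NETIF_F_GSO_MASK" after the shift)
	 */
	gso_type &= ~(SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER);
	return (features & gso_type) == gso_type;
}

int main(void)
{
	unsigned int mangleid_dev = __SKB_GSO_TCP_FIXEDID;	/* NETIF_F_TSO_MANGLEID */

	printf("%d\n", gso_ok(mangleid_dev, SKB_GSO_TCP_FIXEDID));		/* 1 */
	printf("%d\n", gso_ok(mangleid_dev, SKB_GSO_TCP_FIXEDID_INNER));	/* 1 */
	printf("%d\n", gso_ok(0, SKB_GSO_TCP_FIXEDID));				/* 0 */
	return 0;
}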

include/linux/skbuff.h

@@ -674,7 +674,7 @@ enum {
 	/* This indicates the tcp segment has CWR set. */
 	SKB_GSO_TCP_ECN = 1 << 2,
 
-	SKB_GSO_TCP_FIXEDID = 1 << 3,
+	__SKB_GSO_TCP_FIXEDID = 1 << 3,
 
 	SKB_GSO_TCPV6 = 1 << 4,
 
@@ -707,6 +707,12 @@
 	SKB_GSO_FRAGLIST = 1 << 18,
 
 	SKB_GSO_TCP_ACCECN = 1 << 19,
+
+	/* These indirectly map onto the same netdev feature.
+	 * If NETIF_F_TSO_MANGLEID is set it may mangle both inner and outer IDs.
+	 */
+	SKB_GSO_TCP_FIXEDID = 1 << 30,
+	SKB_GSO_TCP_FIXEDID_INNER = 1 << 31,
 };
 
 #if BITS_PER_LONG > 32

net/core/dev.c

@@ -3768,8 +3768,14 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
 		features &= ~dev->gso_partial_features;
 
-	/* Make sure to clear the IPv4 ID mangling feature if the
-	 * IPv4 header has the potential to be fragmented.
+	/* Make sure to clear the IPv4 ID mangling feature if the IPv4 header
+	 * has the potential to be fragmented so that TSO does not generate
+	 * segments with the same ID. For encapsulated packets, the ID mangling
+	 * feature is guaranteed not to use the same ID for the outer IPv4
+	 * headers of the generated segments if the headers have the potential
+	 * to be fragmented, so there is no need to clear the IPv4 ID mangling
+	 * feature (see the section about NETIF_F_TSO_MANGLEID in
+	 * segmentation-offloads.rst).
 	 */
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
 		struct iphdr *iph = skb->encapsulation ?
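
The comment above gates the DF-bit test that follows it in
gso_features_check(). A standalone sketch of just that test, with a made-up
header value; struct iphdr and IP_DF come from the uapi <linux/ip.h>:

/* Standalone sketch of the DF-bit test: ID mangling may only stay enabled
 * when the IPv4 header cannot be fragmented (DF set).  The header value is
 * made up for the demo; not kernel code.
 */
#include <arpa/inet.h>	/* htons() */
#include <linux/ip.h>	/* struct iphdr, IP_DF */
#include <stdio.h>

int main(void)
{
	struct iphdr iph = { .frag_off = htons(IP_DF) };	/* made-up header */

	if (iph.frag_off & htons(IP_DF))
		printf("DF set: ID mangling may stay enabled\n");
	else
		printf("DF clear: header may fragment, clear NETIF_F_TSO_MANGLEID\n");
	return 0;
}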

net/ipv4/af_inet.c

@@ -1395,14 +1395,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
-	if (!skb->encapsulation || encap) {
-		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
-		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
+	/* fixed ID is invalid if DF bit is not set */
+	fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));
+	if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
+		goto out;
 
-		/* fixed ID is invalid if DF bit is not set */
-		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
-			goto out;
-	}
+	if (!skb->encapsulation || encap)
+		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
 
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment)) {
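
The rewritten block indexes the two flags with a shift: encap == 0 tests
SKB_GSO_TCP_FIXEDID, encap == 1 tests the bit one position higher,
SKB_GSO_TCP_FIXEDID_INNER. A userspace sketch of just that selection; the
flag values are from the skbuff.h hunk above, the helper is illustrative:

/* Illustrative only: the "SKB_GSO_TCP_FIXEDID << encap" selection from
 * inet_gso_segment(), outside the kernel.  encap picks outer (0) or
 * inner (1) semantics because the two flags are adjacent bits.
 */
#include <stdio.h>

#define SKB_GSO_TCP_FIXEDID		(1u << 30)
#define SKB_GSO_TCP_FIXEDID_INNER	(1u << 31)

static int fixedid_for(unsigned int gso_type, int encap)
{
	return !!(gso_type & (SKB_GSO_TCP_FIXEDID << encap));
}

int main(void)
{
	unsigned int gso_type = SKB_GSO_TCP_FIXEDID_INNER;

	printf("outer fixed: %d\n", fixedid_for(gso_type, 0));	/* 0 */
	printf("inner fixed: %d\n", fixedid_for(gso_type, 1));	/* 1 */
	return 0;
}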

net/ipv4/tcp_offload.c

@@ -471,7 +471,6 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
 	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
 	struct tcphdr *th = tcp_hdr(skb);
-	bool is_fixedid;
 
 	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
 		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
@@ -485,10 +484,9 @@
 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
 				  iph->daddr, 0);
 
-	is_fixedid = (NAPI_GRO_CB(skb)->ip_fixedid >> skb->encapsulation) & 1;
-
+	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID << 1 != SKB_GSO_TCP_FIXEDID_INNER);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
-				     (is_fixedid * SKB_GSO_TCP_FIXEDID);
+				     (NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);
 
 	tcp_gro_complete(skb);
 	return 0;
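
The multiply works because ip_fixedid is a two-bit mask and
SKB_GSO_TCP_FIXEDID_INNER is exactly SKB_GSO_TCP_FIXEDID shifted left by one,
which the new BUILD_BUG_ON pins down, so the whole mask lands on the right
pair of gso_type bits. A userspace sketch of the arithmetic; the bit 0 =
outer, bit 1 = inner layout of ip_fixedid is an assumption read off this hunk:

/* Sketch of the "ip_fixedid * SKB_GSO_TCP_FIXEDID" trick: multiplying a
 * two-bit mask (assumed bit 0 = outer fixed, bit 1 = inner fixed) by the
 * lower flag shifts the mask onto the adjacent gso_type bits.  Not kernel
 * code.
 */
#include <assert.h>
#include <stdio.h>

#define SKB_GSO_TCP_FIXEDID		(1u << 30)
#define SKB_GSO_TCP_FIXEDID_INNER	(1u << 31)

int main(void)
{
	/* mirrors the BUILD_BUG_ON in tcp4_gro_complete() */
	assert(SKB_GSO_TCP_FIXEDID << 1 == SKB_GSO_TCP_FIXEDID_INNER);

	for (unsigned int ip_fixedid = 0; ip_fixedid < 4; ip_fixedid++) {
		unsigned int gso = ip_fixedid * SKB_GSO_TCP_FIXEDID;

		printf("ip_fixedid=%u -> outer:%d inner:%d\n", ip_fixedid,
		       !!(gso & SKB_GSO_TCP_FIXEDID),
		       !!(gso & SKB_GSO_TCP_FIXEDID_INNER));
	}
	return 0;
}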