net_sched: act_skbmod: use RCU in tcf_skbmod_dump()

Also store tcf_action in struct tcf_skbmod_params, so that
tcf_skbmod_act() reads a consistent {action, parameters} pair
through a single RCU dereference, with no possible discrepancy
between the two.

No longer block BH in tcf_skbmod_init() when acquiring tcf_lock.
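
For context, the fast-path change boils down to the following pattern
(simplified from the diff below, not the verbatim kernel code):

	/* Before: two independent loads. An update could slip in
	 * between them, pairing a new action with stale parameters.
	 */
	action = READ_ONCE(d->tcf_action);
	p = rcu_dereference_bh(d->skbmod_p);

	/* After: a single RCU dereference. The action travels inside
	 * the params object, which is published atomically with
	 * rcu_assign_pointer(), so the pair is always consistent.
	 */
	p = rcu_dereference_bh(d->skbmod_p);
	/* use p->action together with the rest of *p */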

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250827125349.3505302-5-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author:    Eric Dumazet <edumazet@google.com>, 2025-08-27 12:53:49 +0000
Committer: Jakub Kicinski <kuba@kernel.org>
Commit:    53df77e785 (parent: e97ae74297)
2 changed files with 13 additions and 14 deletions

--- a/include/net/tc_act/tc_skbmod.h
+++ b/include/net/tc_act/tc_skbmod.h
@@ -12,6 +12,7 @@
 struct tcf_skbmod_params {
 	struct rcu_head	rcu;
 	u64	flags; /*up to 64 types of operations; extend if needed */
+	int	action;
 	u8	eth_dst[ETH_ALEN];
 	u16	eth_type;
 	u8	eth_src[ETH_ALEN];

--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -27,19 +27,18 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
 				     struct tcf_result *res)
 {
 	struct tcf_skbmod *d = to_skbmod(a);
-	int action, max_edit_len, err;
 	struct tcf_skbmod_params *p;
+	int max_edit_len, err;
 	u64 flags;
 
 	tcf_lastuse_update(&d->tcf_tm);
 	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
-	action = READ_ONCE(d->tcf_action);
-	if (unlikely(action == TC_ACT_SHOT))
+	p = rcu_dereference_bh(d->skbmod_p);
+	if (unlikely(p->action == TC_ACT_SHOT))
 		goto drop;
 
 	max_edit_len = skb_mac_header_len(skb);
-	p = rcu_dereference_bh(d->skbmod_p);
 	flags = p->flags;
 
 	/* tcf_skbmod_init() guarantees "flags" to be one of the following:
@@ -85,7 +84,7 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
 		INET_ECN_set_ce(skb);
 
 out:
-	return action;
+	return p->action;
 
 drop:
 	qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
@@ -193,9 +192,9 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	}
 
 	p->flags = lflags;
+	p->action = parm->action;
 
-	if (ovr)
-		spin_lock_bh(&d->tcf_lock);
+	spin_lock(&d->tcf_lock);
 	/* Protected by tcf_lock if overwriting existing action. */
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 	p_old = rcu_dereference_protected(d->skbmod_p, 1);
@@ -209,7 +208,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
 	rcu_assign_pointer(d->skbmod_p, p);
 
-	if (ovr)
-		spin_unlock_bh(&d->tcf_lock);
+	spin_unlock(&d->tcf_lock);
 
 	if (p_old)
 		kfree_rcu(p_old, rcu);
@@ -248,10 +247,9 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 	opt.index = d->tcf_index;
 	opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
 	opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
-	spin_lock_bh(&d->tcf_lock);
-	opt.action = d->tcf_action;
-	p = rcu_dereference_protected(d->skbmod_p,
-				      lockdep_is_held(&d->tcf_lock));
+	rcu_read_lock();
+	p = rcu_dereference(d->skbmod_p);
+	opt.action = p->action;
 	opt.flags = p->flags;
 	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
@@ -269,10 +267,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
 		goto nla_put_failure;
-	spin_unlock_bh(&d->tcf_lock);
+	rcu_read_unlock();
 
 	return skb->len;
 nla_put_failure:
-	spin_unlock_bh(&d->tcf_lock);
+	rcu_read_unlock();
 	nlmsg_trim(skb, b);
 	return -1;
 }
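
For reference, the update side keeps the classic RCU publish pattern.
A condensed sketch of the idiom as used above (not the complete
tcf_skbmod_init(); allocation details and error handling omitted):

	struct tcf_skbmod_params *p_old, *p_new;

	p_new = kzalloc(sizeof(*p_new), GFP_KERNEL);
	if (!p_new)
		return -ENOMEM;
	p_new->action = parm->action;	/* fill in before publishing */

	spin_lock(&d->tcf_lock);	/* serializes writers only */
	p_old = rcu_dereference_protected(d->skbmod_p, 1);
	rcu_assign_pointer(d->skbmod_p, p_new);	/* publish with barrier */
	spin_unlock(&d->tcf_lock);

	if (p_old)
		kfree_rcu(p_old, rcu);	/* free after a grace period */

Readers in tcf_skbmod_act() (BH-disabled datapath context) and in
tcf_skbmod_dump() (explicit rcu_read_lock()) then only need an RCU
dereference; the nla_put() calls in the dump path do not sleep, so
holding the RCU read lock across them is safe.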