bpf, netkit: Add indirect call wrapper for fetching peer dev
ndo_get_peer_dev is used in the tcx BPF fast path, therefore make use of an indirect call wrapper to optimize the bpf_redirect_peer() internal handling a bit. Add a small skb_get_peer_dev() wrapper which utilizes the INDIRECT_CALL_1() macro instead of open coding.

Future work could add a peer pointer directly into struct net_device and convert veth and netkit over to use it, so that ndo_get_peer_dev can eventually be removed.

Co-developed-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20231114004220.6495-7-daniel@iogearbox.net
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
commit 2c22542570
parent 024ee930cb
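For context, the fast path this change targets is hit when a tc/tcx BPF program calls bpf_redirect_peer() at ingress of a netkit or veth device, which hands the skb straight to the device's peer in the other netns. Below is a minimal, hypothetical sketch of such a program; the ifindex value, macro name, and program name are illustrative and not part of this commit.

/* Hypothetical tc/tcx ingress program (not part of this commit) showing the
 * call that ends up in skb_do_redirect() with BPF_F_PEER set. HOST_IFINDEX
 * is a placeholder for the ifindex of the host-side netkit/veth device; the
 * skb is delivered to that device's peer in the target netns.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define HOST_IFINDEX 42	/* placeholder, replace with the real ifindex */

SEC("tc")
int redirect_to_peer(struct __sk_buff *skb)
{
	/* Second argument (flags) must be 0 for bpf_redirect_peer(). */
	return bpf_redirect_peer(HOST_IFINDEX, 0);
}

char LICENSE[] SEC("license") = "GPL";

Every invocation of this helper goes through the peer-device lookup that the patch below wraps with INDIRECT_CALL_1().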
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -7,6 +7,7 @@
 #include <linux/filter.h>
 #include <linux/netfilter_netdev.h>
 #include <linux/bpf_mprog.h>
+#include <linux/indirect_call_wrapper.h>
 
 #include <net/netkit.h>
 #include <net/dst.h>
@@ -177,7 +178,7 @@ static void netkit_set_headroom(struct net_device *dev, int headroom)
 	rcu_read_unlock();
 }
 
-static struct net_device *netkit_peer_dev(struct net_device *dev)
+INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
 {
 	return rcu_dereference(netkit_priv(dev)->peer);
 }

--- a/include/net/netkit.h
+++ b/include/net/netkit.h
@@ -10,6 +10,7 @@ int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
 int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
 #else
 static inline int netkit_prog_attach(const union bpf_attr *attr,
 				     struct bpf_prog *prog)
@@ -34,5 +35,10 @@ static inline int netkit_prog_query(const union bpf_attr *attr,
 {
 	return -EINVAL;
 }
+
+static inline struct net_device *netkit_peer_dev(struct net_device *dev)
+{
+	return NULL;
+}
 #endif /* CONFIG_NETKIT */
 #endif /* __NET_NETKIT_H */

--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -81,6 +81,7 @@
 #include <net/xdp.h>
 #include <net/mptcp.h>
 #include <net/netfilter/nf_conntrack_bpf.h>
+#include <net/netkit.h>
 #include <linux/un.h>
 
 #include "dev.h"
@@ -2468,6 +2469,16 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
 
+static struct net_device *skb_get_peer_dev(struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (likely(ops->ndo_get_peer_dev))
+		return INDIRECT_CALL_1(ops->ndo_get_peer_dev,
+				       netkit_peer_dev, dev);
+	return NULL;
+}
+
 int skb_do_redirect(struct sk_buff *skb)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
@@ -2481,12 +2492,9 @@ int skb_do_redirect(struct sk_buff *skb)
 	if (unlikely(!dev))
 		goto out_drop;
 	if (flags & BPF_F_PEER) {
-		const struct net_device_ops *ops = dev->netdev_ops;
-
-		if (unlikely(!ops->ndo_get_peer_dev ||
-			     !skb_at_tc_ingress(skb)))
+		if (unlikely(!skb_at_tc_ingress(skb)))
 			goto out_drop;
-		dev = ops->ndo_get_peer_dev(dev);
+		dev = skb_get_peer_dev(dev);
 		if (unlikely(!dev ||
 			     !(dev->flags & IFF_UP) ||
 			     net_eq(net, dev_net(dev))))
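The gain comes from the INDIRECT_CALL_1() pattern in skb_get_peer_dev(): with retpolines enabled, an indirect call through ops->ndo_get_peer_dev is comparatively costly, so the wrapper first compares the pointer against the expected target (netkit_peer_dev here) and takes a direct call on a match, falling back to the indirect call otherwise. The stand-alone user-space sketch below mimics that idea; fake_dev, veth_peer, and the simplified macro are illustrative stand-ins, not kernel code.

/* Stand-alone user-space sketch (not kernel code) of the INDIRECT_CALL_1()
 * idea used by skb_get_peer_dev(): compare the function pointer against the
 * expected target and take a direct call on a match, so retpoline-era
 * indirect-call overhead is only paid for "unexpected" drivers.
 */
#include <stdio.h>

struct fake_dev {
	const char *name;
	struct fake_dev *peer;
	struct fake_dev *(*get_peer)(struct fake_dev *dev);
};

/* The "likely" callee the wrapper is specialized for (netkit in the patch). */
static struct fake_dev *netkit_peer(struct fake_dev *dev)
{
	return dev->peer;
}

/* Another driver's implementation, reached via a true indirect call. */
static struct fake_dev *veth_peer(struct fake_dev *dev)
{
	return dev->peer;
}

/* Simplified stand-in for the kernel's INDIRECT_CALL_1(f, f1, ...). */
#define INDIRECT_CALL_1(f, f1, ...) \
	((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

static struct fake_dev *get_peer_dev(struct fake_dev *dev)
{
	if (dev->get_peer)
		return INDIRECT_CALL_1(dev->get_peer, netkit_peer, dev);
	return NULL;
}

int main(void)
{
	struct fake_dev nk1 = { .name = "nk1" };
	struct fake_dev nk0 = { .name = "nk0", .peer = &nk1, .get_peer = netkit_peer };
	struct fake_dev veth1 = { .name = "veth1" };
	struct fake_dev veth0 = { .name = "veth0", .peer = &veth1, .get_peer = veth_peer };

	printf("%s -> %s (direct call path)\n", nk0.name, get_peer_dev(&nk0)->name);
	printf("%s -> %s (indirect call path)\n", veth0.name, get_peer_dev(&veth0)->name);
	return 0;
}

In the kernel, the pointer comparison is only compiled in when retpoline mitigations are enabled; otherwise INDIRECT_CALL_1() reduces to a plain indirect call, so the wrapper should cost nothing on unaffected configurations.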