From 11c3fae5afa6cac444d12622e2cf5af60a99c1ef Mon Sep 17 00:00:00 2001
From: OpenWrt community <openwrt-devel@lists.openwrt.org>
Date: Wed, 13 Jul 2022 13:43:15 +0200
Subject: [PATCH] net/bridge: add bridge offload

---
 include/linux/if_bridge.h       |   1 +
 net/bridge/Makefile             |   2 +-
 net/bridge/br.c                 |   8 +
 net/bridge/br_device.c          |   2 +
 net/bridge/br_fdb.c             |   5 +
 net/bridge/br_forward.c         |   3 +
 net/bridge/br_if.c              |   6 +-
 net/bridge/br_input.c           |   5 +
 net/bridge/br_offload.c         | 438 ++++++++++++++++++++++++++++++++
 net/bridge/br_private.h         |  22 +-
 net/bridge/br_private_offload.h |  23 ++
 net/bridge/br_stp.c             |   3 +
 net/bridge/br_sysfs_br.c        |  35 +++
 net/bridge/br_sysfs_if.c        |   2 +
 net/bridge/br_vlan_tunnel.c     |   3 +
 15 files changed, 555 insertions(+), 3 deletions(-)
 create mode 100644 net/bridge/br_offload.c
 create mode 100644 net/bridge/br_private_offload.h

--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -59,6 +59,7 @@ struct br_ip_list {
 #define BR_MRP_LOST_IN_CONT BIT(19)
 #define BR_TX_FWD_OFFLOAD BIT(20)
 #define BR_BPDU_FILTER BIT(21)
+#define BR_OFFLOAD BIT(22)
 
 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
 
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BRIDGE) += bridge.o
 
-bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
+bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o br_offload.o \
	br_ioctl.o br_stp.o br_stp_bpdu.o \
	br_stp_if.o br_stp_timer.o br_netlink.o \
	br_netlink_tunnel.o br_arp_nd_proxy.o
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -18,6 +18,7 @@
 #include <net/switchdev.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /*
  * Handle changes in state of network devices enslaved to a bridge.
@@ -381,6 +382,10 @@ static int __init br_init(void)
 	if (err)
 		goto err_out;
 
+	err = br_offload_init();
+	if (err)
+		goto err_out0;
+
 	err = register_pernet_subsys(&br_net_ops);
 	if (err)
 		goto err_out1;
@@ -430,6 +435,8 @@ err_out3:
 err_out2:
 	unregister_pernet_subsys(&br_net_ops);
 err_out1:
+	br_offload_fini();
+err_out0:
 	br_fdb_fini();
 err_out:
 	stp_proto_unregister(&br_stp_proto);
@@ -452,6 +459,7 @@ static void __exit br_deinit(void)
 #if IS_ENABLED(CONFIG_ATM_LANE)
 	br_fdb_test_addr_hook = NULL;
 #endif
+	br_offload_fini();
 	br_fdb_fini();
 }
 
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -524,6 +524,8 @@ void br_dev_setup(struct net_device *dev
 	br->bridge_hello_time = br->hello_time = 2 * HZ;
 	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
 	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
+	br->offload_cache_size = 128;
+	br->offload_cache_reserved = 8;
 	dev->max_mtu = ETH_MAX_MTU;
 
 	br_netfilter_rtable_init(br);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,6 +23,7 @@
 #include <net/switchdev.h>
 #include <trace/events/bridge.h>
 #include "br_private.h"
+#include "br_private_offload.h"
 
 static const struct rhashtable_params br_fdb_rht_params = {
 	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
@@ -518,6 +519,8 @@ static struct net_bridge_fdb_entry *fdb_
 		fdb->key.vlan_id = vid;
 		fdb->flags = flags;
 		fdb->updated = fdb->used = jiffies;
+		INIT_HLIST_HEAD(&fdb->offload_in);
+		INIT_HLIST_HEAD(&fdb->offload_out);
 		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
 						  &fdb->rhnode,
 						  br_fdb_rht_params)) {
@@ -794,6 +797,8 @@ static void fdb_notify(struct net_bridge
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
+	br_offload_fdb_update(fdb);
+
 	if (swdev_notify)
 		br_switchdev_fdb_notify(br, fdb, type);
 
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -16,6 +16,7 @@
 #include <linux/if_vlan.h>
 #include <linux/netfilter_bridge.h>
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /* Don't forward packets to originating port or forwarding disabled */
 static inline int should_deliver(const struct net_bridge_port *p,
@@ -32,6 +33,8 @@ static inline int should_deliver(const s
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	br_offload_output(skb);
+
 	skb_push(skb, ETH_HLEN);
 	if (!is_skb_forwardable(skb->dev, skb))
 		goto drop;
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -25,6 +25,7 @@
 #include <net/net_namespace.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /*
  * Determine initial path cost based on speed.
@@ -428,7 +429,7 @@ static struct net_bridge_port *new_nbp(s
 	p->path_cost = port_cost(dev);
 	p->priority = 0x8000 >> BR_PORT_BITS;
 	p->port_no = index;
-	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_OFFLOAD;
 	br_init_port(p);
 	br_set_state(p, BR_STATE_DISABLED);
 	br_stp_port_timer_init(p);
@@ -771,6 +772,9 @@ void br_port_flags_change(struct net_bri
 
 	if (mask & BR_NEIGH_SUPPRESS)
 		br_recalculate_neigh_suppress_enabled(br);
+
+	if (mask & BR_OFFLOAD)
+		br_offload_port_state(p);
 }
 
 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include "br_private.h"
 #include "br_private_tunnel.h"
+#include "br_private_offload.h"
 
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -171,6 +172,7 @@ int br_handle_frame_finish(struct net *n
 		dst->used = now;
 		br_forward(dst->dst, skb, local_rcv, false);
 	} else {
+		br_offload_skb_disable(skb);
 		if (!mcast_hit)
 			br_flood(br, skb, pkt_type, local_rcv, false);
 		else
@@ -304,6 +306,9 @@ static rx_handler_result_t br_handle_fra
 	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
 
 	p = br_port_get_rcu(skb->dev);
+	if (br_offload_input(p, skb))
+		return RX_HANDLER_CONSUMED;
+
 	if (p->flags & BR_VLAN_TUNNEL)
 		br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));
 
--- /dev/null
+++ b/net/bridge/br_offload.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include "br_private.h"
+#include "br_private_offload.h"
+
+static DEFINE_SPINLOCK(offload_lock);
+
+struct bridge_flow_key {
+	u8 dest[ETH_ALEN];
+	u8 src[ETH_ALEN];
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	u16 vlan_tag;
+	bool vlan_present;
+#endif
+};
+
+struct bridge_flow {
+	struct net_bridge_port *port;
+	struct rhash_head node;
+	struct bridge_flow_key key;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	bool vlan_out_present;
+	u16 vlan_out;
+#endif
+
+	unsigned long used;
+	struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+	struct hlist_node fdb_list_in, fdb_list_out;
+
+	struct rcu_head rcu;
+};
+
+static const struct rhashtable_params flow_params = {
+	.automatic_shrinking = true,
+	.head_offset = offsetof(struct bridge_flow, node),
+	.key_len = sizeof(struct bridge_flow_key),
+	.key_offset = offsetof(struct bridge_flow, key),
+};
+
+static struct kmem_cache *offload_cache __read_mostly;
+
+static void
+flow_rcu_free(struct rcu_head *head)
+{
+	struct bridge_flow *flow;
+
+	flow = container_of(head, struct bridge_flow, rcu);
+	kmem_cache_free(offload_cache, flow);
+}
+
+static void
+__br_offload_flow_free(struct bridge_flow *flow)
+{
+	flow->used = 0;
+	hlist_del(&flow->fdb_list_in);
+	hlist_del(&flow->fdb_list_out);
+
+	call_rcu(&flow->rcu, flow_rcu_free);
+}
+
+static void
+br_offload_flow_free(struct bridge_flow *flow)
+{
+	if (rhashtable_remove_fast(&flow->port->offload.rht, &flow->node,
+				   flow_params) != 0)
+		return;
+
+	__br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_flow_fdb_refresh_time(struct bridge_flow *flow,
+				 struct net_bridge_fdb_entry *fdb)
+{
+	if (!time_after(flow->used, fdb->updated))
+		return false;
+
+	fdb->updated = flow->used;
+
+	return true;
+}
+
+
+static void
+br_offload_flow_refresh_time(struct bridge_flow *flow)
+{
+	br_offload_flow_fdb_refresh_time(flow, flow->fdb_in);
+	br_offload_flow_fdb_refresh_time(flow, flow->fdb_out);
+}
+
+static void
+br_offload_destroy_cb(void *ptr, void *arg)
+{
+	struct bridge_flow *flow = ptr;
+
+	__br_offload_flow_free(flow);
+}
+
+static bool
+br_offload_need_gc(struct net_bridge_port *p)
+{
+	return (atomic_read(&p->offload.rht.nelems) +
+		p->br->offload_cache_reserved) >= p->br->offload_cache_size;
+}
+
+static void
+br_offload_gc_work(struct work_struct *work)
+{
+	struct rhashtable_iter hti;
+	struct net_bridge_port *p;
+	struct bridge_flow *gc_flow = NULL;
+	struct bridge_flow *flow;
+	unsigned long gc_used;
+
+	p = container_of(work, struct net_bridge_port, offload.gc_work);
+
+	if (!br_offload_need_gc(p))
+		return;
+
+	rhashtable_walk_enter(&p->offload.rht, &hti);
+	rhashtable_walk_start(&hti);
+	while ((flow = rhashtable_walk_next(&hti)) != NULL) {
+		unsigned long used;
+
+		if (IS_ERR(flow))
+			continue;
+
+		used = READ_ONCE(flow->used);
+		if (!used)
+			continue;
+
+		if (gc_flow && !time_before(used, gc_used))
+			continue;
+
+		gc_flow = flow;
+		gc_used = used;
+	}
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+
+	if (!gc_flow)
+		return;
+
+	spin_lock_bh(&offload_lock);
+	if (br_offload_need_gc(p) && gc_flow &&
+	    gc_flow->used == gc_used)
+		br_offload_flow_free(gc_flow);
+	if (p->offload.enabled && br_offload_need_gc(p))
+		queue_work(system_long_wq, work);
+	spin_unlock_bh(&offload_lock);
+
+}
+
+void br_offload_port_state(struct net_bridge_port *p)
+{
+	struct net_bridge_port_offload *o = &p->offload;
+	bool enabled = true;
+	bool flush = false;
+
+	if (p->state != BR_STATE_FORWARDING ||
+	    !(p->flags & BR_OFFLOAD))
+		enabled = false;
+
+	spin_lock_bh(&offload_lock);
+	if (o->enabled == enabled)
+		goto out;
+
+	if (enabled) {
+		if (!o->gc_work.func)
+			INIT_WORK(&o->gc_work, br_offload_gc_work);
+		rhashtable_init(&o->rht, &flow_params);
+	} else {
+		flush = true;
+		rhashtable_free_and_destroy(&o->rht, br_offload_destroy_cb, o);
+	}
+
+	o->enabled = enabled;
+
+out:
+	spin_unlock_bh(&offload_lock);
+
+	if (flush)
+		flush_work(&o->gc_work);
+}
+
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb)
+{
+	struct bridge_flow *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&offload_lock);
+
+	hlist_for_each_entry_safe(f, tmp, &fdb->offload_in, fdb_list_in)
+		br_offload_flow_free(f);
+
+	hlist_for_each_entry_safe(f, tmp, &fdb->offload_out, fdb_list_out)
+		br_offload_flow_free(f);
+
+	spin_unlock_bh(&offload_lock);
+}
+
+static void
+br_offload_prepare_key(struct net_bridge_port *p, struct bridge_flow_key *key,
+		       struct sk_buff *skb)
+{
+	memset(key, 0, sizeof(*key));
+	memcpy(key, eth_hdr(skb), 2 * ETH_ALEN);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (!br_opt_get(p->br, BROPT_VLAN_ENABLED))
+		return;
+
+	if (!skb_vlan_tag_present(skb) || skb->vlan_proto != p->br->vlan_proto)
+		return;
+
+	key->vlan_present = true;
+	key->vlan_tag = skb_vlan_tag_get_id(skb);
+#endif
+}
+
+void br_offload_output(struct sk_buff *skb)
+{
+	struct net_bridge_port_offload *o;
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+	struct net_bridge_port *p, *inp;
+	struct net_device *dev;
+	struct net_bridge_fdb_entry *fdb_in, *fdb_out;
+	struct net_bridge_vlan_group *vg;
+	struct bridge_flow_key key;
+	struct bridge_flow *flow;
+	u16 vlan;
+
+	if (!cb->offload)
+		return;
+
+	rcu_read_lock();
+
+	p = br_port_get_rcu(skb->dev);
+	if (!p)
+		goto out;
+
+	o = &p->offload;
+	if (!o->enabled)
+		goto out;
+
+	if (atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size)
+		goto out;
+
+	dev = dev_get_by_index_rcu(dev_net(p->br->dev), cb->input_ifindex);
+	if (!dev)
+		goto out;
+
+	inp = br_port_get_rcu(dev);
+	if (!inp)
+		goto out;
+
+	vg = nbp_vlan_group_rcu(inp);
+	vlan = cb->input_vlan_present ? cb->input_vlan_tag : br_get_pvid(vg);
+	fdb_in = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_source, vlan);
+	if (!fdb_in || !fdb_in->dst)
+		goto out;
+
+	vg = nbp_vlan_group_rcu(p);
+	vlan = skb_vlan_tag_present(skb) ? skb_vlan_tag_get_id(skb) : br_get_pvid(vg);
+	fdb_out = br_fdb_find_rcu(p->br, eth_hdr(skb)->h_dest, vlan);
+	if (!fdb_out || !fdb_out->dst)
+		goto out;
+
+	br_offload_prepare_key(p, &key, skb);
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	key.vlan_present = cb->input_vlan_present;
+	key.vlan_tag = cb->input_vlan_tag;
+#endif
+
+	flow = kmem_cache_alloc(offload_cache, GFP_ATOMIC);
+	flow->port = inp;
+	memcpy(&flow->key, &key, sizeof(key));
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	flow->vlan_out_present = skb_vlan_tag_present(skb);
+	flow->vlan_out = skb_vlan_tag_get(skb);
+#endif
+
+	flow->fdb_in = fdb_in;
+	flow->fdb_out = fdb_out;
+	flow->used = jiffies;
+
+	spin_lock_bh(&offload_lock);
+	if (!o->enabled ||
+	    atomic_read(&p->offload.rht.nelems) >= p->br->offload_cache_size ||
+	    rhashtable_insert_fast(&inp->offload.rht, &flow->node, flow_params)) {
+		kmem_cache_free(offload_cache, flow);
+		goto out_unlock;
+	}
+
+	hlist_add_head(&flow->fdb_list_in, &fdb_in->offload_in);
+	hlist_add_head(&flow->fdb_list_out, &fdb_out->offload_out);
+
+	if (br_offload_need_gc(p))
+		queue_work(system_long_wq, &p->offload.gc_work);
+
+out_unlock:
+	spin_unlock_bh(&offload_lock);
+
+out:
+	rcu_read_unlock();
+}
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb)
+{
+	struct net_bridge_port_offload *o = &p->offload;
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+	struct bridge_flow_key key;
+	struct net_bridge_port *dst;
+	struct bridge_flow *flow;
+	unsigned long now = jiffies;
+	bool ret = false;
+
+	if (skb->len < sizeof(key))
+		return false;
+
+	if (!o->enabled)
+		return false;
+
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return false;
+
+	br_offload_prepare_key(p, &key, skb);
+
+	rcu_read_lock();
+	flow = rhashtable_lookup(&o->rht, &key, flow_params);
+	if (!flow) {
+		cb->offload = 1;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+		cb->input_vlan_present = key.vlan_present != 0;
+		cb->input_vlan_tag = key.vlan_tag;
+#endif
+		cb->input_ifindex = p->dev->ifindex;
+		goto out;
+	}
+
+	if (flow->fdb_in->dst != p)
+		goto out;
+
+	dst = flow->fdb_out->dst;
+	if (!dst)
+		goto out;
+
+	ret = true;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (!flow->vlan_out_present && key.vlan_present) {
+		__vlan_hwaccel_clear_tag(skb);
+	} else if (flow->vlan_out_present) {
+		if (skb_vlan_tag_present(skb) &&
+		    skb->vlan_proto != p->br->vlan_proto) {
+			/* Protocol-mismatch, empty out vlan_tci for new tag */
+			skb_push(skb, ETH_HLEN);
+			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+							skb_vlan_tag_get(skb));
+			if (unlikely(!skb))
+				goto out;
+
+			skb_pull(skb, ETH_HLEN);
+			skb_reset_mac_len(skb);
+		}
+
+		__vlan_hwaccel_put_tag(skb, p->br->vlan_proto,
+				       flow->vlan_out);
+	}
+#endif
+
+	skb->dev = dst->dev;
+	skb_push(skb, ETH_HLEN);
+
+	if (skb_warn_if_lro(skb) || !is_skb_forwardable(skb->dev, skb)) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	if (now - flow->used >= HZ) {
+		flow->used = now;
+		br_offload_flow_refresh_time(flow);
+	}
+
+	skb_forward_csum(skb);
+	dev_queue_xmit(skb);
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+static void
+br_offload_check_gc(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+
+	spin_lock_bh(&br->lock);
+	list_for_each_entry(p, &br->port_list, list)
+		if (br_offload_need_gc(p))
+			queue_work(system_long_wq, &p->offload.gc_work);
+	spin_unlock_bh(&br->lock);
+}
+
+
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+			      struct netlink_ext_ack *extack)
+{
+	br->offload_cache_size = val;
+	br_offload_check_gc(br);
+
+	return 0;
+}
+
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+				  struct netlink_ext_ack *extack)
+{
+	br->offload_cache_reserved = val;
+	br_offload_check_gc(br);
+
+	return 0;
+}
+
+int __init br_offload_init(void)
+{
+	offload_cache = kmem_cache_create("bridge_offload_cache",
+					  sizeof(struct bridge_flow),
+					  0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!offload_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void br_offload_fini(void)
+{
+	kmem_cache_destroy(offload_cache);
+}
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -268,7 +268,13 @@ struct net_bridge_fdb_entry {
 	unsigned long updated ____cacheline_aligned_in_smp;
 	unsigned long used;
 
-	struct rcu_head rcu;
+	union {
+		struct {
+			struct hlist_head offload_in;
+			struct hlist_head offload_out;
+		};
+		struct rcu_head rcu;
+	};
 };
 
 #define MDB_PG_FLAGS_PERMANENT BIT(0)
@@ -343,6 +349,12 @@ struct net_bridge_mdb_entry {
 	struct rcu_head rcu;
 };
 
+struct net_bridge_port_offload {
+	struct rhashtable rht;
+	struct work_struct gc_work;
+	bool enabled;
+};
+
 struct net_bridge_port {
 	struct net_bridge *br;
 	struct net_device *dev;
@@ -403,6 +415,7 @@ struct net_bridge_port {
 	u16 backup_redirected_cnt;
 
 	struct bridge_stp_xstats stp_xstats;
+	struct net_bridge_port_offload offload;
 };
 
 #define kobj_to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
@@ -519,6 +532,9 @@ struct net_bridge {
 	struct kobject *ifobj;
 	u32 auto_cnt;
 
+	u32 offload_cache_size;
+	u32 offload_cache_reserved;
+
 #ifdef CONFIG_NET_SWITCHDEV
 	/* Counter used to make sure that hardware domains get unique
 	 * identifiers in case a bridge spans multiple switchdev instances.
@@ -553,6 +569,10 @@ struct br_input_skb_cb {
 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
 	u8 br_netfilter_broute:1;
 #endif
+	u8 offload:1;
+	u8 input_vlan_present:1;
+	u16 input_vlan_tag;
+	int input_ifindex;
 
 #ifdef CONFIG_NET_SWITCHDEV
 	/* Set if TX data plane offloading is used towards at least one
--- /dev/null
+++ b/net/bridge/br_private_offload.h
@@ -0,0 +1,23 @@
+#ifndef __BR_OFFLOAD_H
+#define __BR_OFFLOAD_H
+
+bool br_offload_input(struct net_bridge_port *p, struct sk_buff *skb);
+void br_offload_output(struct sk_buff *skb);
+void br_offload_port_state(struct net_bridge_port *p);
+void br_offload_fdb_update(const struct net_bridge_fdb_entry *fdb);
+int br_offload_init(void);
+void br_offload_fini(void);
+int br_offload_set_cache_size(struct net_bridge *br, unsigned long val,
+			      struct netlink_ext_ack *extack);
+int br_offload_set_cache_reserved(struct net_bridge *br, unsigned long val,
+				  struct netlink_ext_ack *extack);
+
+static inline void br_offload_skb_disable(struct sk_buff *skb)
+{
+	struct br_input_skb_cb *cb = (struct br_input_skb_cb *)skb->cb;
+
+	if (cb->offload)
+		cb->offload = 0;
+}
+
+#endif
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -12,6 +12,7 @@
 
 #include "br_private.h"
 #include "br_private_stp.h"
+#include "br_private_offload.h"
 
 /* since time values in bpdu are in jiffies and then scaled (1/256)
  * before sending, make sure that is at least one STP tick.
@@ -52,6 +53,8 @@ void br_set_state(struct net_bridge_port
 		  (unsigned int) p->port_no, p->dev->name,
 		  br_port_state_names[p->state]);
 
+	br_offload_port_state(p);
+
 	if (p->br->stp_enabled == BR_KERNEL_STP) {
 		switch (p->state) {
 		case BR_STATE_BLOCKING:
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -18,6 +18,7 @@
 #include <linux/sched/signal.h>
 
 #include "br_private.h"
+#include "br_private_offload.h"
 
 /* IMPORTANT: new bridge options must be added with netlink support only
  * please do not add new sysfs entries
@@ -930,6 +931,38 @@ static ssize_t vlan_stats_per_port_store
 static DEVICE_ATTR_RW(vlan_stats_per_port);
 #endif
 
+static ssize_t offload_cache_size_show(struct device *d,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->offload_cache_size);
+}
+
+static ssize_t offload_cache_size_store(struct device *d,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_offload_set_cache_size);
+}
+static DEVICE_ATTR_RW(offload_cache_size);
+
+static ssize_t offload_cache_reserved_show(struct device *d,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->offload_cache_reserved);
+}
+
+static ssize_t offload_cache_reserved_store(struct device *d,
+					    struct device_attribute *attr,
+					    const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_offload_set_cache_reserved);
+}
+static DEVICE_ATTR_RW(offload_cache_reserved);
+
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
 	&dev_attr_hello_time.attr,
@@ -984,6 +1017,8 @@ static struct attribute *bridge_attrs[]
 	&dev_attr_vlan_stats_enabled.attr,
 	&dev_attr_vlan_stats_per_port.attr,
 #endif
+	&dev_attr_offload_cache_size.attr,
+	&dev_attr_offload_cache_reserved.attr,
 	NULL
 };
 
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -241,6 +241,7 @@ BRPORT_ATTR_FLAG(broadcast_flood, BR_BCA
 BRPORT_ATTR_FLAG(neigh_suppress, BR_NEIGH_SUPPRESS);
 BRPORT_ATTR_FLAG(isolated, BR_ISOLATED);
 BRPORT_ATTR_FLAG(bpdu_filter, BR_BPDU_FILTER);
+BRPORT_ATTR_FLAG(offload, BR_OFFLOAD);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -295,6 +296,7 @@ static const struct brport_attribute *br
 	&brport_attr_isolated,
 	&brport_attr_bpdu_filter,
 	&brport_attr_backup_port,
+	&brport_attr_offload,
 	NULL
 };
 
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -15,6 +15,7 @@
 
 #include "br_private.h"
 #include "br_private_tunnel.h"
+#include "br_private_offload.h"
 
 static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
 				    const void *ptr)
@@ -180,6 +181,7 @@ void br_handle_ingress_vlan_tunnel(struc
 	skb_dst_drop(skb);
 
 	__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
+	br_offload_skb_disable(skb);
 }
 
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
@@ -201,6 +203,7 @@ int br_handle_egress_vlan_tunnel(struct
 	if (err)
 		return err;
 
+	br_offload_skb_disable(skb);
 	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
 	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
 		skb_dst_set(skb, &tunnel_dst->dst);