lede/target/linux/generic/hack-5.4/999-shortcut-fe-support.patch
Date: 2020-07-02 23:30:56 +08:00

Kernel support hooks for the Shortcut Forwarding Engine (SFE), gated by
CONFIG_SHORTCUT_FE: adds an skb 'fast_forwarded' flag so fast-forwarded
packets bypass taps in xmit_one(), an 'athrs_fast_nat_recv' RCU hook early
in __netif_receive_skb_core(), br_dev_update_stats() so SFE can account
bridge traffic, a timer 'cust_data' field, and GPL exports of the conntrack
TCP window-check/liberal sysctl flags. (Text before the first "---" line is
ignored by patch(1).)
--- a/include/linux/if_bridge.h 2020-06-22 00:31:27.000000000 -0700
+++ b/include/linux/if_bridge.h 2020-06-27 18:17:02.739634872 -0700
@@ -51,7 +51,8 @@ struct br_ip_list {
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
--- a/include/linux/skbuff.h 2020-06-23 11:43:45.724010000 -0700
+++ b/include/linux/skbuff.h 2020-06-27 18:16:21.428010018 -0700
@@ -825,6 +825,9 @@ struct sk_buff {
__u8 decrypted:1;
#endif
__u8 gro_skip:1;
+#ifdef CONFIG_SHORTCUT_FE
+ __u8 fast_forwarded:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
--- a/include/linux/timer.h 2020-06-22 00:31:27.000000000 -0700
+++ b/include/linux/timer.h 2020-06-27 18:23:29.015910189 -0700
@@ -17,7 +17,9 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
-
+#ifdef CONFIG_SHORTCUT_FE
+ unsigned long cust_data;
+#endif
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
--- a/include/net/netns/conntrack.h 2020-06-23 11:43:45.752009000 -0700
+++ b/include/net/netns/conntrack.h 2020-06-27 18:18:26.138861119 -0700
@@ -115,8 +115,12 @@ struct netns_ct {
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
struct atomic_notifier_head nf_conntrack_chain;
#else
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#else
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
#endif
+#endif
struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
--- a/net/bridge/br_if.c 2020-06-22 00:31:27.000000000 -0700
+++ b/net/bridge/br_if.c 2020-06-27 18:31:59.765638309 -0700
@@ -746,6 +746,28 @@ void br_port_flags_change(struct net_bri
br_recalculate_neigh_suppress_enabled(br);
}
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct net_bridge *br;
+ struct pcpu_sw_netstats *stats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ br = netdev_priv(dev);
+ stats = this_cpu_ptr(br->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += nlstats->rx_packets;
+ stats->rx_bytes += nlstats->rx_bytes;
+ stats->tx_packets += nlstats->tx_packets;
+ stats->tx_bytes += nlstats->tx_bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
struct net_bridge_port *p;
--- a/net/core/dev.c 2020-06-23 11:43:45.728009000 -0700
+++ b/net/core/dev.c 2020-06-27 18:27:40.161351260 -0700
@@ -3191,10 +3191,17 @@ static int xmit_one(struct sk_buff *skb,
{
unsigned int len;
int rc;
-
+#ifdef CONFIG_SHORTCUT_FE
+ /* If this skb has been fast forwarded then we don't want it to
+ * go to any taps (by definition we're trying to bypass them).
+ */
+ if (!skb->fast_forwarded) {
+#endif
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
-
+#ifdef CONFIG_SHORTCUT_FE
+ }
+#endif
#ifdef CONFIG_ETHERNET_PACKET_MANGLE
if (!dev->eth_mangle_tx ||
(skb = dev->eth_mangle_tx(dev, skb)) != NULL)
@@ -4683,6 +4690,10 @@ void netdev_rx_handler_unregister(struct
RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
/*
* Limit the use of PFMEMALLOC reserves to those protocols that implement
@@ -4733,7 +4744,9 @@ static int __netif_receive_skb_core(stru
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
-
+#ifdef CONFIG_SHORTCUT_FE
+ int (*fast_recv)(struct sk_buff *skb);
+#endif
net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
@@ -4772,6 +4785,15 @@ another_round:
if (unlikely(!skb))
goto out;
}
+#ifdef CONFIG_SHORTCUT_FE
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+#endif
if (skb_skip_tc_classify(skb))
goto skip_classify;
--- a/net/Kconfig 2020-06-23 11:43:45.732009000 -0700
+++ b/net/Kconfig 2020-06-27 18:24:06.035538006 -0700
@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
# Extended BPF JIT (eBPF)
config HAVE_EBPF_JIT
bool
+
+config SHORTCUT_FE
+ bool "Enables kernel network stack path for Shortcut Forwarding Engine"
+ default y
--- a/net/netfilter/nf_conntrack_proto_tcp.c 2020-06-23 11:43:45.404018000 -0700
+++ b/net/netfilter/nf_conntrack_proto_tcp.c 2020-06-27 18:28:24.610214943 -0700
@@ -33,12 +33,16 @@
/* Do not check the TCP window for incoming packets */
static int nf_ct_tcp_no_window_check __read_mostly = 1;
-
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
+#endif
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;
-
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
+#endif
/* If it is set to zero, we disable picking up already established
connections. */
static int nf_ct_tcp_loose __read_mostly = 1;