lede/target/linux/ipq806x/patches-5.4/999-03a-qca-nss-ecm-support.patch
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -149,4 +149,39 @@ br_port_flag_is_set(const struct net_dev
}
#endif
+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+ unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr,
+ __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned char *addr,
+ unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD 0x01
+#define BR_FDB_EVENT_DEL 0x02
+
+struct br_fdb_event {
+ struct net_device *dev;
+ unsigned char addr[6];
+ unsigned char is_local;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+ const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
#endif
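
Not part of the patch: a minimal sketch of how an out-of-tree consumer (such as the ECM classifier) might use the bridge FDB notifier exports declared above. All example_* names are hypothetical.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>

static int example_fdb_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct br_fdb_event *fdb = data;

	/* event is BR_FDB_EVENT_ADD or BR_FDB_EVENT_DEL, see fdb_notify() */
	pr_info("bridge fdb %s: %pM on %s (local=%u)\n",
		event == BR_FDB_EVENT_ADD ? "add" : "del",
		fdb->addr, fdb->dev->name, fdb->is_local);
	return NOTIFY_DONE;
}

static struct notifier_block example_fdb_nb = {
	.notifier_call = example_fdb_event,
};

static int __init example_init(void)
{
	br_fdb_register_notify(&example_fdb_nb);
	return 0;
}

static void __exit example_exit(void)
{
	br_fdb_unregister_notify(&example_fdb_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
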
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -14,4 +14,30 @@
#include <linux/in6.h>
#include <uapi/linux/if_pppol2tp.h>
+/* QCA NSS ECM support - Start */
+/*
+ * Holds L2TP channel info
+ */
+struct pppol2tp_common_addr {
+ int tunnel_version; /* v2 or v3 */
+ __u32 local_tunnel_id, remote_tunnel_id; /* tunnel id */
+ __u32 local_session_id, remote_session_id; /* session id */
+ struct sockaddr_in local_addr, remote_addr; /* ip address and port */
+};
+
+/*
+ * L2TP channel operations
+ */
+struct pppol2tp_channel_ops {
+ struct ppp_channel_ops ops; /* ppp channel ops */
+};
+
+/*
+ * Exported helper that returns the addressing information of a
+ * PPPoL2TP channel.
+ */
+extern int pppol2tp_channel_addressing_get(struct ppp_channel *,
+ struct pppol2tp_common_addr *);
+/* QCA NSS ECM support - End */
+
#endif
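
Not part of the patch: a hedged sketch of a caller that queries the L2TP addressing of a resolved PPP channel through the export above; example_dump_l2tp_channel() is a made-up name, and the UDP ports in local_addr/remote_addr are left out for brevity.

#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/ppp_channel.h>
#include <linux/if_pppol2tp.h>

static void example_dump_l2tp_channel(struct ppp_channel *chan)
{
	struct pppol2tp_common_addr addr;

	if (pppol2tp_channel_addressing_get(chan, &addr))
		return;

	pr_info("l2tp tunnel %u -> %u, session %u -> %u, %pI4 -> %pI4\n",
		addr.local_tunnel_id, addr.remote_tunnel_id,
		addr.local_session_id, addr.remote_session_id,
		&addr.local_addr.sin_addr.s_addr,
		&addr.remote_addr.sin_addr.s_addr);
}
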
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -222,7 +222,28 @@ extern void vlan_vids_del_by_dev(struct
extern bool vlan_uses_dev(const struct net_device *dev);
+/* QCA NSS ECM support - Start */
+extern void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+extern u16 vlan_dev_get_egress_prio(struct net_device *dev, u32 skb_prio);
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev);
+/* QCA NSS ECM support - End */
+
#else
+/* QCA NSS ECM support - Start */
+static inline void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+
+}
+
+static inline u16 vlan_dev_get_egress_prio(struct net_device *dev,
+ u32 skb_prio)
+{
+ return 0;
+}
+/* QCA NSS ECM support - End */
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
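
Not part of the patch: a small, hypothetical helper showing how a forwarding engine might use the exports above to walk a stacked VLAN down to its real device while picking up the 802.1p egress priority for a given skb->priority.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static struct net_device *example_vlan_lowest_dev(struct net_device *dev,
						  u32 skb_prio, u16 *vlan_qos)
{
	while (is_vlan_dev(dev)) {
		*vlan_qos = vlan_dev_get_egress_prio(dev, skb_prio);
		dev = vlan_dev_next_dev(dev);
	}

	return dev;
}
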
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1656,6 +1656,20 @@ enum netdev_ml_priv_type {
ML_PRIV_CAN,
};
+/* QCA NSS ECM support - Start */
+enum netdev_priv_qca_ecm_flags {
+ IFF_QCA_ECM_TUN_TAP = 1<<0,
+ IFF_QCA_ECM_PPP_L2TPV2 = 1<<1,
+ IFF_QCA_ECM_PPP_L2TPV3 = 1<<2,
+ IFF_QCA_ECM_PPP_PPTP = 1<<3,
+};
+
+#define IFF_QCA_ECM_TUN_TAP IFF_QCA_ECM_TUN_TAP
+#define IFF_QCA_ECM_PPP_L2TPV2 IFF_QCA_ECM_PPP_L2TPV2
+#define IFF_QCA_ECM_PPP_L2TPV3 IFF_QCA_ECM_PPP_L2TPV3
+#define IFF_QCA_ECM_PPP_PPTP IFF_QCA_ECM_PPP_PPTP
+/* QCA NSS ECM support - End */
+
/**
* struct net_device - The DEVICE structure.
*
@@ -1966,6 +1980,7 @@ struct net_device {
unsigned int flags;
unsigned int priv_flags;
+ unsigned int priv_flags_qca_ecm; /* QCA NSS ECM support */
unsigned short gflags;
unsigned short padded;
@@ -2649,6 +2664,10 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ /* QCA NSS ECM Support - Start */
+ NETDEV_BR_JOIN,
+ NETDEV_BR_LEAVE,
+ /* QCA NSS ECM Support - End */
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -307,5 +307,13 @@ void bond_3ad_update_lacp_rate(struct bo
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
size_t bond_3ad_stats_size(void);
+
+/* QCA NSS ECM support - Start */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ uint8_t *dst_mac, void *src,
+ void *dst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM support - End */
#endif /* _NET_BOND_3AD_H */
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -244,6 +244,7 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
struct lock_class_key stats_lock_key;
+ u32 id; /* QCA NSS ECM support */
};
#define bond_slave_get_rcu(dev) \
@@ -758,4 +759,12 @@ static inline void bond_tx_drop(struct n
dev_kfree_skb_any(skb);
}
+/* QCA NSS ECM support - Start */
+extern struct bond_cb __rcu *bond_cb;
+
+uint32_t bond_xmit_hash_without_skb(uint8_t *src_mac, uint8_t *dst_mac,
+ void *psrc, void *pdst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM support - End */
#endif /* _NET_BONDING_H */
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -209,6 +209,11 @@ void rt6_multipath_rebalance(struct fib6
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -575,4 +575,15 @@ static inline void neigh_update_is_route
*notify = 1;
}
}
+
+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
#endif
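
Not part of the patch: a minimal, hypothetical listener for the neighbour MAC-update chain declared above. The callback is invoked from __neigh_update() with a struct neigh_mac_update carrying the old and the new link-layer address.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <net/neighbour.h>

static int example_neigh_mac_changed(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct neigh_mac_update *nmu = data;

	pr_info("neighbour mac update: %pM -> %pM\n",
		nmu->old_mac, nmu->update_mac);
	return NOTIFY_DONE;
}

static struct notifier_block example_neigh_nb = {
	.notifier_call = example_neigh_mac_changed,
};

/* Registered/unregistered from module init/exit:
 *	neigh_mac_update_register_notify(&example_neigh_nb);
 *	neigh_mac_update_unregister_notify(&example_neigh_nb);
 */
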
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -28,6 +28,10 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
NF_CT_EXT_SYNPROXY,
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */
+#endif
+
NF_CT_EXT_NUM,
};
@@ -40,6 +44,9 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
+/* QCA NSS ECM support - Start */
+#define NF_CT_EXT_DSCPREMARK_TYPE struct nf_ct_dscpremark_ext
+/* QCA NSS ECM support - End */
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -224,6 +224,11 @@ struct rtable *rt_dst_alloc(struct net_d
bool nopolicy, bool noxfrm, bool will_cache);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
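
Not part of the patch: a sketch of subscribing to the IPv4 route notifier declared above (the IPv6 rt6_register_notifier()/rt6_unregister_notifier() pair from ip6_route.h is used the same way). As implemented in fib_trie.c further down, the chain is a blocking notifier and passes a struct fib_info pointer with RTM_NEWROUTE/RTM_DELROUTE events.

#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/route.h>
#include <net/ip_fib.h>

static int example_route_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct fib_info *fi = data;

	if (event == RTM_NEWROUTE)
		pr_debug("IPv4 route added (fib_info %p)\n", fi);
	else if (event == RTM_DELROUTE)
		pr_debug("IPv4 route deleted (fib_info %p)\n", fi);

	return NOTIFY_DONE;
}

static struct notifier_block example_route_nb = {
	.notifier_call = example_route_event,
};

/* ip_rt_register_notifier(&example_route_nb) on init,
 * ip_rt_unregister_notifier(&example_route_nb) on exit.
 */
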
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -141,6 +141,23 @@ enum {
};
#define BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1)
+/* QCA NSS ECM support - Start */
+#ifdef __KERNEL__
+struct bond_cb {
+ void (*bond_cb_link_up)(struct net_device *slave);
+ void (*bond_cb_link_down)(struct net_device *slave);
+ void (*bond_cb_enslave)(struct net_device *slave);
+ void (*bond_cb_release)(struct net_device *slave);
+ void (*bond_cb_delete_by_slave)(struct net_device *slave);
+ void (*bond_cb_delete_by_mac)(uint8_t *mac_addr);
+};
+
+extern int bond_register_cb(struct bond_cb *cb);
+extern void bond_unregister_cb(void);
+extern int bond_get_id(struct net_device *bond_dev);
+#endif /* __KERNEL__ */
+/* QCA NSS ECM support - End */
+
#endif /* _LINUX_IF_BONDING_H */
/*
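
Not part of the patch: a sketch of how a hypothetical offload module would register the LAG callbacks declared above. bond_register_cb() copies the structure, so only the hooks of interest need to be filled in; the rest may stay NULL.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_bonding.h>

static void example_slave_link_up(struct net_device *slave)
{
	pr_info("bond slave %s: link up\n", slave->name);
}

static void example_slave_link_down(struct net_device *slave)
{
	pr_info("bond slave %s: link down\n", slave->name);
}

static struct bond_cb example_bond_cb = {
	.bond_cb_link_up = example_slave_link_up,
	.bond_cb_link_down = example_slave_link_down,
};

static int example_lag_init(void)
{
	return bond_register_cb(&example_bond_cb);	/* copied internally */
}

static void example_lag_exit(void)
{
	bond_unregister_cb();
}
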
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -121,6 +121,39 @@ static void ad_marker_response_received(
struct port *port);
static void ad_update_actor_keys(struct port *port, bool reset);
+/* QCA NSS ECM support - Start */
+struct bond_cb __rcu *bond_cb;
+
+int bond_register_cb(struct bond_cb *cb)
+{
+ struct bond_cb *lag_cb;
+
+ rcu_read_lock();
+ lag_cb = kzalloc(sizeof(*lag_cb), GFP_ATOMIC | __GFP_NOWARN);
+ if (!lag_cb) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ memcpy((void *)lag_cb, (void *)cb, sizeof(*cb));
+ rcu_assign_pointer(bond_cb, lag_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(bond_register_cb);
+
+void bond_unregister_cb(void)
+{
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ kfree(lag_cb_main);
+ rcu_assign_pointer(bond_cb, NULL);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(bond_unregister_cb);
+/* QCA NSS ECM support - End */
/* ================= api to bonding and kernel code ================== */
@@ -998,6 +1031,29 @@ static void ad_mux_machine(struct port *
port->actor_oper_port_state |=
AD_STATE_SYNCHRONIZATION;
}
+
+ /* QCA NSS ECM support - Start */
+ /* Send a notification about the change in state of this
+ * port. We only want to handle the case where the port moves
+ * from AD_MUX_COLLECTING_DISTRIBUTING ->
+ * AD_MUX_ATTACHED.
+ */
+ if (bond_slave_is_up(port->slave) &&
+ (last_state == AD_MUX_COLLECTING_DISTRIBUTING)) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_down) {
+ struct net_device *dev;
+
+ dev = port->slave->dev;
+ lag_cb_main->bond_cb_link_down(dev);
+ }
+ rcu_read_unlock();
+ }
+ /* QCA NSS ECM support - End */
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
@@ -1897,6 +1953,8 @@ static void ad_enable_collecting_distrib
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM support */
+
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Enabling port %d (LAG %d)\n",
port->actor_port_number,
@@ -1904,6 +1962,16 @@ static void ad_enable_collecting_distrib
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+
+ /* QCA NSS ECM support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(port->slave->dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM support - End */
}
}
@@ -2776,3 +2844,101 @@ int bond_3ad_stats_fill(struct sk_buff *
return 0;
}
+
+/* QCA NSS ECM support - Start */
+/* bond_3ad_get_tx_dev - Calculate egress interface for a given packet,
+ * for a LAG that is configured in 802.3AD mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address
+ * @dst: pointer to destination L3 address
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ * @layer4hdr: pointer to the layer-4 header (transport ports), used when
+ * @skb is NULL
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb() is used to calculate the
+ * hash from the given L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, u8 *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct aggregator *agg;
+ struct ad_info ad_info;
+ struct list_head *iter;
+ struct slave *slave;
+ struct slave *first_ok_slave = NULL;
+ u32 hash = 0;
+ int slaves_in_agg;
+ int slave_agg_no = 0;
+ int agg_id;
+
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ slaves_in_agg = ad_info.ports;
+ agg_id = ad_info.aggregator_id;
+
+ if (slaves_in_agg == 0) {
+ pr_debug("%s: Error: active aggregator is empty\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_agg_no = hash % slaves_in_agg;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for 802.3AD fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ slave_agg_no = hash % slaves_in_agg;
+ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
+
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && bond_slave_can_tx(slave))
+ first_ok_slave = slave;
+ slave_agg_no--;
+ continue;
+ }
+
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ if (slave_agg_no >= 0) {
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ bond_dev->name, agg_id);
+ return NULL;
+ }
+
+ /* We couldn't find any suitable slave after the agg_no, so use the
+ * first suitable slave that was found, if any.
+ */
+ if (first_ok_slave)
+ return first_ok_slave->dev;
+
+ return NULL;
+}
+/* QCA NSS ECM support - End */
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -199,6 +199,7 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(
#endif
unsigned int bond_net_id __read_mostly;
+static unsigned long bond_id_mask = 0xFFFFFFF0; /* QCA NSS ECM Support */
/*-------------------------- Forward declarations ---------------------------*/
@@ -847,6 +848,23 @@ void bond_change_active_slave(struct bon
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
+ /* QCA NSS ECM support - Start */
+ if (bond->params.mode == BOND_MODE_XOR) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_up) {
+ struct net_device *dev;
+
+ dev = new_active->dev;
+ lag_cb_main->bond_cb_link_up(dev);
+ }
+ rcu_read_unlock();
+ }
+ /* QCA NSS ECM support - End */
+
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
@@ -1407,6 +1425,7 @@ int bond_enslave(struct net_device *bond
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM support */
int link_reporting;
int res = 0, i;
@@ -1806,6 +1825,13 @@ int bond_enslave(struct net_device *bond
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
+ /* QCA NSS ECM support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+ rcu_read_unlock();
+ /* QCA NSS ECM support - End */
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
bond_is_active_slave(new_slave) ? "an active" : "a backup",
@@ -1878,6 +1904,14 @@ err_undo_flags:
}
}
+ /* QCA NSS ECM support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+ rcu_read_unlock();
+ /* QCA NSS ECM support - End */
+
return res;
}
@@ -1899,6 +1933,7 @@ static int __bond_release_one(struct net
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM support */
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -1921,6 +1956,14 @@ static int __bond_release_one(struct net
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
+ /* QCA NSS ECM support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_release)
+ lag_cb_main->bond_cb_release(slave_dev);
+ rcu_read_unlock();
+ /* QCA NSS ECM support - End */
+
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
@@ -2226,6 +2269,10 @@ static void bond_miimon_commit(struct bo
{
struct list_head *iter;
struct slave *slave, *primary;
+ /* QCA NSS ECM support - Start */
+ struct net_device *slave_dev = NULL;
+ struct bond_cb *lag_cb_main;
+ /* QCA NSS ECM support - End */
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
@@ -2269,6 +2316,12 @@ static void bond_miimon_commit(struct bo
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
+ /* QCA NSS ECM support - Start */
+ if ((bond->params.mode == BOND_MODE_XOR) &&
+ (!slave_dev))
+ slave_dev = slave->dev;
+ /* QCA NSS ECM support - End */
+
if (!bond->curr_active_slave || slave == primary)
goto do_failover;
@@ -2310,6 +2363,15 @@ do_failover:
}
bond_set_carrier(bond);
+
+ /* QCA NSS ECM support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (slave_dev && lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(slave_dev);
+ rcu_read_unlock();
+ /* QCA NSS ECM support - End */
}
/* bond_mii_monitor
@@ -4403,6 +4465,11 @@ static void bond_destructor(struct net_d
struct bonding *bond = netdev_priv(bond_dev);
if (bond->wq)
destroy_workqueue(bond->wq);
+
+ /* QCA NSS ECM Support - Start */
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
+ /* QCA NSS ECM Support - End */
}
void bond_setup(struct net_device *bond_dev)
@@ -4954,6 +5021,16 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
rtnl_unlock();
+
+ /* QCA NSS ECM Support - Start */
+ bond = netdev_priv(bond_dev);
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
+ /* QCA NSS ECM Support - End */
+
return 0;
}
@@ -5049,6 +5126,203 @@ static void __exit bonding_exit(void)
#endif
}
+/* QCA NSS ECM support - Start */
+static bool bond_flow_dissect_without_skb(struct bonding *bond,
+ u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst,
+ u16 protocol, __be16 *layer4hdr,
+ struct flow_keys *fk)
+{
+ u32 *src = NULL;
+ u32 *dst = NULL;
+
+ fk->ports.ports = 0;
+ src = (uint32_t *)psrc;
+ dst = (uint32_t *)pdst;
+
+ if (protocol == htons(ETH_P_IP)) {
+ /* V4 addresses and address type*/
+ fk->addrs.v4addrs.src = src[0];
+ fk->addrs.v4addrs.dst = dst[0];
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ /* V6 addresses and address type*/
+ memcpy(&fk->addrs.v6addrs.src, src, sizeof(struct in6_addr));
+ memcpy(&fk->addrs.v6addrs.dst, dst, sizeof(struct in6_addr));
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ } else {
+ return false;
+ }
+ if ((bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) &&
+ (layer4hdr))
+ fk->ports.ports = *layer4hdr;
+
+ return true;
+}
+
+/* Extract the appropriate headers based on bond's xmit policy */
+
+/* bond_xmit_hash_without_skb - Apply the load-balancing algorithm to compute
+ * a hash for a given set of L2/L3 addresses. Does not select an egress
+ * interface.
+ */
+uint32_t bond_xmit_hash_without_skb(u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct flow_keys flow;
+ u32 hash = 0;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect_without_skb(bond, src_mac, dst_mac, psrc,
+ pdst, protocol, layer4hdr, &flow))
+ return (dst_mac[5] ^ src_mac[5]);
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23)
+ hash = dst_mac[5] ^ src_mac[5];
+ else if (layer4hdr)
+ hash = (__force u32)flow.ports.ports;
+
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+
+ return hash;
+}
+
+/* bond_xor_get_tx_dev - Calculate egress interface for a given packet for a LAG
+ * that is configured in balance-xor mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash using
+ * L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+static struct net_device *bond_xor_get_tx_dev(struct sk_buff *skb,
+ u8 *src_mac, u8 *dst_mac,
+ void *src, void *dst,
+ u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
+ int slave_id = 0, i = 0;
+ u32 hash;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slave_cnt == 0) {
+ pr_debug("%s: Error: No slave is attached to the interface\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_id = hash % slave_cnt;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for balance-XOR fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, src,
+ dst, protocol, bond_dev,
+ layer4hdr);
+ slave_id = hash % slave_cnt;
+ }
+
+ i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0) {
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0)
+ break;
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ return NULL;
+}
+
+/* bond_get_tx_dev - Calculate egress interface for a given packet.
+ *
+ * Supports 802.3AD and balance-xor modes
+ *
+ * @skb: pointer to skb to be egressed, if valid
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * Returns: Either valid slave device, or NULL for un-supported LAG modes
+ */
+struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ if (!bond)
+ return NULL;
+
+ switch (bond->params.mode) {
+ case BOND_MODE_XOR:
+ return bond_xor_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ case BOND_MODE_8023AD:
+ return bond_3ad_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(bond_get_tx_dev);
+
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+
+ return bond->id;
+}
+EXPORT_SYMBOL(bond_get_id);
+/* QCA NSS ECM support - End */
+
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
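
Not part of the patch: bond_get_tx_dev() is exported above but not added to any header by this patch, so a caller would carry its own declaration, as sketched here. The example resolves the physical egress device of a LAG for an IPv4 flow; everything except bond_get_tx_dev() itself is hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

extern struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
					   u8 *dst_mac, void *src,
					   void *dst, u16 protocol,
					   struct net_device *bond_dev,
					   __be16 *layer4hdr);

static struct net_device *example_lag_egress(struct net_device *bond_dev,
					     u8 *smac, u8 *dmac,
					     __be32 *sip, __be32 *dip,
					     __be16 *l4_ports)
{
	struct net_device *slave;

	/* The slave list is walked under RCU inside bond_*_get_tx_dev() */
	rcu_read_lock();
	slave = bond_get_tx_dev(NULL, smac, dmac, sip, dip,
				htons(ETH_P_IP), bond_dev, l4_ports);
	rcu_read_unlock();

	return slave;
}
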
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -551,4 +551,52 @@ static int __init vlan_offload_init(void
return 0;
}
+/* QCA NSS ECM support - Start */
+/* Update the VLAN device with statistics from network offload engines */
+void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct vlan_pcpu_stats *stats;
+
+ if (!is_vlan_dev(dev))
+ return;
+
+ stats = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, 0);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += nlstats->rx_packets;
+ stats->rx_bytes += nlstats->rx_bytes;
+ stats->tx_packets += nlstats->tx_packets;
+ stats->tx_bytes += nlstats->tx_bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL(__vlan_dev_update_accel_stats);
+
+/* Look up the 802.1p egress priority map and return the 802.1p value */
+u16 vlan_dev_get_egress_prio(struct net_device *dev, u32 skb_prio)
+{
+ struct vlan_priority_tci_mapping *mp;
+
+ mp = vlan_dev_priv(dev)->egress_priority_map[(skb_prio & 0xf)];
+ while (mp) {
+ if (mp->priority == skb_prio) {
+ /* This should already be shifted
+ * to mask correctly with the
+ * VLAN's TCI
+ */
+ return mp->vlan_qos;
+ }
+ mp = mp->next;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vlan_dev_get_egress_prio);
+
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
fs_initcall(vlan_offload_init);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -37,6 +37,35 @@ static int fdb_insert(struct net_bridge
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int, bool);
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -337,6 +366,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
+ u8 mac_addr[6]; /* QCA NSS ECM support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -353,8 +383,15 @@ void br_fdb_cleanup(struct work_struct *
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
- if (!hlist_unhashed(&f->fdb_node))
+ if (!hlist_unhashed(&f->fdb_node)) {
+ ether_addr_copy(mac_addr, f->key.addr.addr);
fdb_delete(br, f, true);
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list, 0,
+ (void *)mac_addr);
+ /* QCA NSS ECM support - End */
+ }
spin_unlock_bh(&br->hash_lock);
}
}
@@ -587,6 +624,12 @@ void br_fdb_update(struct net_bridge *br
/* Take over HW learned entry */
if (unlikely(fdb->added_by_external_learn))
fdb->added_by_external_learn = 0;
+
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list,
+ 0, (void *)addr);
+ /* QCA NSS ECM support - End */
}
if (now != fdb->updated)
fdb->updated = now;
@@ -696,6 +739,25 @@ static void fdb_notify(struct net_bridge
struct sk_buff *skb;
int err = -ENOBUFS;
+ /* QCA NSS ECM support - Start */
+ if (fdb->dst) {
+ int event;
+ struct br_fdb_event fdb_event;
+
+ if (type == RTM_NEWNEIGH)
+ event = BR_FDB_EVENT_ADD;
+ else
+ event = BR_FDB_EVENT_DEL;
+
+ fdb_event.dev = fdb->dst->dev;
+ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+ fdb_event.is_local = fdb->is_local;
+ atomic_notifier_call_chain(&br_fdb_notifier_list,
+ event,
+ (void *)&fdb_event);
+ }
+ /* QCA NSS ECM support - End */
+
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
@@ -1212,3 +1274,44 @@ void br_fdb_clear_offload(const struct n
spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
+
+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+ addr);
+ return;
+ }
+
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, 0, true);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr, __u16 vid)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return NULL;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+/* QCA NSS ECM support - End */
+
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,12 @@
#include "br_private.h"
+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
/*
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
@@ -695,6 +701,8 @@ int br_add_if(struct net_bridge *br, str
kobject_uevent(&p->kobj, KOBJ_ADD);
+ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
return 0;
err7:
@@ -730,6 +738,8 @@ int br_del_if(struct net_bridge *br, str
p = br_port_get_rtnl(dev);
if (!p || p->br != br)
return -EINVAL;
+
+ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
/* Since more than one interface can be attached to a bridge,
* there still maybe an alternate path for netconsole to use;
@@ -785,6 +795,67 @@ void br_dev_update_stats(struct net_devi
}
EXPORT_SYMBOL_GPL(br_dev_update_stats);
+/* QCA NSS ECM support - Start */
+/* br_port_dev_get()
+ * If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ * use that to try and determine the egress port for that skb.
+ * If not, or if no egress port could be determined, use the given addr
+ * to identify the port on which it is reachable,
+ * returning a reference to the net device associated with that port.
+ *
+ * NOTE: Returns NULL if the given dev is not a bridge or the MAC has no
+ * associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie)
+{
+ struct net_bridge_fdb_entry *fdbe;
+ struct net_bridge *br;
+ struct net_device *netdev = NULL;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If the hook exists and the skb isn't NULL, try and get the port */
+ if (skb) {
+ br_port_dev_get_hook_t *port_dev_get_hook;
+
+ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+ if (port_dev_get_hook) {
+ struct net_bridge_port *pdst =
+ __br_get(port_dev_get_hook, NULL, dev, skb,
+ addr, cookie);
+ if (pdst) {
+ dev_hold(pdst->dev);
+ netdev = pdst->dev;
+ goto out;
+ }
+ }
+ }
+
+ /* Either there is no hook, or it could not
+ * determine the port to use - fall back to the FDB
+ */
+
+ br = netdev_priv(dev);
+
+ /* Lookup the fdb entry and get reference to the port dev */
+ fdbe = br_fdb_find_rcu(br, addr, 0);
+ if (fdbe && fdbe->dst) {
+ netdev = fdbe->dst->dev; /* port device */
+ dev_hold(netdev);
+ }
+out:
+ rcu_read_unlock();
+ return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+/* QCA NSS ECM support - End */
+
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
struct net_bridge_port *p;
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1269,4 +1269,9 @@ void br_do_proxy_suppress_arp(struct sk_
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
+
+/* QCA NSS ECM support - Start */
+#define __br_get(__hook, __default, __args ...) \
+ (__hook ? (__hook(__args)) : (__default))
+/* QCA NSS ECM support - End */
#endif
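
Not part of the patch: br_port_dev_get_hook (declared in if_bridge.h earlier in this patch) is meant to be filled in by an external forwarding module. A hypothetical install/remove pair could look like the sketch below; a real hook would resolve the destination port from its own state instead of returning NULL.

#include <linux/rcupdate.h>
#include <linux/if_bridge.h>

static struct net_bridge_port *
example_port_dev_get_hook(struct net_device *dev, struct sk_buff *skb,
			  unsigned char *addr, unsigned int cookie)
{
	return NULL;	/* let br_port_dev_get() fall back to the FDB */
}

static void example_install_hook(void)
{
	rcu_assign_pointer(br_port_dev_get_hook, example_port_dev_get_hook);
}

static void example_remove_hook(void)
{
	rcu_assign_pointer(br_port_dev_get_hook, NULL);
	synchronize_rcu();
}
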
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1235,7 +1235,21 @@ static void neigh_update_hhs(struct neig
}
}
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
@@ -1266,6 +1280,7 @@ static int __neigh_update(struct neighbo
int notify = 0;
struct net_device *dev;
int update_isrouter = 0;
+ struct neigh_mac_update nmu; /* QCA NSS ECM support */
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1275,6 +1290,8 @@ static int __neigh_update(struct neighbo
old = neigh->nud_state;
err = -EPERM;
+ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
if (neigh->dead) {
NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
new = old;
@@ -1317,6 +1334,11 @@ static int __neigh_update(struct neighbo
- compare new & old
- if they are different, check override flag
*/
+ /* QCA NSS ECM update - Start */
+ memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+ memcpy(nmu.update_mac, lladdr, dev->addr_len);
+ /* QCA NSS ECM update - End */
+
if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
@@ -1439,8 +1461,11 @@ out:
if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
neigh_update_gc_list(neigh);
- if (notify)
+ if (notify) {
neigh_update_notify(neigh, nlmsg_pid);
+ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+ }
trace_neigh_update_done(neigh, err);
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1116,6 +1116,9 @@ static bool fib_valid_key_len(u32 key, u
return true;
}
+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1283,6 +1286,9 @@ int fib_table_insert(struct net *net, st
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_NEWROUTE, fi);
+
return 0;
out_fib_notif:
@@ -1609,6 +1615,9 @@ int fib_table_delete(struct net *net, st
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_DELROUTE, fa_to_delete->fa_info);
+
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
@@ -2220,6 +2229,20 @@ void __init fib_trie_init(void)
0, SLAB_PANIC, NULL);
}
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -7202,3 +7202,35 @@ void addrconf_cleanup(void)
destroy_workqueue(addrconf_wq);
}
+
+/* QCA NSS ECM support - Start */
+/* ipv6_dev_find()
+ * Find (and hold) net device that has the given address.
+ * Or NULL on failure.
+ */
+struct net_device *ipv6_dev_find(struct net *net, struct in6_addr *addr,
+ int strict)
+{
+ struct inet6_ifaddr *ifp;
+ struct net_device *dev;
+
+ ifp = ipv6_get_ifaddr(net, addr, NULL, strict);
+ if (!ifp)
+ return NULL;
+
+ if (!ifp->idev) {
+ in6_ifa_put(ifp);
+ return NULL;
+ }
+
+ dev = ifp->idev->dev;
+ if (dev)
+ dev_hold(dev);
+
+ in6_ifa_put(ifp);
+
+ return dev;
+}
+EXPORT_SYMBOL(ipv6_dev_find);
+/* QCA NSS ECM support - End */
+
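
Not part of the patch: ipv6_dev_find() above is exported but its declaration is not added to a header in this hunk, so the caller below carries its own extern. The helper holds a reference on the returned device, which the caller must drop with dev_put(); example_report_v6_owner() is a made-up name.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <net/net_namespace.h>

extern struct net_device *ipv6_dev_find(struct net *net, struct in6_addr *addr,
					 int strict);

static void example_report_v6_owner(struct net *net, struct in6_addr *addr)
{
	struct net_device *dev;

	dev = ipv6_dev_find(net, addr, 1);
	if (!dev)
		return;

	pr_info("%pI6c is a local address on %s\n", addr, dev->name);
	dev_put(dev);
}
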
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -648,6 +648,7 @@ void ndisc_send_ns(struct net_device *de
ndisc_send_skb(skb, daddr, saddr);
}
+EXPORT_SYMBOL(ndisc_send_ns);
void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3784,6 +3784,9 @@ out_free:
return ERR_PTR(err);
}
+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */
+
int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
@@ -3795,6 +3798,10 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, rt);
+
fib6_info_release(rt);
return err;
@@ -3816,6 +3823,9 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -6144,6 +6154,20 @@ static int ip6_route_dev_notify(struct n
return NOTIFY_OK;
}
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
/*
* /proc
*/
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -92,6 +92,7 @@
#include <net/ip.h>
#include <net/udp.h>
#include <net/inet_common.h>
+#include <linux/if_pppox.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
@@ -125,9 +126,19 @@ struct pppol2tp_session {
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-static const struct ppp_channel_ops pppol2tp_chan_ops = {
- .start_xmit = pppol2tp_xmit,
+/* QCA NSS ECM support - Start */
+static int pppol2tp_get_channel_protocol(struct ppp_channel *);
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *);
+static void pppol2tp_hold_chan(struct ppp_channel *);
+static void pppol2tp_release_chan(struct ppp_channel *);
+static const struct pppol2tp_channel_ops pppol2tp_chan_ops = {
+ .ops.start_xmit = pppol2tp_xmit,
+ .ops.get_channel_protocol = pppol2tp_get_channel_protocol,
+ .ops.get_channel_protocol_ver = pppol2tp_get_channel_protocol_ver,
+ .ops.hold = pppol2tp_hold_chan,
+ .ops.release = pppol2tp_release_chan,
};
+/* QCA NSS ECM support - End */
static const struct proto_ops pppol2tp_ops;
@@ -240,6 +251,7 @@ static void pppol2tp_recv(struct l2tp_se
session->name, data_len);
po = pppox_sk(sk);
+ skb->skb_iif = ppp_dev_index(&po->chan); /* QCA NSS ECM support */
ppp_input(&po->chan, skb);
} else {
l2tp_dbg(session, L2TP_MSG_DATA,
@@ -380,6 +392,13 @@ static int pppol2tp_xmit(struct ppp_chan
skb->data[0] = PPP_ALLSTATIONS;
skb->data[1] = PPP_UI;
+ /* QCA NSS ECM support - Start */
+ /* set incoming interface as the ppp interface */
+ if ((skb->protocol == htons(ETH_P_IP)) ||
+ (skb->protocol == htons(ETH_P_IPV6)))
+ skb->skb_iif = ppp_dev_index(chan);
+ /* QCA NSS ECM support - End */
+
local_bh_disable();
l2tp_xmit_skb(session, skb, session->hdr_len);
local_bh_enable();
@@ -816,7 +835,7 @@ static int pppol2tp_connect(struct socke
po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.ops = &pppol2tp_chan_ops.ops; /* QCA NSS ECM support */
po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
error = ppp_register_net_channel(sock_net(sk), &po->chan);
@@ -1749,6 +1768,109 @@ static void __exit pppol2tp_exit(void)
unregister_pernet_device(&pppol2tp_net_ops);
}
+/* QCA NSS ECM support - Start */
+/* pppol2tp_hold_chan() */
+static void pppol2tp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pppol2tp_release_chan() */
+static void pppol2tp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pppol2tp_get_channel_protocol()
+ * Return the protocol type of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OL2TP;
+}
+
+/* pppol2tp_get_channel_protocol_ver()
+ * Return the protocol version of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int version = 0;
+
+ if (chan && chan->private)
+ sk = (struct sock *)chan->private;
+ else
+ return -1;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return -1;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return -1;
+ }
+
+ version = tunnel->version;
+
+ sock_put(sk);
+
+ return version;
+}
+
+/* pppol2tp_get_addressing() */
+static int pppol2tp_get_addressing(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct inet_sock *isk = NULL;
+ int err = -ENXIO;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return err;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return err;
+ }
+ isk = inet_sk(tunnel->sock);
+
+ addr->local_tunnel_id = tunnel->tunnel_id;
+ addr->remote_tunnel_id = tunnel->peer_tunnel_id;
+ addr->local_session_id = session->session_id;
+ addr->remote_session_id = session->peer_session_id;
+
+ addr->local_addr.sin_port = isk->inet_sport;
+ addr->remote_addr.sin_port = isk->inet_dport;
+ addr->local_addr.sin_addr.s_addr = isk->inet_saddr;
+ addr->remote_addr.sin_addr.s_addr = isk->inet_daddr;
+
+ sock_put(sk);
+ return 0;
+}
+
+/* pppol2tp_channel_addressing_get() */
+int pppol2tp_channel_addressing_get(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ return pppol2tp_get_addressing(chan, addr);
+}
+EXPORT_SYMBOL(pppol2tp_channel_addressing_get);
+/* QCA NSS ECM support - End */
+
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -135,6 +135,13 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_DSCPREMARK_EXT
+ bool 'Connection tracking extension for dscp remark target'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables the connection tracking extension used by
+ the DSCP remark target.
+
config NF_CONNTRACK_CHAIN_EVENTS
bool "Register multiple callbacks to ct events"
depends on NF_CONNTRACK_EVENTS
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -14,6 +14,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_LABEL
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o
obj-$(CONFIG_NETFILTER) = netfilter.o
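
Not part of the patch: nf_conntrack_dscpremark_ext.c itself comes from a separate patch in this series, so its structure layout is not shown here. As a rough sketch, a consumer would reach the extension on a conntrack entry through the generic extension API using the NF_CT_EXT_DSCPREMARK id registered above; the helper name is hypothetical and only tests for presence.

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>

static bool example_ct_has_dscpremark(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
	return nf_ct_ext_find(ct, NF_CT_EXT_DSCPREMARK) != NULL;
#else
	return false;
#endif
}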