kernel: bump 5.15 to 5.15.42 (#9439)

Signed-off-by: aakkll <94471752+aakkll@users.noreply.github.com>
aakkll 2022-05-25 22:03:53 +08:00 committed by GitHub
parent d5dc513703
commit b5cdd73cb4
16 changed files with 228 additions and 190 deletions


@@ -1,2 +1,2 @@
-LINUX_VERSION-5.15 = .41
+LINUX_VERSION-5.15 = .42
-LINUX_KERNEL_HASH-5.15.41 = 3c7cb1fc3b029b1b765a33af9608b6f18f734246050640def019ee4c4ad6591e
+LINUX_KERNEL_HASH-5.15.42 = 5ecd47a9f663368f005f74d24c8de4ffd93007b675ddf342e2c6db956bf82560
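The refreshed LINUX_KERNEL_HASH above is the SHA-256 checksum of the upstream tarball. A quick way to re-check it (a sketch only; the kernel.org URL is assumed here, not taken from this commit):

    wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.15.42.tar.xz
    sha256sum linux-5.15.42.tar.xz
    # expected: 5ecd47a9f663368f005f74d24c8de4ffd93007b675ddf342e2c6db956bf82560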


@@ -10,7 +10,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
-@@ -646,13 +646,41 @@ void nf_flow_table_free(struct nf_flowta
+@@ -608,13 +608,41 @@ void nf_flow_table_free(struct nf_flowta
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
@@ -55,7 +55,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
-@@ -438,47 +438,14 @@ static struct nft_expr_type nft_flow_off
+@@ -444,47 +444,14 @@ static struct nft_expr_type nft_flow_off
.owner = THIS_MODULE,
};


@@ -18,9 +18,9 @@ Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1878,6 +1878,27 @@ static int advk_pcie_remove(struct platf
+@@ -1874,6 +1874,27 @@ static int advk_pcie_remove(struct platf
-/* Remove IRQ handler */
+advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
-irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+ /* Disable MSI */
+ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);


@@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1916,6 +1916,9 @@ static int advk_pcie_remove(struct platf
+@@ -1912,6 +1912,9 @@ static int advk_pcie_remove(struct platf
val &= ~LINK_TRAINING_EN;
advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);


@@ -20,7 +20,7 @@ Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1919,6 +1919,10 @@ static int advk_pcie_remove(struct platf
+@@ -1915,6 +1915,10 @@ static int advk_pcie_remove(struct platf
/* Free config space for emulated root bridge */
pci_bridge_emul_cleanup(&pcie->bridge);


@@ -20,7 +20,7 @@ Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1923,6 +1923,11 @@ static int advk_pcie_remove(struct platf
+@@ -1919,6 +1919,11 @@ static int advk_pcie_remove(struct platf
if (pcie->reset_gpio)
gpiod_set_value_cansleep(pcie->reset_gpio, 1);


@@ -18,7 +18,7 @@ Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1640,6 +1640,9 @@ static int advk_pcie_enable_phy(struct a
+@@ -1634,6 +1634,9 @@ static int advk_pcie_enable_phy(struct a
return ret;
}


@@ -154,7 +154,7 @@
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
-@@ -164,6 +165,7 @@ int br_handle_frame_finish(struct net *n
+@@ -171,6 +172,7 @@ int br_handle_frame_finish(struct net *n
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
@@ -162,7 +162,7 @@
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false);
else
-@@ -297,6 +299,9 @@ static rx_handler_result_t br_handle_fra
+@@ -304,6 +306,9 @@ static rx_handler_result_t br_handle_fra
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
p = br_port_get_rcu(skb->dev);


@@ -12,7 +12,7 @@ Signed-off-by: Etienne Champetier <champetier.etienne@gmail.com>
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
-@@ -109,10 +109,14 @@ int br_handle_frame_finish(struct net *n
+@@ -116,10 +116,14 @@ int br_handle_frame_finish(struct net *n
}
}


@@ -98,7 +98,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
-@@ -0,0 +1,657 @@
+@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 2018-2021 Felix Fietkau <nbd@nbd.name>
+ *
@@ -110,6 +110,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+#include <linux/init.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_FLOWOFFLOAD.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
@@ -130,6 +131,21 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ struct delayed_work work;
+};
+
+struct nf_forward_info {
+ const struct net_device *indev;
+ const struct net_device *outdev;
+ const struct net_device *hw_outdev;
+ struct id {
+ __u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps;
+ u8 ingress_vlans;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ enum flow_offload_xmit_type xmit_type;
+};
+
+static DEFINE_SPINLOCK(hooks_lock);
+
+struct xt_flowoffload_table flowtable[2];
@@ -138,12 +154,23 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
-+ struct nf_flowtable *ft = priv;
++ struct vlan_ethhdr *veth;
-+
++ __be16 proto;
+ if (!atomic_read(&ft->rhashtable.nelems))
+ return NF_ACCEPT;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
+ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ proto = veth->h_vlan_encapsulated_proto;
+ break;
+ case htons(ETH_P_PPP_SES):
+ proto = nf_flow_pppoe_proto(skb);
+ break;
+ default:
+ proto = skb->protocol;
+ break;
+ }
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ return nf_flow_offload_ip_hook(priv, skb, state);
+ case htons(ETH_P_IPV6):
@@ -262,7 +289,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+}
+
+static void
-+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
++xt_flowoffload_check_hook(struct nf_flowtable *flowtable, struct flow_offload *flow, void *data)
+{
+ struct xt_flowoffload_table *table = data;
+ struct flow_offload_tuple *tuple0 = &flow->tuplehash[0].tuple;
@@ -296,7 +323,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ spin_unlock_bh(&hooks_lock);
+
+ err = nf_flow_table_iterate(&table->ft, xt_flowoffload_check_hook,
-+ table);
++ NULL);
+ if (err && err != -EAGAIN)
+ goto out;
+
@@ -323,183 +350,209 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ return false;
+}
+
-+static bool flow_is_valid_ether_device(const struct net_device *dev)
++static enum flow_offload_xmit_type nf_xmit_type(struct dst_entry *dst)
+{
+ if (dst_xfrm(dst))
+ return FLOW_OFFLOAD_XMIT_XFRM;
+
+ return FLOW_OFFLOAD_XMIT_NEIGH;
+}
+
+static void nf_default_forward_path(struct nf_flow_route *route,
+ struct dst_entry *dst_cache,
+ enum ip_conntrack_dir dir,
+ struct net_device **dev)
+{
+ dev[!dir] = dst_cache->dev;
+ route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
+ route->tuple[dir].dst = dst_cache;
+ route->tuple[dir].xmit_type = nf_xmit_type(dst_cache);
+}
+
+static bool nf_is_valid_ether_device(const struct net_device *dev)
+{
+ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
-+ return true;
++ return false;
+
+ return true;
+}
+
-+static void
++static void nf_dev_path_info(const struct net_device_path_stack *stack,
-+xt_flowoffload_route_check_path(struct nf_flow_route *route,
++ struct nf_forward_info *info,
-+ const struct nf_conn *ct,
++ unsigned char *ha)
+ enum ip_conntrack_dir dir,
+ struct net_device **out_dev)
+{
-+ const struct dst_entry *dst = route->tuple[dir].dst;
++ const struct net_device_path *path;
+ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
+ struct net_device_path_stack stack;
+ enum net_device_path_type prev_type;
+ struct net_device *dev = dst->dev;
+ struct neighbour *n;
+ bool last = false;
+ u8 nud_state;
+ int i;
+
-+ route->tuple[!dir].in.ifindex = dev->ifindex;
++ memcpy(info->h_dest, ha, ETH_ALEN);
+ route->tuple[dir].out.ifindex = dev->ifindex;
+
+ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
+ return;
+
+
+
+
+
+
+ n = dst_neigh_lookup(dst, daddr);
+ if (!n)
+ return;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ memcpy(route->tuple[dir].out.h_dest, n->ha, ETH_ALEN);
+ read_unlock_bh(&n->lock);
+ neigh_release(n);
+
+ if (!(nud_state & NUD_VALID))
+ return;
+
+ if (dev_fill_forward_path(dev, route->tuple[dir].out.h_dest, &stack) ||
+ !stack.num_paths)
+ return;
+
+ prev_type = DEV_PATH_ETHERNET;
+ for (i = 0; i <= stack.num_paths; i++) {
+ const struct net_device_path *path = &stack.path[i];
+ int n_encaps = route->tuple[!dir].in.num_encaps;
+
+ dev = (struct net_device *)path->dev;
+ if (flow_is_valid_ether_device(dev)) {
+ if (route->tuple[dir].xmit_type != FLOW_OFFLOAD_XMIT_DIRECT) {
+ memcpy(route->tuple[dir].out.h_source,
+ dev->dev_addr, ETH_ALEN);
+ route->tuple[dir].out.ifindex = dev->ifindex;
+ }
+ route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ }
+ +
+ for (i = 0; i < stack->num_paths; i++) {
+ path = &stack->path[i];
+ switch (path->type) {
-+ case DEV_PATH_PPPOE:
++ case DEV_PATH_ETHERNET:
+ case DEV_PATH_DSA:
+ case DEV_PATH_VLAN:
-+ if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
++ case DEV_PATH_PPPOE:
-+ i == stack.num_paths) {
++ info->indev = path->dev;
-+ last = true;
++ if (is_zero_ether_addr(info->h_source))
+ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+
+ if (path->type == DEV_PATH_ETHERNET)
+ break;
+ if (path->type == DEV_PATH_DSA) {
+ i = stack->num_paths;
+ break;
+ }
+
-+ route->tuple[!dir].in.num_encaps++;
++ /* DEV_PATH_VLAN and DEV_PATH_PPPOE */
-+ route->tuple[!dir].in.encap[n_encaps].id = path->encap.id;
++ if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
-+ route->tuple[!dir].in.encap[n_encaps].proto = path->encap.proto;
++ info->indev = NULL;
+ break;
+ }
+ if (!info->outdev)
+ info->outdev = path->dev;
+ info->encap[info->num_encaps].id = path->encap.id;
+ info->encap[info->num_encaps].proto = path->encap.proto;
+ info->num_encaps++;
+ if (path->type == DEV_PATH_PPPOE)
-+ memcpy(route->tuple[dir].out.h_dest,
++ memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
+ path->encap.h_dest, ETH_ALEN);
+ break;
+ case DEV_PATH_BRIDGE:
-+ switch (path->bridge.vlan_mode) {
++ if (is_zero_ether_addr(info->h_source))
-+ case DEV_PATH_BR_VLAN_TAG:
++ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+ if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
+ i == stack.num_paths) {
+ last = true;
+ break;
+ }
+
-+ route->tuple[!dir].in.num_encaps++;
++ switch (path->bridge.vlan_mode) {
-+ route->tuple[!dir].in.encap[n_encaps].id =
++ case DEV_PATH_BR_VLAN_UNTAG_HW:
-+ path->bridge.vlan_id;
++ info->ingress_vlans |= BIT(info->num_encaps - 1);
-+ route->tuple[!dir].in.encap[n_encaps].proto =
++ break;
-+ path->bridge.vlan_proto;
++ case DEV_PATH_BR_VLAN_TAG:
+ info->encap[info->num_encaps].id = path->bridge.vlan_id;
+ info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
+ info->num_encaps++;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG:
-+ route->tuple[!dir].in.num_encaps--;
++ info->num_encaps--;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ route->tuple[!dir].in.ingress_vlans |= BIT(n_encaps - 1);
+ break;
+ case DEV_PATH_BR_VLAN_KEEP:
+ break;
+ }
+ break;
+ default:
-+ last = true;
++ info->indev = NULL;
+ break;
+ }
+
+ if (last)
+ break;
+ }
+ if (!info->outdev)
+ info->outdev = info->indev;
+
-+ *out_dev = dev;
++ info->hw_outdev = info->indev;
-+ route->tuple[dir].out.hw_ifindex = dev->ifindex;
++
-+ route->tuple[!dir].in.ifindex = dev->ifindex;
++ if (nf_is_valid_ether_device(info->indev))
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+}
+
-+static int
++static int nf_dev_fill_forward_path(const struct nf_flow_route *route,
-+xt_flowoffload_route_dir(struct nf_flow_route *route, const struct nf_conn *ct,
++ const struct dst_entry *dst_cache,
-+ enum ip_conntrack_dir dir,
++ const struct nf_conn *ct,
-+ const struct xt_action_param *par, int ifindex)
++ enum ip_conntrack_dir dir, u8 *ha,
+ struct net_device_path_stack *stack)
+{
-+ struct dst_entry *dst = NULL;
++ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
-+ struct flowi fl;
++ struct net_device *dev = dst_cache->dev;
+ struct neighbour *n;
+ u8 nud_state;
+
-+ memset(&fl, 0, sizeof(fl));
++ if (!nf_is_valid_ether_device(dev))
-+ switch (xt_family(par)) {
++ goto out;
-+ case NFPROTO_IPV4:
++
-+ fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.src.u3.ip;
++ n = dst_neigh_lookup(dst_cache, daddr);
-+ fl.u.ip4.flowi4_oif = ifindex;
++ if (!n)
-+ break;
++ return -1;
-+ case NFPROTO_IPV6:
++
-+ fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
++ read_lock_bh(&n->lock);
-+ fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.src.u3.in6;
++ nud_state = n->nud_state;
-+ fl.u.ip6.flowi6_oif = ifindex;
++ ether_addr_copy(ha, n->ha);
-+ break;
++ read_unlock_bh(&n->lock);
+ neigh_release(n);
+
+ if (!(nud_state & NUD_VALID))
+ return -1;
+
+out:
+ return dev_fill_forward_path(dev, ha, stack);
+}
+
+static void nf_dev_forward_path(struct nf_flow_route *route,
+ const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ struct net_device **devs)
+{
+ const struct dst_entry *dst = route->tuple[dir].dst;
+ struct net_device_path_stack stack;
+ struct nf_forward_info info = {};
+ unsigned char ha[ETH_ALEN];
+ int i;
+
+ if (nf_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
+ nf_dev_path_info(&stack, &info, ha);
+
+ devs[!dir] = (struct net_device *)info.indev;
+ if (!info.indev)
+ return;
+
+ route->tuple[!dir].in.ifindex = info.indev->ifindex;
+ for (i = 0; i < info.num_encaps; i++) {
+ route->tuple[!dir].in.encap[i].id = info.encap[i].id;
+ route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
+ }
+ route->tuple[!dir].in.num_encaps = info.num_encaps;
+ route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
+
-+ nf_route(xt_net(par), &dst, &fl, false, xt_family(par));
++ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
-+ if (!dst)
++ memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
-+ return -ENOENT;
++ memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
-+
++ route->tuple[dir].out.ifindex = info.outdev->ifindex;
-+ route->tuple[dir].dst = dst;
++ route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
-+ if (dst_xfrm(dst))
++ route->tuple[dir].xmit_type = info.xmit_type;
-+ route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_XFRM;
++ }
+ else
+ route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_NEIGH;
+
+ return 0;
+}
+
+static int
+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
+ const struct xt_action_param *par,
+ struct nf_flow_route *route, enum ip_conntrack_dir dir,
-+ struct net_device **dev)
++ struct net_device **devs)
+{
-+ int ret;
++ struct dst_entry *this_dst = skb_dst(skb);
+ struct dst_entry *other_dst = NULL;
+ struct flowi fl;
+
-+ ret = xt_flowoffload_route_dir(route, ct, dir, par,
++ memset(&fl, 0, sizeof(fl));
-+ dev[dir]->ifindex);
++ switch (xt_family(par)) {
-+ if (ret)
++ case NFPROTO_IPV4:
-+ return ret;
++ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = xt_in(par)->ifindex;
+ break;
+ case NFPROTO_IPV6:
+ fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = xt_in(par)->ifindex;
+ break;
+ }
+
-+ ret = xt_flowoffload_route_dir(route, ct, !dir, par,
++ nf_route(xt_net(par), &other_dst, &fl, false, xt_family(par));
-+ dev[!dir]->ifindex);
++ if (!other_dst)
-+ if (ret)
++ return -ENOENT;
+ return ret;
+
-+ xt_flowoffload_route_check_path(route, ct, dir, &dev[!dir]);
++ nf_default_forward_path(route, this_dst, dir, devs);
-+ xt_flowoffload_route_check_path(route, ct, !dir, &dev[dir]);
++ nf_default_forward_path(route, other_dst, !dir, devs);
+
+ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
+ route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
+ nf_dev_forward_path(route, ct, dir, devs);
+ nf_dev_forward_path(route, ct, !dir, devs);
+ }
+
+ return 0;
+}
@@ -542,7 +595,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ }
+
+ if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
-+ ct->status & IPS_SEQ_ADJUST)
++ ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
+ return XT_CONTINUE;
+
+ if (!nf_ct_is_confirmed(ct))
@@ -586,7 +639,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ xt_flowoffload_check_device(table, devs[0]);
+ xt_flowoffload_check_device(table, devs[1]);
+
+ dst_release(route.tuple[dir].dst);
+ dst_release(route.tuple[!dir].dst);
+
+ return XT_CONTINUE;
@@ -594,7 +646,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+err_flow_add:
+ flow_offload_free(flow);
+err_flow_alloc:
+ dst_release(route.tuple[dir].dst);
+ dst_release(route.tuple[!dir].dst);
+err_flow_route:
+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
@@ -661,20 +712,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ .notifier_call = flow_offload_netdev_event,
+};
+
+static unsigned int
+nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ return nf_flow_offload_ip_hook(priv, skb, state);
+ case htons(ETH_P_IPV6):
+ return nf_flow_offload_ipv6_hook(priv, skb, state);
+ }
+
+ return NF_ACCEPT;
+}
+
+static int nf_flow_rule_route_inet(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
@@ -704,7 +741,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_inet,
+ .free = nf_flow_table_free,
-+ .hook = nf_flow_offload_inet_hook,
++ .hook = xt_flowoffload_net_hook,
+ .owner = THIS_MODULE,
+};
+
@@ -766,23 +803,23 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-@@ -397,8 +396,7 @@ flow_offload_lookup(struct nf_flowtable
+@@ -380,8 +379,7 @@ flow_offload_lookup(struct nf_flowtable
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
-static int
-nf_flow_table_iterate(struct nf_flowtable *flow_table,
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
-void (*iter)(struct flow_offload *flow, void *data),
+void (*iter)(struct nf_flowtable *flowtable,
struct flow_offload *flow, void *data),
void *data)
-{
+@@ -435,6 +433,7 @@ static void nf_flow_offload_gc_step(stru
-@@ -430,6 +428,7 @@ nf_flow_table_iterate(struct nf_flowtabl
+nf_flow_offload_stats(flow_table, flow);
}
return err;
} }
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
+static void nf_flow_offload_work_gc(struct work_struct *work)
{
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
@@ -806,14 +843,15 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+#endif /* _XT_FLOWOFFLOAD_H */
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
-@@ -275,6 +275,10 @@ void nf_flow_table_free(struct nf_flowta
+@@ -275,6 +275,11 @@ void nf_flow_table_free(struct nf_flowta
void flow_offload_teardown(struct flow_offload *flow);
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
-+ void (*iter)(struct flow_offload *flow, void *data),
++ void (*iter)(struct nf_flowtable *flowtable,
-+ void *data);
++ struct flow_offload *flow, void *data),
+ void *data);
+
void nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
u8 protocol, enum flow_offload_tuple_dir dir);
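
A note on the prototype shown just above: with this refresh, the callback passed to nf_flow_table_iterate() takes the owning flowtable as its first argument. A minimal sketch of a caller against that new signature (the function names and the counting logic are illustrative only, not part of this commit):

#include <net/netfilter/nf_flow_table.h>

/* Illustrative only: count the entries of a flowtable using the
 * three-argument iterator callback exported by the patch above. */
static void count_flow(struct nf_flowtable *flowtable,
                       struct flow_offload *flow, void *data)
{
        unsigned int *entries = data;

        (*entries)++;
}

static unsigned int count_offloaded_flows(struct nf_flowtable *ft)
{
        unsigned int entries = 0;

        nf_flow_table_iterate(ft, count_flow, &entries);
        return entries;
}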


@@ -63,7 +63,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+dma-shared-buffer-objs := $(dma-buf-objs-y)
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
-@@ -1498,4 +1498,5 @@ static void __exit dma_buf_deinit(void)
+@@ -1506,4 +1506,5 @@ static void __exit dma_buf_deinit(void)
kern_unmount(dma_buf_mnt);
dma_buf_uninit_sysfs_statistics();
}


@@ -15,7 +15,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
-@@ -197,6 +197,9 @@ static void __br_handle_local_finish(str
+@@ -204,6 +204,9 @@ static void __br_handle_local_finish(str
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -25,7 +25,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
__br_handle_local_finish(skb);
/* return 1 to signal the okfn() was called so it's ok to use the skb */
-@@ -362,6 +365,17 @@ static rx_handler_result_t br_handle_fra
+@@ -369,6 +372,17 @@ static rx_handler_result_t br_handle_fra
forward:
switch (p->state) {


@@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
-@@ -319,6 +319,8 @@ static rx_handler_result_t br_handle_fra
+@@ -326,6 +326,8 @@ static rx_handler_result_t br_handle_fra
fwd_mask |= p->group_fwd_mask;
switch (dest[5]) {
case 0x00: /* Bridge Group Address */


@@ -25,7 +25,7 @@ Signed-off-by: Marek Behún <kabel@kernel.org>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1554,6 +1554,22 @@ static void advk_pcie_handle_int(struct
+@@ -1553,6 +1553,22 @@ static void advk_pcie_handle_int(struct
dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
}


@@ -33,15 +33,15 @@ Signed-off-by: Marek Behún <kabel@kernel.org>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -275,7 +275,6 @@ struct advk_pcie {
+@@ -274,7 +274,6 @@ struct advk_pcie {
-int irq;
+u8 wins_count;
struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
- struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
-@@ -1331,14 +1330,19 @@ static void advk_pcie_irq_unmask(struct
+@@ -1330,14 +1329,19 @@ static void advk_pcie_irq_unmask(struct
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
@@ -63,7 +63,7 @@ Signed-off-by: Marek Behún <kabel@kernel.org>
irq_set_chip_data(virq, pcie);
return 0;
-@@ -1397,7 +1401,6 @@ static int advk_pcie_init_irq_domain(str
+@@ -1396,7 +1400,6 @@ static int advk_pcie_init_irq_domain(str
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
@@ -71,7 +71,7 @@ Signed-off-by: Marek Behún <kabel@kernel.org>
int ret = 0;
raw_spin_lock_init(&pcie->irq_lock);
-@@ -1408,28 +1411,14 @@ static int advk_pcie_init_irq_domain(str
+@@ -1407,28 +1410,14 @@ static int advk_pcie_init_irq_domain(str
return -ENODEV;
}


@@ -23,7 +23,7 @@ Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
-@@ -1638,9 +1638,7 @@ static int advk_pcie_enable_phy(struct a
+@@ -1632,9 +1632,7 @@ static int advk_pcie_enable_phy(struct a
}
ret = phy_power_on(pcie->phy);