diff --git a/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch b/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch new file mode 100644 index 000000000..e4c0833ae --- /dev/null +++ b/target/linux/generic/backport-5.15/081-net-next-regmap-allow-to-define-reg_update_bits-for-no-bus.patch @@ -0,0 +1,52 @@ +From 02d6fdecb9c38de19065f6bed8d5214556fd061d Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Thu, 4 Nov 2021 16:00:40 +0100 +Subject: regmap: allow to define reg_update_bits for no bus configuration + +Some device requires a special handling for reg_update_bits and can't use +the normal regmap read write logic. An example is when locking is +handled by the device and rmw operations requires to do atomic operations. +Allow to declare a dedicated function in regmap_config for +reg_update_bits in no bus configuration. + +Signed-off-by: Ansuel Smith +Link: https://lore.kernel.org/r/20211104150040.1260-1-ansuelsmth@gmail.com +Signed-off-by: Mark Brown +--- + drivers/base/regmap/regmap.c | 1 + + include/linux/regmap.h | 7 +++++++ + 2 files changed, 8 insertions(+) + +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -877,6 +877,7 @@ struct regmap *__regmap_init(struct devi + if (!bus) { + map->reg_read = config->reg_read; + map->reg_write = config->reg_write; ++ map->reg_update_bits = config->reg_update_bits; + + map->defer_caching = false; + goto skip_format_initialization; +--- a/include/linux/regmap.h ++++ b/include/linux/regmap.h +@@ -290,6 +290,11 @@ typedef void (*regmap_unlock)(void *); + * read operation on a bus such as SPI, I2C, etc. Most of the + * devices do not need this. + * @reg_write: Same as above for writing. ++ * @reg_update_bits: Optional callback that if filled will be used to perform ++ * all the update_bits(rmw) operation. Should only be provided ++ * if the function require special handling with lock and reg ++ * handling and the operation cannot be represented as a simple ++ * update_bits operation on a bus such as SPI, I2C, etc. + * @fast_io: Register IO is fast. Use a spinlock instead of a mutex + * to perform locking. This field is ignored if custom lock/unlock + * functions are used (see fields lock/unlock of struct regmap_config). 
+@@ -372,6 +377,8 @@ struct regmap_config { + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); ++ int (*reg_update_bits)(void *context, unsigned int reg, ++ unsigned int mask, unsigned int val); + + bool fast_io; + diff --git a/target/linux/generic/backport-5.15/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch b/target/linux/generic/backport-5.15/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch new file mode 100644 index 000000000..18a8752a1 --- /dev/null +++ b/target/linux/generic/backport-5.15/300-v5.18-pinctrl-qcom-Return--EINVAL-for-setting-affinity-if-no-IRQ-parent.patch @@ -0,0 +1,48 @@ +From: Manivannan Sadhasivam +To: linus.walleij@linaro.org +Cc: bjorn.andersson@linaro.org, dianders@chromium.org, + linux-arm-msm@vger.kernel.org, linux-gpio@vger.kernel.org, + linux-kernel@vger.kernel.org, + Manivannan Sadhasivam +Subject: [PATCH] pinctrl: qcom: Return -EINVAL for setting affinity if no IRQ + parent +Date: Thu, 13 Jan 2022 21:56:17 +0530 +Message-Id: <20220113162617.131697-1-manivannan.sadhasivam@linaro.org> + +The MSM GPIO IRQ controller relies on the parent IRQ controller to set the +CPU affinity for the IRQ. And this is only valid if there is any wakeup +parent available and defined in DT. + +For the case of no parent IRQ controller defined in DT, +msm_gpio_irq_set_affinity() and msm_gpio_irq_set_vcpu_affinity() should +return -EINVAL instead of 0 as the affinity can't be set. + +Otherwise, below warning will be printed by genirq: + +genirq: irq_chip msmgpio did not update eff. affinity mask of irq 70 + +Signed-off-by: Manivannan Sadhasivam +--- + drivers/pinctrl/qcom/pinctrl-msm.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -1157,7 +1157,7 @@ static int msm_gpio_irq_set_affinity(str + if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) + return irq_chip_set_affinity_parent(d, dest, force); + +- return 0; ++ return -EINVAL; + } + + static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +@@ -1168,7 +1168,7 @@ static int msm_gpio_irq_set_vcpu_affinit + if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) + return irq_chip_set_vcpu_affinity_parent(d, vcpu_info); + +- return 0; ++ return -EINVAL; + } + + static void msm_gpio_irq_handler(struct irq_desc *desc) diff --git a/target/linux/generic/backport-5.15/350-v5.18-MIPS-pgalloc-fix-memory-leak-caused-by-pgd_free.patch b/target/linux/generic/backport-5.15/350-v5.18-MIPS-pgalloc-fix-memory-leak-caused-by-pgd_free.patch new file mode 100644 index 000000000..7ab9d0764 --- /dev/null +++ b/target/linux/generic/backport-5.15/350-v5.18-MIPS-pgalloc-fix-memory-leak-caused-by-pgd_free.patch @@ -0,0 +1,48 @@ +From 7f297c70bebd20f3e02c9b6046e4e5e71d38ffe9 Mon Sep 17 00:00:00 2001 +From: Yaliang Wang +Date: Thu, 10 Mar 2022 19:31:16 +0800 +Subject: [PATCH] MIPS: pgalloc: fix memory leak caused by pgd_free() + +pgd page is freed by generic implementation pgd_free() since commit +f9cb654cb550 ("asm-generic: pgalloc: provide generic pgd_free()"), +however, there are scenarios that the system uses more than one page as +the pgd table, in such cases the generic implementation pgd_free() won't +be applicable anymore. 
For example, when PAGE_SIZE_4KB is enabled and +MIPS_VA_BITS_48 is not enabled in a 64bit system, the macro "PGD_ORDER" +will be set as "1", which will cause allocating two pages as the pgd +table. Well, at the same time, the generic implementation pgd_free() +just free one pgd page, which will result in the memory leak. + +The memory leak can be easily detected by executing shell command: +"while true; do ls > /dev/null; grep MemFree /proc/meminfo; done" + +Fixes: f9cb654cb550 ("asm-generic: pgalloc: provide generic pgd_free()") +Signed-off-by: Yaliang Wang +Signed-off-by: Thomas Bogendoerfer +(cherry picked from commit 2bc5bab9a763d520937e4f3fe8df51c6a1eceb97) +--- + arch/mips/include/asm/pgalloc.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/arch/mips/include/asm/pgalloc.h ++++ b/arch/mips/include/asm/pgalloc.h +@@ -15,6 +15,7 @@ + + #define __HAVE_ARCH_PMD_ALLOC_ONE + #define __HAVE_ARCH_PUD_ALLOC_ONE ++#define __HAVE_ARCH_PGD_FREE + #include + + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, +@@ -48,6 +49,11 @@ static inline void pud_populate(struct m + extern void pgd_init(unsigned long page); + extern pgd_t *pgd_alloc(struct mm_struct *mm); + ++static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) ++{ ++ free_pages((unsigned long)pgd, PGD_ORDER); ++} ++ + #define __pte_free_tlb(tlb,pte,address) \ + do { \ + pgtable_pte_page_dtor(pte); \ diff --git a/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch b/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch new file mode 100644 index 000000000..5c4984176 --- /dev/null +++ b/target/linux/generic/backport-5.15/410-mtd-next-mtd-parsers-trx-allow-to-use-on-MediaTek-MIPS-SoCs.patch @@ -0,0 +1,33 @@ +From 2365f91c861cbfeef7141c69842848c7b2d3c2db Mon Sep 17 00:00:00 2001 +From: INAGAKI Hiroshi +Date: Sun, 13 Feb 2022 15:40:44 +0900 +Subject: [PATCH] mtd: parsers: trx: allow to use on MediaTek MIPS SoCs + +Buffalo sells some router devices which have trx-formatted firmware, +based on MediaTek MIPS SoCs. To use parser_trx on those devices, add +"RALINK" to dependency and allow to compile for MediaTek MIPS SoCs. + +examples: + +- WCR-1166DS (MT7628) +- WSR-1166DHP (MT7621) +- WSR-2533DHP (MT7621) + +Signed-off-by: INAGAKI Hiroshi +Signed-off-by: Miquel Raynal +Link: https://lore.kernel.org/linux-mtd/20220213064045.1781-1-musashino.open@gmail.com +--- + drivers/mtd/parsers/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/mtd/parsers/Kconfig ++++ b/drivers/mtd/parsers/Kconfig +@@ -115,7 +115,7 @@ config MTD_AFS_PARTS + + config MTD_PARSER_TRX + tristate "Parser for TRX format partitions" +- depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST) ++ depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || RALINK || COMPILE_TEST) + help + TRX is a firmware format used by Broadcom on their devices. It + may contain up to 3/4 partitions (depending on the version). 
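For context on the MIPS pgalloc pgd_free() fix above: with PAGE_SIZE_4KB enabled and MIPS_VA_BITS_48 disabled on a 64-bit system, PGD_ORDER is 1, so pgd_alloc() hands out two pages while the generic pgd_free() returns only one. A minimal standalone sketch of that arithmetic (plain userspace C, with the page size and order taken from the commit message above; this is illustrative only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long page_size = 4096;  /* PAGE_SIZE_4KB */
        const unsigned int  pgd_order = 1;     /* PGD_ORDER in this configuration */

        unsigned long allocated = page_size << pgd_order;  /* pgd_alloc(): two pages  */
        unsigned long freed     = page_size;                /* generic pgd_free(): one */

        printf("allocated %lu, freed %lu, leaked %lu bytes per pgd\n",
               allocated, freed, allocated - freed);
        return 0;
    }

In other words, roughly one 4 KiB page is lost each time a pgd is torn down, which is why the MemFree loop quoted in the commit message shows free memory steadily shrinking.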
diff --git a/target/linux/generic/backport-5.15/700-net-next-net-dsa-introduce-tagger-owned-storage-for-private.patch b/target/linux/generic/backport-5.15/700-net-next-net-dsa-introduce-tagger-owned-storage-for-private.patch new file mode 100644 index 000000000..fe47c175a --- /dev/null +++ b/target/linux/generic/backport-5.15/700-net-next-net-dsa-introduce-tagger-owned-storage-for-private.patch @@ -0,0 +1,279 @@ +From dc452a471dbae8aca8257c565174212620880093 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Fri, 10 Dec 2021 01:34:37 +0200 +Subject: net: dsa: introduce tagger-owned storage for private and shared data + +Ansuel is working on register access over Ethernet for the qca8k switch +family. This requires the qca8k tagging protocol driver to receive +frames which aren't intended for the network stack, but instead for the +qca8k switch driver itself. + +The dp->priv is currently the prevailing method for passing data back +and forth between the tagging protocol driver and the switch driver. +However, this method is riddled with caveats. + +The DSA design allows in principle for any switch driver to return any +protocol it desires in ->get_tag_protocol(). The dsa_loop driver can be +modified to do just that. But in the current design, the memory behind +dp->priv has to be allocated by the switch driver, so if the tagging +protocol is paired to an unexpected switch driver, we may end up in NULL +pointer dereferences inside the kernel, or worse (a switch driver may +allocate dp->priv according to the expectations of a different tagger). + +The latter possibility is even more plausible considering that DSA +switches can dynamically change tagging protocols in certain cases +(dsa <-> edsa, ocelot <-> ocelot-8021q), and the current design lends +itself to mistakes that are all too easy to make. + +This patch proposes that the tagging protocol driver should manage its +own memory, instead of relying on the switch driver to do so. +After analyzing the different in-tree needs, it can be observed that the +required tagger storage is per switch, therefore a ds->tagger_data +pointer is introduced. In principle, per-port storage could also be +introduced, although there is no need for it at the moment. Future +changes will replace the current usage of dp->priv with ds->tagger_data. + +We define a "binding" event between the DSA switch tree and the tagging +protocol. During this binding event, the tagging protocol's ->connect() +method is called first, and this may allocate some memory for each +switch of the tree. Then a cross-chip notifier is emitted for the +switches within that tree, and they are given the opportunity to fix up +the tagger's memory (for example, they might set up some function +pointers that represent virtual methods for consuming packets). +Because the memory is owned by the tagger, there exists a ->disconnect() +method for the tagger (which is the place to free the resources), but +there doesn't exist a ->disconnect() method for the switch driver. +This is part of the design. The switch driver should make minimal use of +the public part of the tagger data, and only after type-checking it +using the supplied "proto" argument. + +In the code there are in fact two binding events, one is the initial +event in dsa_switch_setup_tag_protocol(). At this stage, the cross chip +notifier chains aren't initialized, so we call each switch's connect() +method by hand. 
Then there is dsa_tree_bind_tag_proto() during +dsa_tree_change_tag_proto(), and here we have an old protocol and a new +one. We first connect to the new one before disconnecting from the old +one, to simplify error handling a bit and to ensure we remain in a valid +state at all times. + +Co-developed-by: Ansuel Smith +Signed-off-by: Ansuel Smith +Signed-off-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + include/net/dsa.h | 12 +++++++++ + net/dsa/dsa2.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++--- + net/dsa/dsa_priv.h | 1 + + net/dsa/switch.c | 14 +++++++++++ + 4 files changed, 96 insertions(+), 4 deletions(-) + +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -80,12 +80,15 @@ enum dsa_tag_protocol { + }; + + struct dsa_switch; ++struct dsa_switch_tree; + + struct dsa_device_ops { + struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); + void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, + int *offset); ++ int (*connect)(struct dsa_switch_tree *dst); ++ void (*disconnect)(struct dsa_switch_tree *dst); + unsigned int needed_headroom; + unsigned int needed_tailroom; + const char *name; +@@ -329,6 +332,8 @@ struct dsa_switch { + */ + void *priv; + ++ void *tagger_data; ++ + /* + * Configuration data for this switch. + */ +@@ -584,6 +589,13 @@ struct dsa_switch_ops { + enum dsa_tag_protocol mprot); + int (*change_tag_protocol)(struct dsa_switch *ds, int port, + enum dsa_tag_protocol proto); ++ /* ++ * Method for switch drivers to connect to the tagging protocol driver ++ * in current use. The switch driver can provide handlers for certain ++ * types of packets for switch management. ++ */ ++ int (*connect_tag_protocol)(struct dsa_switch *ds, ++ enum dsa_tag_protocol proto); + + /* Optional switch-wide initialization and destruction methods */ + int (*setup)(struct dsa_switch *ds); +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -230,8 +230,12 @@ static struct dsa_switch_tree *dsa_tree_ + + static void dsa_tree_free(struct dsa_switch_tree *dst) + { +- if (dst->tag_ops) ++ if (dst->tag_ops) { ++ if (dst->tag_ops->disconnect) ++ dst->tag_ops->disconnect(dst); ++ + dsa_tag_driver_put(dst->tag_ops); ++ } + list_del(&dst->list); + kfree(dst); + } +@@ -805,7 +809,7 @@ static int dsa_switch_setup_tag_protocol + int port, err; + + if (tag_ops->proto == dst->default_proto) +- return 0; ++ goto connect; + + for (port = 0; port < ds->num_ports; port++) { + if (!dsa_is_cpu_port(ds, port)) +@@ -821,6 +825,17 @@ static int dsa_switch_setup_tag_protocol + } + } + ++connect: ++ if (ds->ops->connect_tag_protocol) { ++ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); ++ if (err) { ++ dev_err(ds->dev, ++ "Unable to connect to tag protocol \"%s\": %pe\n", ++ tag_ops->name, ERR_PTR(err)); ++ return err; ++ } ++ } ++ + return 0; + } + +@@ -1132,6 +1147,46 @@ static void dsa_tree_teardown(struct dsa + dst->setup = false; + } + ++static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst, ++ const struct dsa_device_ops *tag_ops) ++{ ++ const struct dsa_device_ops *old_tag_ops = dst->tag_ops; ++ struct dsa_notifier_tag_proto_info info; ++ int err; ++ ++ dst->tag_ops = tag_ops; ++ ++ /* Notify the new tagger about the connection to this tree */ ++ if (tag_ops->connect) { ++ err = tag_ops->connect(dst); ++ if (err) ++ goto out_revert; ++ } ++ ++ /* Notify the switches from this tree about the connection ++ * to the new tagger ++ */ ++ info.tag_ops = tag_ops; ++ err = 
dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info); ++ if (err && err != -EOPNOTSUPP) ++ goto out_disconnect; ++ ++ /* Notify the old tagger about the disconnection from this tree */ ++ if (old_tag_ops->disconnect) ++ old_tag_ops->disconnect(dst); ++ ++ return 0; ++ ++out_disconnect: ++ /* Revert the new tagger's connection to this tree */ ++ if (tag_ops->disconnect) ++ tag_ops->disconnect(dst); ++out_revert: ++ dst->tag_ops = old_tag_ops; ++ ++ return err; ++} ++ + /* Since the dsa/tagging sysfs device attribute is per master, the assumption + * is that all DSA switches within a tree share the same tagger, otherwise + * they would have formed disjoint trees (different "dsa,member" values). +@@ -1164,12 +1219,15 @@ int dsa_tree_change_tag_proto(struct dsa + goto out_unlock; + } + ++ /* Notify the tag protocol change */ + info.tag_ops = tag_ops; + err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); + if (err) +- goto out_unwind_tagger; ++ return err; + +- dst->tag_ops = tag_ops; ++ err = dsa_tree_bind_tag_proto(dst, tag_ops); ++ if (err) ++ goto out_unwind_tagger; + + rtnl_unlock(); + +@@ -1257,6 +1315,7 @@ static int dsa_port_parse_cpu(struct dsa + struct dsa_switch_tree *dst = ds->dst; + const struct dsa_device_ops *tag_ops; + enum dsa_tag_protocol default_proto; ++ int err; + + /* Find out which protocol the switch would prefer. */ + default_proto = dsa_get_tag_protocol(dp, master); +@@ -1304,6 +1363,12 @@ static int dsa_port_parse_cpu(struct dsa + */ + dsa_tag_driver_put(tag_ops); + } else { ++ if (tag_ops->connect) { ++ err = tag_ops->connect(dst); ++ if (err) ++ return err; ++ } ++ + dst->tag_ops = tag_ops; + } + +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -37,6 +37,7 @@ enum { + DSA_NOTIFIER_VLAN_DEL, + DSA_NOTIFIER_MTU, + DSA_NOTIFIER_TAG_PROTO, ++ DSA_NOTIFIER_TAG_PROTO_CONNECT, + DSA_NOTIFIER_MRP_ADD, + DSA_NOTIFIER_MRP_DEL, + DSA_NOTIFIER_MRP_ADD_RING_ROLE, +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -616,6 +616,17 @@ static int dsa_switch_change_tag_proto(s + return 0; + } + ++static int dsa_switch_connect_tag_proto(struct dsa_switch *ds, ++ struct dsa_notifier_tag_proto_info *info) ++{ ++ const struct dsa_device_ops *tag_ops = info->tag_ops; ++ ++ if (!ds->ops->connect_tag_protocol) ++ return -EOPNOTSUPP; ++ ++ return ds->ops->connect_tag_protocol(ds, tag_ops->proto); ++} ++ + static int dsa_switch_mrp_add(struct dsa_switch *ds, + struct dsa_notifier_mrp_info *info) + { +@@ -735,6 +746,9 @@ static int dsa_switch_event(struct notif + case DSA_NOTIFIER_TAG_PROTO: + err = dsa_switch_change_tag_proto(ds, info); + break; ++ case DSA_NOTIFIER_TAG_PROTO_CONNECT: ++ err = dsa_switch_connect_tag_proto(ds, info); ++ break; + case DSA_NOTIFIER_MRP_ADD: + err = dsa_switch_mrp_add(ds, info); + break; diff --git a/target/linux/generic/backport-5.15/701-net-dsa-make-tagging-protocols-connect-to-individual-switches.patch b/target/linux/generic/backport-5.15/701-net-dsa-make-tagging-protocols-connect-to-individual-switches.patch new file mode 100644 index 000000000..f68226069 --- /dev/null +++ b/target/linux/generic/backport-5.15/701-net-dsa-make-tagging-protocols-connect-to-individual-switches.patch @@ -0,0 +1,274 @@ +From 7f2973149c22e7a6fee4c0c9fa6b8e4108e9c208 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Tue, 14 Dec 2021 03:45:36 +0200 +Subject: net: dsa: make tagging protocols connect to individual switches from + a tree + +On the NXP Bluebox 3 board which uses a multi-switch setup with sja1105, +the mechanism through which the tagger connects 
to the switch tree is +broken, due to improper DSA code design. At the time when tag_ops->connect() +is called in dsa_port_parse_cpu(), DSA hasn't finished "touching" all +the ports, so it doesn't know how large the tree is and how many ports +it has. It has just seen the first CPU port by this time. As a result, +this function will call the tagger's ->connect method too early, and the +tagger will connect only to the first switch from the tree. + +This could be perhaps addressed a bit more simply by just moving the +tag_ops->connect(dst) call a bit later (for example in dsa_tree_setup), +but there is already a design inconsistency at present: on the switch +side, the notification is on a per-switch basis, but on the tagger side, +it is on a per-tree basis. Furthermore, the persistent storage itself is +per switch (ds->tagger_data). And the tagger connect and disconnect +procedures (at least the ones that exist currently) could see a fair bit +of simplification if they didn't have to iterate through the switches of +a tree. + +To fix the issue, this change transforms tag_ops->connect(dst) into +tag_ops->connect(ds) and moves it somewhere where we already iterate +over all switches of a tree. That is in dsa_switch_setup_tag_protocol(), +which is a good placement because we already have there the connection +call to the switch side of things. + +As for the dsa_tree_bind_tag_proto() method (called from the code path +that changes the tag protocol), things are a bit more complicated +because we receive the tree as argument, yet when we unwind on errors, +it would be nice to not call tag_ops->disconnect(ds) where we didn't +previously call tag_ops->connect(ds). We didn't have this problem before +because the tag_ops connection operations passed the entire dst before, +and this is more fine grained now. To solve the error rewind case using +the new API, we have to create yet one more cross-chip notifier for +disconnection, and stay connected with the old tag protocol to all the +switches in the tree until we've succeeded to connect with the new one +as well. So if something fails half way, the whole tree is still +connected to the old tagger. But there may still be leaks if the tagger +fails to connect to the 2nd out of 3 switches in a tree: somebody needs +to tell the tagger to disconnect from the first switch. Nothing comes +for free, and this was previously handled privately by the tagging +protocol driver before, but now we need to emit a disconnect cross-chip +notifier for that, because DSA has to take care of the unwind path. We +assume that the tagging protocol has connected to a switch if it has set +ds->tagger_data to something, otherwise we avoid calling its +disconnection method in the error rewind path. + +The rest of the changes are in the tagging protocol drivers, and have to +do with the replacement of dst with ds. The iteration is removed and the +error unwind path is simplified, as mentioned above. + +Signed-off-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + include/net/dsa.h | 5 ++-- + net/dsa/dsa2.c | 44 +++++++++++++----------------- + net/dsa/dsa_priv.h | 1 + + net/dsa/switch.c | 52 ++++++++++++++++++++++++++++++++--- + net/dsa/tag_ocelot_8021q.c | 53 +++++++++++------------------------- + net/dsa/tag_sja1105.c | 67 ++++++++++++++++------------------------------ + 6 files changed, 109 insertions(+), 113 deletions(-) + +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -80,15 +80,14 @@ enum dsa_tag_protocol { + }; + + struct dsa_switch; +-struct dsa_switch_tree; + + struct dsa_device_ops { + struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); + void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, + int *offset); +- int (*connect)(struct dsa_switch_tree *dst); +- void (*disconnect)(struct dsa_switch_tree *dst); ++ int (*connect)(struct dsa_switch *ds); ++ void (*disconnect)(struct dsa_switch *ds); + unsigned int needed_headroom; + unsigned int needed_tailroom; + const char *name; +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -230,12 +230,8 @@ static struct dsa_switch_tree *dsa_tree_ + + static void dsa_tree_free(struct dsa_switch_tree *dst) + { +- if (dst->tag_ops) { +- if (dst->tag_ops->disconnect) +- dst->tag_ops->disconnect(dst); +- ++ if (dst->tag_ops) + dsa_tag_driver_put(dst->tag_ops); +- } + list_del(&dst->list); + kfree(dst); + } +@@ -826,17 +822,29 @@ static int dsa_switch_setup_tag_protocol + } + + connect: ++ if (tag_ops->connect) { ++ err = tag_ops->connect(ds); ++ if (err) ++ return err; ++ } ++ + if (ds->ops->connect_tag_protocol) { + err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); + if (err) { + dev_err(ds->dev, + "Unable to connect to tag protocol \"%s\": %pe\n", + tag_ops->name, ERR_PTR(err)); +- return err; ++ goto disconnect; + } + } + + return 0; ++ ++disconnect: ++ if (tag_ops->disconnect) ++ tag_ops->disconnect(ds); ++ ++ return err; + } + + static int dsa_switch_setup(struct dsa_switch *ds) +@@ -1156,13 +1164,6 @@ static int dsa_tree_bind_tag_proto(struc + + dst->tag_ops = tag_ops; + +- /* Notify the new tagger about the connection to this tree */ +- if (tag_ops->connect) { +- err = tag_ops->connect(dst); +- if (err) +- goto out_revert; +- } +- + /* Notify the switches from this tree about the connection + * to the new tagger + */ +@@ -1172,16 +1173,14 @@ static int dsa_tree_bind_tag_proto(struc + goto out_disconnect; + + /* Notify the old tagger about the disconnection from this tree */ +- if (old_tag_ops->disconnect) +- old_tag_ops->disconnect(dst); ++ info.tag_ops = old_tag_ops; ++ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + + return 0; + + out_disconnect: +- /* Revert the new tagger's connection to this tree */ +- if (tag_ops->disconnect) +- tag_ops->disconnect(dst); +-out_revert: ++ info.tag_ops = tag_ops; ++ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info); + dst->tag_ops = old_tag_ops; + + return err; +@@ -1315,7 +1314,6 @@ static int dsa_port_parse_cpu(struct dsa + struct dsa_switch_tree *dst = ds->dst; + const struct dsa_device_ops *tag_ops; + enum dsa_tag_protocol default_proto; +- int err; + + /* Find out which protocol the switch would prefer. 
*/ + default_proto = dsa_get_tag_protocol(dp, master); +@@ -1363,12 +1361,6 @@ static int dsa_port_parse_cpu(struct dsa + */ + dsa_tag_driver_put(tag_ops); + } else { +- if (tag_ops->connect) { +- err = tag_ops->connect(dst); +- if (err) +- return err; +- } +- + dst->tag_ops = tag_ops; + } + +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -38,6 +38,7 @@ enum { + DSA_NOTIFIER_MTU, + DSA_NOTIFIER_TAG_PROTO, + DSA_NOTIFIER_TAG_PROTO_CONNECT, ++ DSA_NOTIFIER_TAG_PROTO_DISCONNECT, + DSA_NOTIFIER_MRP_ADD, + DSA_NOTIFIER_MRP_DEL, + DSA_NOTIFIER_MRP_ADD_RING_ROLE, +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -616,15 +616,58 @@ static int dsa_switch_change_tag_proto(s + return 0; + } + +-static int dsa_switch_connect_tag_proto(struct dsa_switch *ds, +- struct dsa_notifier_tag_proto_info *info) ++/* We use the same cross-chip notifiers to inform both the tagger side, as well ++ * as the switch side, of connection and disconnection events. ++ * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the ++ * switch side doesn't support connecting to this tagger, and therefore, the ++ * fact that we don't disconnect the tagger side doesn't constitute a memory ++ * leak: the tagger will still operate with persistent per-switch memory, just ++ * with the switch side unconnected to it. What does constitute a hard error is ++ * when the switch side supports connecting but fails. ++ */ ++static int ++dsa_switch_connect_tag_proto(struct dsa_switch *ds, ++ struct dsa_notifier_tag_proto_info *info) + { + const struct dsa_device_ops *tag_ops = info->tag_ops; ++ int err; ++ ++ /* Notify the new tagger about the connection to this switch */ ++ if (tag_ops->connect) { ++ err = tag_ops->connect(ds); ++ if (err) ++ return err; ++ } + + if (!ds->ops->connect_tag_protocol) + return -EOPNOTSUPP; + +- return ds->ops->connect_tag_protocol(ds, tag_ops->proto); ++ /* Notify the switch about the connection to the new tagger */ ++ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto); ++ if (err) { ++ /* Revert the new tagger's connection to this tree */ ++ if (tag_ops->disconnect) ++ tag_ops->disconnect(ds); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int ++dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, ++ struct dsa_notifier_tag_proto_info *info) ++{ ++ const struct dsa_device_ops *tag_ops = info->tag_ops; ++ ++ /* Notify the tagger about the disconnection from this switch */ ++ if (tag_ops->disconnect && ds->tagger_data) ++ tag_ops->disconnect(ds); ++ ++ /* No need to notify the switch, since it shouldn't have any ++ * resources to tear down ++ */ ++ return 0; + } + + static int dsa_switch_mrp_add(struct dsa_switch *ds, +@@ -749,6 +792,9 @@ static int dsa_switch_event(struct notif + case DSA_NOTIFIER_TAG_PROTO_CONNECT: + err = dsa_switch_connect_tag_proto(ds, info); + break; ++ case DSA_NOTIFIER_TAG_PROTO_DISCONNECT: ++ err = dsa_switch_disconnect_tag_proto(ds, info); ++ break; + case DSA_NOTIFIER_MRP_ADD: + err = dsa_switch_mrp_add(ds, info); + break; diff --git a/target/linux/generic/backport-5.15/730-v5.16-hv-utils-add-PTP_1588_CLOCK-to-Kconfig-to-fix-build.patch b/target/linux/generic/backport-5.15/730-v5.16-hv-utils-add-PTP_1588_CLOCK-to-Kconfig-to-fix-build.patch new file mode 100644 index 000000000..7af35ab87 --- /dev/null +++ b/target/linux/generic/backport-5.15/730-v5.16-hv-utils-add-PTP_1588_CLOCK-to-Kconfig-to-fix-build.patch @@ -0,0 +1,46 @@ +From fba2153a200716c1fec1eafda7356bb347589efb Mon Sep 17 00:00:00 2001 +From: Randy Dunlap +Date: Thu, 25 Nov 2021 
18:33:16 -0800 +Subject: [PATCH] hv: utils: add PTP_1588_CLOCK to Kconfig to fix build + +The hyperv utilities use PTP clock interfaces and should depend a +a kconfig symbol such that they will be built as a loadable module or +builtin so that linker errors do not happen. + +Prevents these build errors: + +ld: drivers/hv/hv_util.o: in function `hv_timesync_deinit': +hv_util.c:(.text+0x37d): undefined reference to `ptp_clock_unregister' +ld: drivers/hv/hv_util.o: in function `hv_timesync_init': +hv_util.c:(.text+0x738): undefined reference to `ptp_clock_register' + +References: https://lore.kernel.org/stable/20220328093115.7486-1-ynezz@true.cz/T/#u +Fixes: 3716a49a81ba ("hv_utils: implement Hyper-V PTP source") +Signed-off-by: Randy Dunlap +Reported-by: kernel test robot +Cc: Arnd Bergmann +Cc: "K. Y. Srinivasan" +Cc: Haiyang Zhang +Cc: Stephen Hemminger +Cc: Wei Liu +Cc: Dexuan Cui +Cc: linux-hyperv@vger.kernel.org +Cc: Greg Kroah-Hartman +Reviewed-by: Michael Kelley +Link: https://lore.kernel.org/r/20211126023316.25184-1-rdunlap@infradead.org +Signed-off-by: Wei Liu +(cherry picked from commit 1dc2f2b81a6a9895da59f3915760f6c0c3074492) +--- + drivers/hv/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/hv/Kconfig ++++ b/drivers/hv/Kconfig +@@ -18,6 +18,7 @@ config HYPERV_TIMER + config HYPERV_UTILS + tristate "Microsoft Hyper-V Utilities driver" + depends on HYPERV && CONNECTOR && NLS ++ depends on PTP_1588_CLOCK_OPTIONAL + help + Select this option to enable the Hyper-V Utilities. + diff --git a/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch b/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch new file mode 100644 index 000000000..df9518d86 --- /dev/null +++ b/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch @@ -0,0 +1,48 @@ +From 3b00a07c2443745d62babfe08dbb2ad8e649526e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Fri, 19 Nov 2021 03:03:49 +0100 +Subject: [PATCH] net: dsa: qca8k: fix internal delay applied to the wrong PAD + config + +With SGMII phy the internal delay is always applied to the PAD0 config. +This is caused by the falling edge configuration that hardcode the reg +to PAD0 (as the falling edge bits are present only in PAD0 reg) +Move the delay configuration before the reg overwrite to correctly apply +the delay. + +Fixes: cef08115846e ("net: dsa: qca8k: set internal delay also for sgmii") +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1433,6 +1433,12 @@ qca8k_phylink_mac_config(struct dsa_swit + + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + ++ /* From original code is reported port instability as SGMII also ++ * require delay set. Apply advised values here or take them from DT. ++ */ ++ if (state->interface == PHY_INTERFACE_MODE_SGMII) ++ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); ++ + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and + * falling edge is set writing in the PORT0 PAD reg + */ +@@ -1455,12 +1461,6 @@ qca8k_phylink_mac_config(struct dsa_swit + QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, + val); + +- /* From original code is reported port instability as SGMII also +- * require delay set. 
Apply advised values here or take them from DT. +- */ +- if (state->interface == PHY_INTERFACE_MODE_SGMII) +- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); +- + break; + default: + dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch b/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch new file mode 100644 index 000000000..7348d93ec --- /dev/null +++ b/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch @@ -0,0 +1,46 @@ +From 65258b9d8cde45689bdc86ca39b50f01f983733b Mon Sep 17 00:00:00 2001 +From: Robert Marko +Date: Fri, 19 Nov 2021 03:03:50 +0100 +Subject: [PATCH] net: dsa: qca8k: fix MTU calculation + +qca8k has a global MTU, so its tracking the MTU per port to make sure +that the largest MTU gets applied. +Since it uses the frame size instead of MTU the driver MTU change function +will then add the size of Ethernet header and checksum on top of MTU. + +The driver currently populates the per port MTU size as Ethernet frame +length + checksum which equals 1518. + +The issue is that then MTU change function will go through all of the +ports, find the largest MTU and apply the Ethernet header + checksum on +top of it again, so for a desired MTU of 1500 you will end up with 1536. + +This is obviously incorrect, so to correct it populate the per port struct +MTU with just the MTU and not include the Ethernet header + checksum size +as those will be added by the MTU change function. + +Fixes: f58d2598cf70 ("net: dsa: qca8k: implement the port MTU callbacks") +Signed-off-by: Robert Marko +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds) + /* Set initial MTU for every port. + * We have only have a general MTU setting. So track + * every port and set the max across all port. ++ * Set per port MTU to 1500 as the MTU change function ++ * will add the overhead and if its set to 1518 then it ++ * will apply the overhead again and we will end up with ++ * MTU of 1536 instead of 1518 + */ +- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; ++ priv->port_mtu[i] = ETH_DATA_LEN; + } + + /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ diff --git a/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch b/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch new file mode 100644 index 000000000..f477b1b92 --- /dev/null +++ b/target/linux/generic/backport-5.15/753-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch @@ -0,0 +1,29 @@ +From b9133f3ef5a2659730cf47a74bd0a9259f1cf8ff Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:40 +0100 +Subject: net: dsa: qca8k: remove redundant check in parse_port_config + +The very next check for port 0 and 6 already makes sure we don't go out +of bounds with the ports_config delay table. +Remove the redundant check. + +Reported-by: kernel test robot +Reported-by: Dan Carpenter +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -983,7 +983,7 @@ qca8k_parse_port_config(struct qca8k_pri + u32 delay; + + /* We have 2 CPU port. Check them */ +- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { ++ for (port = 0; port < QCA8K_NUM_PORTS; port++) { + /* Skip every other port */ + if (port != 0 && port != 6) + continue; diff --git a/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch b/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch new file mode 100644 index 000000000..408a59df8 --- /dev/null +++ b/target/linux/generic/backport-5.15/754-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch @@ -0,0 +1,507 @@ +From 90ae68bfc2ffcb54a4ba4f64edbeb84a80cbb57c Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:41 +0100 +Subject: net: dsa: qca8k: convert to GENMASK/FIELD_PREP/FIELD_GET + +Convert and try to standardize bit fields using +GENMASK/FIELD_PREP/FIELD_GET macros. Rework some logic to support the +standard macro and tidy things up. No functional change intended. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 98 +++++++++++++++---------------- + drivers/net/dsa/qca8k.h | 153 ++++++++++++++++++++++++++---------------------- + 2 files changed, 130 insertions(+), 121 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -319,18 +320,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, + } + + /* vid - 83:72 */ +- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M; ++ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); + /* aging - 67:64 */ +- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M; ++ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); + /* portmask - 54:48 */ +- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M; ++ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); + /* mac - 47:0 */ +- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff; +- fdb->mac[1] = reg[1] & 0xff; +- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff; +- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff; +- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff; +- fdb->mac[5] = reg[0] & 0xff; ++ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]); ++ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]); ++ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]); ++ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]); ++ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]); ++ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]); + + return 0; + } +@@ -343,18 +344,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, + int i; + + /* vid - 83:72 */ +- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S; ++ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); + /* aging - 67:64 */ +- reg[2] |= aging & QCA8K_ATU_STATUS_M; ++ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging); + /* portmask - 54:48 */ +- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S; ++ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask); + /* mac - 47:0 */ +- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S; +- reg[1] |= mac[1]; +- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S; +- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S; +- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S; +- 
reg[0] |= mac[5]; ++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]); ++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]); ++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); + + /* load the array into the ARL table */ + for (i = 0; i < 3; i++) +@@ -372,7 +373,7 @@ qca8k_fdb_access(struct qca8k_priv *priv + reg |= cmd; + if (port >= 0) { + reg |= QCA8K_ATU_FUNC_PORT_EN; +- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S; ++ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port); + } + + /* Write the function register triggering the table access */ +@@ -454,7 +455,7 @@ qca8k_vlan_access(struct qca8k_priv *pri + /* Set the command and VLAN index */ + reg = QCA8K_VTU_FUNC1_BUSY; + reg |= cmd; +- reg |= vid << QCA8K_VTU_FUNC1_VID_S; ++ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); + + /* Write the function register triggering the table access */ + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); +@@ -500,13 +501,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, + if (ret < 0) + goto out; + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; +- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); ++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); + if (untagged) +- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port); + else +- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port); + + ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + if (ret) +@@ -534,15 +533,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, + ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®); + if (ret < 0) + goto out; +- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); +- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << +- QCA8K_VTU_FUNC0_EG_MODE_S(port); ++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); ++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port); + + /* Check if we're the last member to be removed */ + del = true; + for (i = 0; i < QCA8K_NUM_PORTS; i++) { +- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT; +- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i); ++ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i); + + if ((reg & mask) != mask) { + del = false; +@@ -1014,7 +1011,7 @@ qca8k_parse_port_config(struct qca8k_pri + mode == PHY_INTERFACE_MODE_RGMII_TXID) + delay = 1; + +- if (delay > QCA8K_MAX_DELAY) { ++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { + dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } +@@ -1030,7 +1027,7 @@ qca8k_parse_port_config(struct qca8k_pri + mode == PHY_INTERFACE_MODE_RGMII_RXID) + delay = 2; + +- if (delay > QCA8K_MAX_DELAY) { ++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { + dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); + delay = 3; + } +@@ -1141,8 +1138,8 @@ qca8k_setup(struct dsa_switch *ds) + /* Enable QCA header mode on all cpu ports */ + if (dsa_is_cpu_port(ds, i)) { + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | +- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); ++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | ++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); + if (ret) { + dev_err(priv->dev, "failed enabling QCA header 
mode"); + return ret; +@@ -1159,10 +1156,10 @@ qca8k_setup(struct dsa_switch *ds) + * for igmp, unknown, multicast and broadcast packet + */ + ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | +- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | ++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); + if (ret) + return ret; + +@@ -1180,8 +1177,6 @@ qca8k_setup(struct dsa_switch *ds) + + /* Individual user ports get connected to CPU port only */ + if (dsa_is_user_port(ds, i)) { +- int shift = 16 * (i % 2); +- + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + QCA8K_PORT_LOOKUP_MEMBER, + BIT(cpu_port)); +@@ -1198,8 +1193,8 @@ qca8k_setup(struct dsa_switch *ds) + * default egress vid + */ + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), +- 0xfff << shift, +- QCA8K_PORT_VID_DEF << shift); ++ QCA8K_EGREES_VLAN_PORT_MASK(i), ++ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); + if (ret) + return ret; + +@@ -1246,7 +1241,7 @@ qca8k_setup(struct dsa_switch *ds) + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN; + qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), +- QCA8K_PORT_HOL_CTRL1_ING_BUF | ++ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN, +@@ -1269,8 +1264,8 @@ qca8k_setup(struct dsa_switch *ds) + mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | + QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496); + qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH, +- QCA8K_GLOBAL_FC_GOL_XON_THRES_S | +- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S, ++ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK | ++ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, + mask); + } + +@@ -1916,11 +1911,11 @@ qca8k_port_vlan_filtering(struct dsa_swi + + if (vlan_filtering) { + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_VLAN_MODE, ++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); + } else { + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), +- QCA8K_PORT_LOOKUP_VLAN_MODE, ++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); + } + +@@ -1944,10 +1939,9 @@ qca8k_port_vlan_add(struct dsa_switch *d + } + + if (pvid) { +- int shift = 16 * (port % 2); +- + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), +- 0xfff << shift, vlan->vid << shift); ++ QCA8K_EGREES_VLAN_PORT_MASK(port), ++ QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); + if (ret) + return ret; + +@@ -2041,7 +2035,7 @@ static int qca8k_read_switch_id(struct q + if (ret < 0) + return -ENODEV; + +- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); ++ id = QCA8K_MASK_CTRL_DEVICE_ID(val); + if (id != data->id) { + dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id); + return -ENODEV; +@@ -2050,7 +2044,7 @@ static int qca8k_read_switch_id(struct q + priv->switch_id = id; + + /* Save revision to communicate to the internal PHY driver */ +- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK); ++ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); + + return 0; + } +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -30,9 +30,9 @@ + /* Global control registers */ + #define QCA8K_REG_MASK_CTRL 0x000 + #define QCA8K_MASK_CTRL_REV_ID_MASK 
GENMASK(7, 0) +-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0) ++#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) + #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) +-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) ++#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) + #define QCA8K_REG_PORT0_PAD_CTRL 0x004 + #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) + #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) +@@ -41,12 +41,11 @@ + #define QCA8K_REG_PORT6_PAD_CTRL 0x00c + #define QCA8K_PORT_PAD_RGMII_EN BIT(26) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) +-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) ++#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) +-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) ++#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) + #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) + #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) +-#define QCA8K_MAX_DELAY 3 + #define QCA8K_PORT_PAD_SGMII_EN BIT(7) + #define QCA8K_REG_PWS 0x010 + #define QCA8K_PWS_POWER_ON_SEL BIT(31) +@@ -68,10 +67,12 @@ + #define QCA8K_MDIO_MASTER_READ BIT(27) + #define QCA8K_MDIO_MASTER_WRITE 0 + #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) +-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) +-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) +-#define QCA8K_MDIO_MASTER_DATA(x) (x) ++#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) ++#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) ++#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) ++#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) + #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) ++#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) + #define QCA8K_MDIO_MASTER_MAX_PORTS 5 + #define QCA8K_MDIO_MASTER_MAX_REG 32 + #define QCA8K_GOL_MAC_ADDR0 0x60 +@@ -93,9 +94,7 @@ + #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) + #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) + #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) +-#define QCA8K_PORT_HDR_CTRL_RX_S 2 + #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) +-#define QCA8K_PORT_HDR_CTRL_TX_S 0 + #define QCA8K_PORT_HDR_CTRL_ALL 2 + #define QCA8K_PORT_HDR_CTRL_MGMT 1 + #define QCA8K_PORT_HDR_CTRL_NONE 0 +@@ -105,10 +104,11 @@ + #define QCA8K_SGMII_EN_TX BIT(3) + #define QCA8K_SGMII_EN_SD BIT(4) + #define QCA8K_SGMII_CLK125M_DELAY BIT(7) +-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23)) +-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22) +-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) +-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) ++#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) ++#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) ++#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) ++#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) ++#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) + + /* MAC_PWR_SEL registers */ + #define QCA8K_REG_MAC_PWR_SEL 0x0e4 +@@ -121,100 +121,115 @@ + + /* ACL registers */ + #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) +-#define QCA8K_PORT_VLAN_CVID(x) (x << 16) +-#define QCA8K_PORT_VLAN_SVID(x) x ++#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) ++#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) ++#define QCA8K_PORT_VLAN_SVID_MASK 
GENMASK(11, 0) ++#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) + #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) + #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 + #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 + + /* Lookup registers */ + #define QCA8K_REG_ATU_DATA0 0x600 +-#define QCA8K_ATU_ADDR2_S 24 +-#define QCA8K_ATU_ADDR3_S 16 +-#define QCA8K_ATU_ADDR4_S 8 ++#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) ++#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) ++#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) ++#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) + #define QCA8K_REG_ATU_DATA1 0x604 +-#define QCA8K_ATU_PORT_M 0x7f +-#define QCA8K_ATU_PORT_S 16 +-#define QCA8K_ATU_ADDR0_S 8 ++#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) ++#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) ++#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) + #define QCA8K_REG_ATU_DATA2 0x608 +-#define QCA8K_ATU_VID_M 0xfff +-#define QCA8K_ATU_VID_S 8 +-#define QCA8K_ATU_STATUS_M 0xf ++#define QCA8K_ATU_VID_MASK GENMASK(19, 8) ++#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) + #define QCA8K_ATU_STATUS_STATIC 0xf + #define QCA8K_REG_ATU_FUNC 0x60c + #define QCA8K_ATU_FUNC_BUSY BIT(31) + #define QCA8K_ATU_FUNC_PORT_EN BIT(14) + #define QCA8K_ATU_FUNC_MULTI_EN BIT(13) + #define QCA8K_ATU_FUNC_FULL BIT(12) +-#define QCA8K_ATU_FUNC_PORT_M 0xf +-#define QCA8K_ATU_FUNC_PORT_S 8 ++#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) + #define QCA8K_REG_VTU_FUNC0 0x610 + #define QCA8K_VTU_FUNC0_VALID BIT(20) + #define QCA8K_VTU_FUNC0_IVL_EN BIT(19) +-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 +-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 +-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 +-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 +-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 ++/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) ++ * It does contain VLAN_MODE for each port [5:4] for port0, ++ * [7:6] for port1 ... [17:16] for port6. Use virtual port ++ * define to handle this. 
++ */ ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) ++#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) ++#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) ++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) + #define QCA8K_REG_VTU_FUNC1 0x614 + #define QCA8K_VTU_FUNC1_BUSY BIT(31) +-#define QCA8K_VTU_FUNC1_VID_S 16 ++#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) + #define QCA8K_VTU_FUNC1_FULL BIT(4) + #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 + #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) + #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 +-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24 +-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16 +-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8 +-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 ++#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) ++#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) ++#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) ++#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0) + #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) + #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) +-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) ++#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) + #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) +-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16) +-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16) +-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) ++#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) ++#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) ++#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) ++#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) ++#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) ++#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) + + #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 +-#define 
QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16) +-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16) +-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0) +-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0) ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) ++#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) ++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) + + #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20) +-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20) +-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24) +-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) ++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) ++#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) + + #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) +-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0) +-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0) ++#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) ++#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) + #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) + #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) + #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) + #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) + + /* Pkt edit registers */ ++#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) ++#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) ++#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) + #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) + + /* L3 registers */ diff --git a/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch 
b/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch new file mode 100644 index 000000000..8c39b8ea2 --- /dev/null +++ b/target/linux/generic/backport-5.15/755-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch @@ -0,0 +1,25 @@ +From 994c28b6f971fa5db8ae977daea37eee87d93d51 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:42 +0100 +Subject: net: dsa: qca8k: remove extra mutex_init in qca8k_setup + +Mutex is already init in sw_probe. Remove the extra init in qca8k_setup. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1086,8 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- mutex_init(&priv->reg_mutex); +- + /* Start by setting up the register mapping */ + priv->regmap = devm_regmap_init(ds->dev, NULL, priv, + &qca8k_regmap_config); diff --git a/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch b/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch new file mode 100644 index 000000000..f873b70d0 --- /dev/null +++ b/target/linux/generic/backport-5.15/756-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch @@ -0,0 +1,46 @@ +From 36b8af12f424e7a7f60a935c60a0fd4aa0822378 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:43 +0100 +Subject: net: dsa: qca8k: move regmap init in probe and set it mandatory + +In preparation for regmap conversion, move regmap init in the probe +function and make it mandatory as any read/write/rmw operation will be +converted to regmap API. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1086,12 +1086,6 @@ qca8k_setup(struct dsa_switch *ds) + if (ret) + return ret; + +- /* Start by setting up the register mapping */ +- priv->regmap = devm_regmap_init(ds->dev, NULL, priv, +- &qca8k_regmap_config); +- if (IS_ERR(priv->regmap)) +- dev_warn(priv->dev, "regmap initialization failed"); +- + ret = qca8k_setup_mdio_bus(priv); + if (ret) + return ret; +@@ -2077,6 +2071,14 @@ qca8k_sw_probe(struct mdio_device *mdiod + gpiod_set_value_cansleep(priv->reset_gpio, 0); + } + ++ /* Start by setting up the register mapping */ ++ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, ++ &qca8k_regmap_config); ++ if (IS_ERR(priv->regmap)) { ++ dev_err(priv->dev, "regmap initialization failed"); ++ return PTR_ERR(priv->regmap); ++ } ++ + /* Check the detected switch id */ + ret = qca8k_read_switch_id(priv); + if (ret) diff --git a/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch b/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch new file mode 100644 index 000000000..4ca9c8ba4 --- /dev/null +++ b/target/linux/generic/backport-5.15/757-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch @@ -0,0 +1,249 @@ +From 8b5f3f29a81a71934d004e21a1292c1148b05926 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:44 +0100 +Subject: net: dsa: qca8k: initial conversion to regmap helper + +Convert any qca8k set/clear/pool to regmap helper and add +missing config to regmap_config struct. +Read/write/rmw operation are reworked to use the regmap helper +internally to keep the delta of this patch low. These additional +function will then be dropped when the code split will be proposed. + +Ipq40xx SoC have the internal switch based on the qca8k regmap but use +mmio for read/write/rmw operation instead of mdio. +In preparation for the support of this internal switch, convert the +driver to regmap API to later split the driver to common and specific +code. The overhead introduced by the use of regamp API is marginal as the +internal mdio will bypass it by using its direct access and regmap will be +used only by configuration functions or fdb access. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 107 +++++++++++++++++++++--------------------------- + 1 file changed, 47 insertions(+), 60 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -152,6 +153,25 @@ qca8k_set_page(struct mii_bus *bus, u16 + static int + qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) + { ++ return regmap_read(priv->regmap, reg, val); ++} ++ ++static int ++qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) ++{ ++ return regmap_write(priv->regmap, reg, val); ++} ++ ++static int ++qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) ++{ ++ return regmap_update_bits(priv->regmap, reg, mask, write_val); ++} ++ ++static int ++qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) ++{ ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + int ret; +@@ -172,8 +192,9 @@ exit: + } + + static int +-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) ++qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) + { ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + int ret; +@@ -194,8 +215,9 @@ exit: + } + + static int +-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) ++qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) + { ++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx; + struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; +@@ -223,34 +245,6 @@ exit: + return ret; + } + +-static int +-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val) +-{ +- return qca8k_rmw(priv, reg, 0, val); +-} +- +-static int +-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val) +-{ +- return qca8k_rmw(priv, reg, val, 0); +-} +- +-static int +-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +- +- return qca8k_read(priv, reg, val); +-} +- +-static int +-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) +-{ +- struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +- +- return qca8k_write(priv, reg, val); +-} +- + static const struct regmap_range qca8k_readable_ranges[] = { + regmap_reg_range(0x0000, 0x00e4), /* Global control */ + regmap_reg_range(0x0100, 0x0168), /* EEE control */ +@@ -282,26 +276,19 @@ static struct regmap_config qca8k_regmap + .max_register = 0x16ac, /* end MIB - Port6 range */ + .reg_read = qca8k_regmap_read, + .reg_write = qca8k_regmap_write, ++ .reg_update_bits = qca8k_regmap_update_bits, + .rd_table = &qca8k_readable_table, ++ .disable_locking = true, /* Locking is handled by qca8k read/write */ ++ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ + }; + + static int + qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) + { +- int ret, ret1; + u32 val; + +- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask), +- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, +- priv, reg, &val); +- +- /* Check if qca8k_read has failed for a different reason +- * before returning -ETIMEDOUT +- */ +- if (ret < 0 && ret1 < 0) +- return ret1; +- +- return ret; ++ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, ++ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); + } + + static int +@@ -568,7 +555,7 @@ qca8k_mib_init(struct qca8k_priv *priv) + int ret; + + mutex_lock(&priv->reg_mutex); +- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, 
QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); + if (ret) + goto exit; + +@@ -576,7 +563,7 @@ qca8k_mib_init(struct qca8k_priv *priv) + if (ret) + goto exit; + +- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); + if (ret) + goto exit; + +@@ -597,9 +584,9 @@ qca8k_port_set_status(struct qca8k_priv + mask |= QCA8K_PORT_STATUS_LINK_AUTO; + + if (enable) +- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask); ++ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); + else +- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); ++ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); + } + + static u32 +@@ -861,8 +848,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv * + * a dt-overlay and driver reload changed the configuration + */ + +- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL, +- QCA8K_MDIO_MASTER_EN); ++ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL, ++ QCA8K_MDIO_MASTER_EN); + } + + /* Check if the devicetree declare the port:phy mapping */ +@@ -1099,16 +1086,16 @@ qca8k_setup(struct dsa_switch *ds) + return ret; + + /* Make sure MAC06 is disabled */ +- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, +- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); ++ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, ++ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); + if (ret) { + dev_err(priv->dev, "failed disabling MAC06 exchange"); + return ret; + } + + /* Enable CPU Port */ +- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, +- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); ++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, ++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + if (ret) { + dev_err(priv->dev, "failed enabling CPU port"); + return ret; +@@ -1176,8 +1163,8 @@ qca8k_setup(struct dsa_switch *ds) + return ret; + + /* Enable ARP Auto-learning by default */ +- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), +- QCA8K_PORT_LOOKUP_LEARN); ++ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), ++ QCA8K_PORT_LOOKUP_LEARN); + if (ret) + return ret; + +@@ -1745,9 +1732,9 @@ qca8k_port_bridge_join(struct dsa_switch + /* Add this port to the portvlan mask of the other ports + * in the bridge + */ +- ret = qca8k_reg_set(priv, +- QCA8K_PORT_LOOKUP_CTRL(i), +- BIT(port)); ++ ret = regmap_set_bits(priv->regmap, ++ QCA8K_PORT_LOOKUP_CTRL(i), ++ BIT(port)); + if (ret) + return ret; + if (i != port) +@@ -1777,9 +1764,9 @@ qca8k_port_bridge_leave(struct dsa_switc + /* Remove this port to the portvlan mask of the other ports + * in the bridge + */ +- qca8k_reg_clear(priv, +- QCA8K_PORT_LOOKUP_CTRL(i), +- BIT(port)); ++ regmap_clear_bits(priv->regmap, ++ QCA8K_PORT_LOOKUP_CTRL(i), ++ BIT(port)); + } + + /* Set the cpu port to be the only one in the portvlan mask of diff --git a/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch b/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch new file mode 100644 index 000000000..1465d1f35 --- /dev/null +++ b/target/linux/generic/backport-5.15/758-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch @@ -0,0 +1,120 @@ +From c126f118b330ccf0db0dda4a4bd6c729865a205f Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:45 +0100 +Subject: net: dsa: qca8k: add additional MIB counter and make it dynamic + +We are currently 
missing 2 additionals MIB counter present in QCA833x +switch. +QC832x switch have 39 MIB counter and QCA833X have 41 MIB counter. +Add the additional MIB counter and rework the MIB function to print the +correct supported counter from the match_data struct. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++--- + drivers/net/dsa/qca8k.h | 4 ++++ + 2 files changed, 24 insertions(+), 3 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -70,6 +70,8 @@ static const struct qca8k_mib_desc ar832 + MIB_DESC(1, 0x9c, "TxExcDefer"), + MIB_DESC(1, 0xa0, "TxDefer"), + MIB_DESC(1, 0xa4, "TxLateCol"), ++ MIB_DESC(1, 0xa8, "RXUnicast"), ++ MIB_DESC(1, 0xac, "TXUnicast"), + }; + + /* The 32bit switch registers are accessed indirectly. To achieve this we need +@@ -1605,12 +1607,16 @@ qca8k_phylink_mac_link_up(struct dsa_swi + static void + qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) + { ++ const struct qca8k_match_data *match_data; ++ struct qca8k_priv *priv = ds->priv; + int i; + + if (stringset != ETH_SS_STATS) + return; + +- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) ++ match_data = of_device_get_match_data(priv->dev); ++ ++ for (i = 0; i < match_data->mib_count; i++) + strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, + ETH_GSTRING_LEN); + } +@@ -1620,12 +1626,15 @@ qca8k_get_ethtool_stats(struct dsa_switc + uint64_t *data) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; ++ const struct qca8k_match_data *match_data; + const struct qca8k_mib_desc *mib; + u32 reg, i, val; + u32 hi = 0; + int ret; + +- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { ++ match_data = of_device_get_match_data(priv->dev); ++ ++ for (i = 0; i < match_data->mib_count; i++) { + mib = &ar8327_mib[i]; + reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; + +@@ -1648,10 +1657,15 @@ qca8k_get_ethtool_stats(struct dsa_switc + static int + qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) + { ++ const struct qca8k_match_data *match_data; ++ struct qca8k_priv *priv = ds->priv; ++ + if (sset != ETH_SS_STATS) + return 0; + +- return ARRAY_SIZE(ar8327_mib); ++ match_data = of_device_get_match_data(priv->dev); ++ ++ return match_data->mib_count; + } + + static int +@@ -2154,14 +2168,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, + static const struct qca8k_match_data qca8327 = { + .id = QCA8K_ID_QCA8327, + .reduced_package = true, ++ .mib_count = QCA8K_QCA832X_MIB_COUNT, + }; + + static const struct qca8k_match_data qca8328 = { + .id = QCA8K_ID_QCA8327, ++ .mib_count = QCA8K_QCA832X_MIB_COUNT, + }; + + static const struct qca8k_match_data qca833x = { + .id = QCA8K_ID_QCA8337, ++ .mib_count = QCA8K_QCA833X_MIB_COUNT, + }; + + static const struct of_device_id qca8k_of_match[] = { +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -21,6 +21,9 @@ + #define PHY_ID_QCA8337 0x004dd036 + #define QCA8K_ID_QCA8337 0x13 + ++#define QCA8K_QCA832X_MIB_COUNT 39 ++#define QCA8K_QCA833X_MIB_COUNT 41 ++ + #define QCA8K_BUSY_WAIT_TIMEOUT 2000 + + #define QCA8K_NUM_FDB_RECORDS 2048 +@@ -279,6 +282,7 @@ struct ar8xxx_port_status { + struct qca8k_match_data { + u8 id; + bool reduced_package; ++ u8 mib_count; + }; + + enum { diff --git a/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch b/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch new file mode 
100644 index 000000000..973446ec5 --- /dev/null +++ b/target/linux/generic/backport-5.15/759-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch @@ -0,0 +1,53 @@ +From 4592538bfb0d5d3c3c8a1d7071724d081412ac91 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:46 +0100 +Subject: net: dsa: qca8k: add support for port fast aging + +The switch supports fast aging by flushing any rule in the ARL +table for a specific port. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 11 +++++++++++ + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 12 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1790,6 +1790,16 @@ qca8k_port_bridge_leave(struct dsa_switc + QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); + } + ++static void ++qca8k_port_fast_age(struct dsa_switch *ds, int port) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ ++ mutex_lock(&priv->reg_mutex); ++ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); ++ mutex_unlock(&priv->reg_mutex); ++} ++ + static int + qca8k_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +@@ -1998,6 +2008,7 @@ static const struct dsa_switch_ops qca8k + .port_stp_state_set = qca8k_port_stp_state_set, + .port_bridge_join = qca8k_port_bridge_join, + .port_bridge_leave = qca8k_port_bridge_leave, ++ .port_fast_age = qca8k_port_fast_age, + .port_fdb_add = qca8k_port_fdb_add, + .port_fdb_del = qca8k_port_fdb_del, + .port_fdb_dump = qca8k_port_fdb_dump, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -262,6 +262,7 @@ enum qca8k_fdb_cmd { + QCA8K_FDB_FLUSH = 1, + QCA8K_FDB_LOAD = 2, + QCA8K_FDB_PURGE = 3, ++ QCA8K_FDB_FLUSH_PORT = 5, + QCA8K_FDB_NEXT = 6, + QCA8K_FDB_SEARCH = 7, + }; diff --git a/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch b/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch new file mode 100644 index 000000000..29530065a --- /dev/null +++ b/target/linux/generic/backport-5.15/760-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch @@ -0,0 +1,78 @@ +From 6a3bdc5209f45d2af83aa92433ab6e5cf2297aa4 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:47 +0100 +Subject: net: dsa: qca8k: add set_ageing_time support + +qca8k support setting ageing time in step of 7s. Add support for it and +set the max value accepted of 7645m. +Documentation talks about support for 10000m but that values doesn't +make sense as the value doesn't match the max value in the reg. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 25 +++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 3 +++ + 2 files changed, 28 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1261,6 +1261,10 @@ qca8k_setup(struct dsa_switch *ds) + /* We don't have interrupts for link changes, so we need to poll */ + ds->pcs_poll = true; + ++ /* Set min a max ageing value supported */ ++ ds->ageing_time_min = 7000; ++ ds->ageing_time_max = 458745000; ++ + return 0; + } + +@@ -1801,6 +1805,26 @@ qca8k_port_fast_age(struct dsa_switch *d + } + + static int ++qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ unsigned int secs = msecs / 1000; ++ u32 val; ++ ++ /* AGE_TIME reg is set in 7s step */ ++ val = secs / 7; ++ ++ /* Handle case with 0 as val to NOT disable ++ * learning ++ */ ++ if (!val) ++ val = 1; ++ ++ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, ++ QCA8K_ATU_AGE_TIME(val)); ++} ++ ++static int + qca8k_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) + { +@@ -1999,6 +2023,7 @@ static const struct dsa_switch_ops qca8k + .get_strings = qca8k_get_strings, + .get_ethtool_stats = qca8k_get_ethtool_stats, + .get_sset_count = qca8k_get_sset_count, ++ .set_ageing_time = qca8k_set_ageing_time, + .get_mac_eee = qca8k_get_mac_eee, + .set_mac_eee = qca8k_set_mac_eee, + .port_enable = qca8k_port_enable, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -175,6 +175,9 @@ + #define QCA8K_VTU_FUNC1_BUSY BIT(31) + #define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) + #define QCA8K_VTU_FUNC1_FULL BIT(4) ++#define QCA8K_REG_ATU_CTRL 0x618 ++#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) ++#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) + #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 + #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) + #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 diff --git a/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch b/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch new file mode 100644 index 000000000..fa022d7ae --- /dev/null +++ b/target/linux/generic/backport-5.15/761-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch @@ -0,0 +1,142 @@ +From ba8f870dfa635113ce6e8095a5eb1835ecde2e9e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Mon, 22 Nov 2021 16:23:48 +0100 +Subject: net: dsa: qca8k: add support for mdb_add/del + +Add support for mdb add/del function. The ARL table is used to insert +the rule. The rule will be searched, deleted and reinserted with the +port mask updated. The function will check if the rule has to be updated +or insert directly with no deletion of the old rule. +If every port is removed from the port mask, the rule is removed. +The rule is set STATIC in the ARL table (aka it doesn't age) to not be +flushed by fast age function. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 99 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -436,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv) + } + + static int ++qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, ++ const u8 *mac, u16 vid) ++{ ++ struct qca8k_fdb fdb = { 0 }; ++ int ret; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ qca8k_fdb_write(priv, vid, 0, mac, 0); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); ++ if (ret < 0) ++ goto exit; ++ ++ ret = qca8k_fdb_read(priv, &fdb); ++ if (ret < 0) ++ goto exit; ++ ++ /* Rule exist. Delete first */ ++ if (!fdb.aging) { ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); ++ if (ret) ++ goto exit; ++ } ++ ++ /* Add port to fdb portmask */ ++ fdb.port_mask |= port_mask; ++ ++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); ++ ++exit: ++ mutex_unlock(&priv->reg_mutex); ++ return ret; ++} ++ ++static int ++qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, ++ const u8 *mac, u16 vid) ++{ ++ struct qca8k_fdb fdb = { 0 }; ++ int ret; ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ qca8k_fdb_write(priv, vid, 0, mac, 0); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); ++ if (ret < 0) ++ goto exit; ++ ++ /* Rule doesn't exist. Why delete? */ ++ if (!fdb.aging) { ++ ret = -EINVAL; ++ goto exit; ++ } ++ ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); ++ if (ret) ++ goto exit; ++ ++ /* Only port in the rule is this port. Don't re insert */ ++ if (fdb.port_mask == port_mask) ++ goto exit; ++ ++ /* Remove port from port mask */ ++ fdb.port_mask &= ~port_mask; ++ ++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); ++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); ++ ++exit: ++ mutex_unlock(&priv->reg_mutex); ++ return ret; ++} ++ ++static int + qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) + { + u32 reg; +@@ -1930,6 +2005,28 @@ qca8k_port_fdb_dump(struct dsa_switch *d + } + + static int ++qca8k_port_mdb_add(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ ++ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid); ++} ++ ++static int ++qca8k_port_mdb_del(struct dsa_switch *ds, int port, ++ const struct switchdev_obj_port_mdb *mdb) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ const u8 *addr = mdb->addr; ++ u16 vid = mdb->vid; ++ ++ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); ++} ++ ++static int + qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, + struct netlink_ext_ack *extack) + { +@@ -2037,6 +2134,8 @@ static const struct dsa_switch_ops qca8k + .port_fdb_add = qca8k_port_fdb_add, + .port_fdb_del = qca8k_port_fdb_del, + .port_fdb_dump = qca8k_port_fdb_dump, ++ .port_mdb_add = qca8k_port_mdb_add, ++ .port_mdb_del = qca8k_port_mdb_del, + .port_vlan_filtering = qca8k_port_vlan_filtering, + .port_vlan_add = qca8k_port_vlan_add, + .port_vlan_del = qca8k_port_vlan_del, diff --git a/target/linux/generic/backport-5.15/762-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch b/target/linux/generic/backport-5.15/762-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch new file mode 100644 index 000000000..69e9b381c --- /dev/null +++ 
b/target/linux/generic/backport-5.15/762-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch @@ -0,0 +1,155 @@ +From 2c1bdbc7e7560d7de754cad277d968d56bb1899e Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Tue, 23 Nov 2021 03:59:10 +0100 +Subject: net: dsa: qca8k: add support for mirror mode + +The switch supports mirror mode. Only one port can set as mirror port and +every other port can set to both ingress and egress mode. The mirror +port is disabled and reverted to normal operation once every port is +removed from sending packet to it. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 4 +++ + 2 files changed, 99 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -2027,6 +2027,99 @@ qca8k_port_mdb_del(struct dsa_switch *ds + } + + static int ++qca8k_port_mirror_add(struct dsa_switch *ds, int port, ++ struct dsa_mall_mirror_tc_entry *mirror, ++ bool ingress) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ int monitor_port, ret; ++ u32 reg, val; ++ ++ /* Check for existent entry */ ++ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) ++ return -EEXIST; ++ ++ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val); ++ if (ret) ++ return ret; ++ ++ /* QCA83xx can have only one port set to mirror mode. ++ * Check that the correct port is requested and return error otherwise. ++ * When no mirror port is set, the values is set to 0xF ++ */ ++ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); ++ if (monitor_port != 0xF && monitor_port != mirror->to_local_port) ++ return -EEXIST; ++ ++ /* Set the monitor port */ ++ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, ++ mirror->to_local_port); ++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, ++ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); ++ if (ret) ++ return ret; ++ ++ if (ingress) { ++ reg = QCA8K_PORT_LOOKUP_CTRL(port); ++ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; ++ } else { ++ reg = QCA8K_REG_PORT_HOL_CTRL1(port); ++ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; ++ } ++ ++ ret = regmap_update_bits(priv->regmap, reg, val, val); ++ if (ret) ++ return ret; ++ ++ /* Track mirror port for tx and rx to decide when the ++ * mirror port has to be disabled. ++ */ ++ if (ingress) ++ priv->mirror_rx |= BIT(port); ++ else ++ priv->mirror_tx |= BIT(port); ++ ++ return 0; ++} ++ ++static void ++qca8k_port_mirror_del(struct dsa_switch *ds, int port, ++ struct dsa_mall_mirror_tc_entry *mirror) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ u32 reg, val; ++ int ret; ++ ++ if (mirror->ingress) { ++ reg = QCA8K_PORT_LOOKUP_CTRL(port); ++ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; ++ } else { ++ reg = QCA8K_REG_PORT_HOL_CTRL1(port); ++ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; ++ } ++ ++ ret = regmap_clear_bits(priv->regmap, reg, val); ++ if (ret) ++ goto err; ++ ++ if (mirror->ingress) ++ priv->mirror_rx &= ~BIT(port); ++ else ++ priv->mirror_tx &= ~BIT(port); ++ ++ /* No port set to send packet to mirror port. 
Disable mirror port */ ++ if (!priv->mirror_rx && !priv->mirror_tx) { ++ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); ++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, ++ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); ++ if (ret) ++ goto err; ++ } ++err: ++ dev_err(priv->dev, "Failed to del mirror port from %d", port); ++} ++ ++static int + qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, + struct netlink_ext_ack *extack) + { +@@ -2136,6 +2229,8 @@ static const struct dsa_switch_ops qca8k + .port_fdb_dump = qca8k_port_fdb_dump, + .port_mdb_add = qca8k_port_mdb_add, + .port_mdb_del = qca8k_port_mdb_del, ++ .port_mirror_add = qca8k_port_mirror_add, ++ .port_mirror_del = qca8k_port_mirror_del, + .port_vlan_filtering = qca8k_port_vlan_filtering, + .port_vlan_add = qca8k_port_vlan_add, + .port_vlan_del = qca8k_port_vlan_del, +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -180,6 +180,7 @@ + #define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) + #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 + #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) ++#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4) + #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 + #define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) + #define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) +@@ -201,6 +202,7 @@ + #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) + #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) ++#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) + + #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 + #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) +@@ -305,6 +307,8 @@ struct qca8k_ports_config { + struct qca8k_priv { + u8 switch_id; + u8 switch_revision; ++ u8 mirror_rx; ++ u8 mirror_tx; + bool legacy_phy_port_mapping; + struct qca8k_ports_config ports_config; + struct regmap *regmap; diff --git a/target/linux/generic/backport-5.15/763-net-next-net-dsa-qca8k-add-LAG-support.patch b/target/linux/generic/backport-5.15/763-net-next-net-dsa-qca8k-add-LAG-support.patch new file mode 100644 index 000000000..bfc77db18 --- /dev/null +++ b/target/linux/generic/backport-5.15/763-net-next-net-dsa-qca8k-add-LAG-support.patch @@ -0,0 +1,288 @@ +From def975307c01191b6f0170048c3724b0ed3348af Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Tue, 23 Nov 2021 03:59:11 +0100 +Subject: net: dsa: qca8k: add LAG support + +Add LAG support to this switch. In Documentation this is described as +trunk mode. A max of 4 LAGs are supported and each can support up to 4 +port. The current tx mode supported is Hash mode with both L2 and L2+3 +mode. +When no port are present in the trunk, the trunk is disabled in the +switch. +When a port is disconnected, the traffic is redirected to the other +available port. +The hash mode is global and each LAG require to have the same hash mode +set. To change the hash mode when multiple LAG are configured, it's +required to remove each LAG and set the desired hash mode to the last. +An error is printed when it's asked to set a not supported hadh mode. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 33 +++++++++ + 2 files changed, 210 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -1340,6 +1340,9 @@ qca8k_setup(struct dsa_switch *ds) + ds->ageing_time_min = 7000; + ds->ageing_time_max = 458745000; + ++ /* Set max number of LAGs supported */ ++ ds->num_lag_ids = QCA8K_NUM_LAGS; ++ + return 0; + } + +@@ -2207,6 +2210,178 @@ qca8k_get_tag_protocol(struct dsa_switch + return DSA_TAG_PROTO_QCA; + } + ++static bool ++qca8k_lag_can_offload(struct dsa_switch *ds, ++ struct net_device *lag, ++ struct netdev_lag_upper_info *info) ++{ ++ struct dsa_port *dp; ++ int id, members = 0; ++ ++ id = dsa_lag_id(ds->dst, lag); ++ if (id < 0 || id >= ds->num_lag_ids) ++ return false; ++ ++ dsa_lag_foreach_port(dp, ds->dst, lag) ++ /* Includes the port joining the LAG */ ++ members++; ++ ++ if (members > QCA8K_NUM_PORTS_FOR_LAG) ++ return false; ++ ++ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) ++ return false; ++ ++ if (info->hash_type != NETDEV_LAG_HASH_L2 || ++ info->hash_type != NETDEV_LAG_HASH_L23) ++ return false; ++ ++ return true; ++} ++ ++static int ++qca8k_lag_setup_hash(struct dsa_switch *ds, ++ struct net_device *lag, ++ struct netdev_lag_upper_info *info) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ bool unique_lag = true; ++ int i, id; ++ u32 hash; ++ ++ id = dsa_lag_id(ds->dst, lag); ++ ++ switch (info->hash_type) { ++ case NETDEV_LAG_HASH_L23: ++ hash |= QCA8K_TRUNK_HASH_SIP_EN; ++ hash |= QCA8K_TRUNK_HASH_DIP_EN; ++ fallthrough; ++ case NETDEV_LAG_HASH_L2: ++ hash |= QCA8K_TRUNK_HASH_SA_EN; ++ hash |= QCA8K_TRUNK_HASH_DA_EN; ++ break; ++ default: /* We should NEVER reach this */ ++ return -EOPNOTSUPP; ++ } ++ ++ /* Check if we are the unique configured LAG */ ++ dsa_lags_foreach_id(i, ds->dst) ++ if (i != id && dsa_lag_dev(ds->dst, i)) { ++ unique_lag = false; ++ break; ++ } ++ ++ /* Hash Mode is global. Make sure the same Hash Mode ++ * is set to all the 4 possible lag. ++ * If we are the unique LAG we can set whatever hash ++ * mode we want. ++ * To change hash mode it's needed to remove all LAG ++ * and change the mode with the latest. ++ */ ++ if (unique_lag) { ++ priv->lag_hash_mode = hash; ++ } else if (priv->lag_hash_mode != hash) { ++ netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, ++ QCA8K_TRUNK_HASH_MASK, hash); ++} ++ ++static int ++qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, ++ struct net_device *lag, bool delete) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ int ret, id, i; ++ u32 val; ++ ++ id = dsa_lag_id(ds->dst, lag); ++ ++ /* Read current port member */ ++ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); ++ if (ret) ++ return ret; ++ ++ /* Shift val to the correct trunk */ ++ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); ++ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; ++ if (delete) ++ val &= ~BIT(port); ++ else ++ val |= BIT(port); ++ ++ /* Update port member. 
With empty portmap disable trunk */ ++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, ++ QCA8K_REG_GOL_TRUNK_MEMBER(id) | ++ QCA8K_REG_GOL_TRUNK_EN(id), ++ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | ++ val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); ++ ++ /* Search empty member if adding or port on deleting */ ++ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { ++ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); ++ if (ret) ++ return ret; ++ ++ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); ++ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; ++ ++ if (delete) { ++ /* If port flagged to be disabled assume this member is ++ * empty ++ */ ++ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) ++ continue; ++ ++ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; ++ if (val != port) ++ continue; ++ } else { ++ /* If port flagged to be enabled assume this member is ++ * already set ++ */ ++ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) ++ continue; ++ } ++ ++ /* We have found the member to add/remove */ ++ break; ++ } ++ ++ /* Set port in the correct port mask or disable port if in delete mode */ ++ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), ++ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | ++ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), ++ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | ++ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); ++} ++ ++static int ++qca8k_port_lag_join(struct dsa_switch *ds, int port, ++ struct net_device *lag, ++ struct netdev_lag_upper_info *info) ++{ ++ int ret; ++ ++ if (!qca8k_lag_can_offload(ds, lag, info)) ++ return -EOPNOTSUPP; ++ ++ ret = qca8k_lag_setup_hash(ds, lag, info); ++ if (ret) ++ return ret; ++ ++ return qca8k_lag_refresh_portmap(ds, port, lag, false); ++} ++ ++static int ++qca8k_port_lag_leave(struct dsa_switch *ds, int port, ++ struct net_device *lag) ++{ ++ return qca8k_lag_refresh_portmap(ds, port, lag, true); ++} ++ + static const struct dsa_switch_ops qca8k_switch_ops = { + .get_tag_protocol = qca8k_get_tag_protocol, + .setup = qca8k_setup, +@@ -2240,6 +2415,8 @@ static const struct dsa_switch_ops qca8k + .phylink_mac_link_down = qca8k_phylink_mac_link_down, + .phylink_mac_link_up = qca8k_phylink_mac_link_up, + .get_phy_flags = qca8k_get_phy_flags, ++ .port_lag_join = qca8k_port_lag_join, ++ .port_lag_leave = qca8k_port_lag_leave, + }; + + static int qca8k_read_switch_id(struct qca8k_priv *priv) +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -15,6 +15,8 @@ + #define QCA8K_NUM_PORTS 7 + #define QCA8K_NUM_CPU_PORTS 2 + #define QCA8K_MAX_MTU 9000 ++#define QCA8K_NUM_LAGS 4 ++#define QCA8K_NUM_PORTS_FOR_LAG 4 + + #define PHY_ID_QCA8327 0x004dd034 + #define QCA8K_ID_QCA8327 0x12 +@@ -122,6 +124,14 @@ + #define QCA8K_REG_EEE_CTRL 0x100 + #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) + ++/* TRUNK_HASH_EN registers */ ++#define QCA8K_TRUNK_HASH_EN_CTRL 0x270 ++#define QCA8K_TRUNK_HASH_SIP_EN BIT(3) ++#define QCA8K_TRUNK_HASH_DIP_EN BIT(2) ++#define QCA8K_TRUNK_HASH_SA_EN BIT(1) ++#define QCA8K_TRUNK_HASH_DA_EN BIT(0) ++#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0) ++ + /* ACL registers */ + #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) + #define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) +@@ -204,6 +214,28 @@ + #define QCA8K_PORT_LOOKUP_LEARN BIT(20) + #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) + ++#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700 ++/* 4 max trunk first ++ * first 6 bit for member bitmap ++ * 7th bit is to enable trunk port ++ */ ++#define 
QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8) ++#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7) ++#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) ++#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0) ++#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) ++/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */ ++#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4)) ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0) ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3) ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0) ++#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16) ++#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4) ++/* Complex shift: FIRST shift for port THEN shift for trunk */ ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)) ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) ++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) ++ + #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 + #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) + #define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) +@@ -309,6 +341,7 @@ struct qca8k_priv { + u8 switch_revision; + u8 mirror_rx; + u8 mirror_tx; ++ u8 lag_hash_mode; + bool legacy_phy_port_mapping; + struct qca8k_ports_config ports_config; + struct regmap *regmap; diff --git a/target/linux/generic/backport-5.15/764-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch b/target/linux/generic/backport-5.15/764-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch new file mode 100644 index 000000000..8c0a990b0 --- /dev/null +++ b/target/linux/generic/backport-5.15/764-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch @@ -0,0 +1,40 @@ +From 0898ca67b86e14207d4feb3f3fea8b87cec5aab1 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Tue, 23 Nov 2021 16:44:46 +0100 +Subject: net: dsa: qca8k: fix warning in LAG feature + +Fix warning reported by bot. +Make sure hash is init to 0 and fix wrong logic for hash_type in +qca8k_lag_can_offload. 
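
As an illustration (editor's sketch, not part of the upstream fix): since info->hash_type can never equal both NETDEV_LAG_HASH_L2 and NETDEV_LAG_HASH_L23 at once, at least one of the two "!=" tests is always true, so the original "||" form refused offload for every hash type, supported ones included. The corrected condition is equivalent to the hypothetical helper below; the helper name is an assumption, only the enum values come from the kernel.

	/* Hypothetical helper, equivalent to the fixed check in
	 * qca8k_lag_can_offload(): true only when the requested hash mode
	 * is neither L2 nor L2+L3, the one case that must be refused.
	 */
	static bool qca8k_lag_hash_unsupported(enum netdev_lag_hash hash_type)
	{
		return hash_type != NETDEV_LAG_HASH_L2 &&
		       hash_type != NETDEV_LAG_HASH_L23;
	}
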
+ +Reported-by: kernel test robot +Fixes: def975307c01 ("net: dsa: qca8k: add LAG support") +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Link: https://lore.kernel.org/r/20211123154446.31019-1-ansuelsmth@gmail.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/dsa/qca8k.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -2232,7 +2232,7 @@ qca8k_lag_can_offload(struct dsa_switch + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return false; + +- if (info->hash_type != NETDEV_LAG_HASH_L2 || ++ if (info->hash_type != NETDEV_LAG_HASH_L2 && + info->hash_type != NETDEV_LAG_HASH_L23) + return false; + +@@ -2246,8 +2246,8 @@ qca8k_lag_setup_hash(struct dsa_switch * + { + struct qca8k_priv *priv = ds->priv; + bool unique_lag = true; ++ u32 hash = 0; + int i, id; +- u32 hash; + + id = dsa_lag_id(ds->dst, lag); + diff --git a/target/linux/generic/backport-5.15/765-1-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch b/target/linux/generic/backport-5.15/765-1-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch new file mode 100644 index 000000000..1786bf034 --- /dev/null +++ b/target/linux/generic/backport-5.15/765-1-net-next-net-dsa-reorder-PHY-initialization-with-MTU-setup-in.patch @@ -0,0 +1,52 @@ +From 904e112ad431492b34f235f59738e8312802bbf9 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:12 +0200 +Subject: [PATCH 1/6] net: dsa: reorder PHY initialization with MTU setup in + slave.c + +In dsa_slave_create() there are 2 sections that take rtnl_lock(): +MTU change and netdev registration. They are separated by PHY +initialization. + +There isn't any strict ordering requirement except for the fact that +netdev registration should be last. Therefore, we can perform the MTU +change a bit later, after the PHY setup. A future change will then be +able to merge the two rtnl_lock sections into one. + +Signed-off-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + net/dsa/slave.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1986,13 +1986,6 @@ int dsa_slave_create(struct dsa_port *po + port->slave = slave_dev; + dsa_slave_setup_tagger(slave_dev); + +- rtnl_lock(); +- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); +- rtnl_unlock(); +- if (ret && ret != -EOPNOTSUPP) +- dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", +- ret, ETH_DATA_LEN, port->index); +- + netif_carrier_off(slave_dev); + + ret = dsa_slave_phy_setup(slave_dev); +@@ -2004,6 +1997,13 @@ int dsa_slave_create(struct dsa_port *po + } + + rtnl_lock(); ++ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); ++ rtnl_unlock(); ++ if (ret && ret != -EOPNOTSUPP) ++ dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", ++ ret, ETH_DATA_LEN, port->index); ++ ++ rtnl_lock(); + + ret = register_netdevice(slave_dev); + if (ret) { diff --git a/target/linux/generic/backport-5.15/765-2-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch b/target/linux/generic/backport-5.15/765-2-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch new file mode 100644 index 000000000..c2493a08f --- /dev/null +++ b/target/linux/generic/backport-5.15/765-2-net-next-net-dsa-merge-rtnl_lock-sections-in-dsa_slave_create.patch @@ -0,0 +1,34 @@ +From e31dbd3b6aba585231cd84a87adeb22e7c6a8c19 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:13 +0200 +Subject: [PATCH 2/6] net: dsa: merge rtnl_lock sections in dsa_slave_create + +Currently dsa_slave_create() has two sequences of rtnl_lock/rtnl_unlock +in a row. Remove the rtnl_unlock() and rtnl_lock() in between, such that +the operation can execute slighly faster. + +Signed-off-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + net/dsa/slave.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1997,14 +1997,12 @@ int dsa_slave_create(struct dsa_port *po + } + + rtnl_lock(); ++ + ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); +- rtnl_unlock(); + if (ret && ret != -EOPNOTSUPP) + dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", + ret, ETH_DATA_LEN, port->index); + +- rtnl_lock(); +- + ret = register_netdevice(slave_dev); + if (ret) { + netdev_err(master, "error %d registering interface %s\n", diff --git a/target/linux/generic/backport-5.15/765-3-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch b/target/linux/generic/backport-5.15/765-3-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch new file mode 100644 index 000000000..d1126de5d --- /dev/null +++ b/target/linux/generic/backport-5.15/765-3-net-next-net-dsa-stop-updating-master-MTU-from-master.c.patch @@ -0,0 +1,91 @@ +From a1ff94c2973c43bc1e2677ac63ebb15b1d1ff846 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:14 +0200 +Subject: [PATCH 3/6] net: dsa: stop updating master MTU from master.c + +At present there are two paths for changing the MTU of the DSA master. 
+ +The first is: + +dsa_tree_setup +-> dsa_tree_setup_ports + -> dsa_port_setup + -> dsa_slave_create + -> dsa_slave_change_mtu + -> dev_set_mtu(master) + +The second is: + +dsa_tree_setup +-> dsa_tree_setup_master + -> dsa_master_setup + -> dev_set_mtu(dev) + +So the dev_set_mtu() call from dsa_master_setup() has been effectively +superseded by the dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN) that is +done from dsa_slave_create() for each user port. The later function also +updates the master MTU according to the largest user port MTU from the +tree. Therefore, updating the master MTU through a separate code path +isn't needed. + +Signed-off-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + net/dsa/master.c | 25 +------------------------ + 1 file changed, 1 insertion(+), 24 deletions(-) + +--- a/net/dsa/master.c ++++ b/net/dsa/master.c +@@ -330,28 +330,13 @@ static const struct attribute_group dsa_ + .attrs = dsa_slave_attrs, + }; + +-static void dsa_master_reset_mtu(struct net_device *dev) +-{ +- int err; +- +- rtnl_lock(); +- err = dev_set_mtu(dev, ETH_DATA_LEN); +- if (err) +- netdev_dbg(dev, +- "Unable to reset MTU to exclude DSA overheads\n"); +- rtnl_unlock(); +-} +- + static struct lock_class_key dsa_master_addr_list_lock_key; + + int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) + { +- const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops; + struct dsa_switch *ds = cpu_dp->ds; + struct device_link *consumer_link; +- int mtu, ret; +- +- mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops); ++ int ret; + + /* The DSA master must use SET_NETDEV_DEV for this to work. */ + consumer_link = device_link_add(ds->dev, dev->dev.parent, +@@ -361,13 +346,6 @@ int dsa_master_setup(struct net_device * + "Failed to create a device link to DSA switch %s\n", + dev_name(ds->dev)); + +- rtnl_lock(); +- ret = dev_set_mtu(dev, mtu); +- rtnl_unlock(); +- if (ret) +- netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n", +- ret, mtu); +- + /* If we use a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point on get + * sent to the tag format's receive function. +@@ -405,7 +383,6 @@ void dsa_master_teardown(struct net_devi + sysfs_remove_group(&dev->dev.kobj, &dsa_group); + dsa_netdev_ops_set(dev, NULL); + dsa_master_ethtool_teardown(dev); +- dsa_master_reset_mtu(dev); + dsa_master_set_promiscuity(dev, -1); + + dev->dsa_ptr = NULL; diff --git a/target/linux/generic/backport-5.15/765-4-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch b/target/linux/generic/backport-5.15/765-4-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch new file mode 100644 index 000000000..67d434006 --- /dev/null +++ b/target/linux/generic/backport-5.15/765-4-net-next-net-dsa-hold-rtnl_mutex-when-calling-dsa_master_-set.patch @@ -0,0 +1,78 @@ +From c146f9bc195a9dc3ad7fd000a14540e7c9df952d Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:15 +0200 +Subject: [PATCH 4/6] net: dsa: hold rtnl_mutex when calling + dsa_master_{setup,teardown} + +DSA needs to simulate master tracking events when a binding is first +with a DSA master established and torn down, in order to give drivers +the simplifying guarantee that ->master_state_change calls are made +only when the master's readiness state to pass traffic changes. +master_state_change() provide a operational bool that DSA driver can use +to understand if DSA master is operational or not. 
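
A minimal sketch of how a switch driver could consume that bool (editor's illustration: the callback signature matches the one added later in this series, but the qca8k_master_change() name and the master_operational field are assumptions, not upstream code):

	static void qca8k_master_change(struct dsa_switch *ds,
					const struct net_device *master,
					bool operational)
	{
		struct qca8k_priv *priv = ds->priv;

		/* Cache the state so management I/O over Ethernet can be
		 * skipped early while the master cannot pass traffic.
		 */
		priv->master_operational = operational;
	}

	/* Wired up through the new dsa_switch_ops member: */
	static const struct dsa_switch_ops qca8k_switch_ops_sketch = {
		/* ... */
		.master_state_change = qca8k_master_change,
	};
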
+To avoid races, we need to block the reception of +NETDEV_UP/NETDEV_CHANGE/NETDEV_GOING_DOWN events in the netdev notifier +chain while we are changing the master's dev->dsa_ptr (this changes what +netdev_uses_dsa(dev) reports). + +The dsa_master_setup() and dsa_master_teardown() functions optionally +require the rtnl_mutex to be held, if the tagger needs the master to be +promiscuous, these functions call dev_set_promiscuity(). Move the +rtnl_lock() from that function and make it top-level. + +Signed-off-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + net/dsa/dsa2.c | 8 ++++++++ + net/dsa/master.c | 4 ++-- + 2 files changed, 10 insertions(+), 2 deletions(-) + +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -1034,6 +1034,8 @@ static int dsa_tree_setup_master(struct + struct dsa_port *dp; + int err; + ++ rtnl_lock(); ++ + list_for_each_entry(dp, &dst->ports, list) { + if (dsa_port_is_cpu(dp)) { + err = dsa_master_setup(dp->master, dp); +@@ -1042,6 +1044,8 @@ static int dsa_tree_setup_master(struct + } + } + ++ rtnl_unlock(); ++ + return 0; + } + +@@ -1049,9 +1053,13 @@ static void dsa_tree_teardown_master(str + { + struct dsa_port *dp; + ++ rtnl_lock(); ++ + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_is_cpu(dp)) + dsa_master_teardown(dp->master); ++ ++ rtnl_unlock(); + } + + static int dsa_tree_setup_lags(struct dsa_switch_tree *dst) +--- a/net/dsa/master.c ++++ b/net/dsa/master.c +@@ -267,9 +267,9 @@ static void dsa_master_set_promiscuity(s + if (!ops->promisc_on_master) + return; + +- rtnl_lock(); ++ ASSERT_RTNL(); ++ + dev_set_promiscuity(dev, inc); +- rtnl_unlock(); + } + + static ssize_t tagging_show(struct device *d, struct device_attribute *attr, diff --git a/target/linux/generic/backport-5.15/765-5-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch b/target/linux/generic/backport-5.15/765-5-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch new file mode 100644 index 000000000..e6472c61d --- /dev/null +++ b/target/linux/generic/backport-5.15/765-5-net-next-net-dsa-first-set-up-shared-ports-then-non-shared-po.patch @@ -0,0 +1,118 @@ +From 1e3f407f3cacc5dcfe27166c412ed9bc263d82bf Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:16 +0200 +Subject: [PATCH 5/6] net: dsa: first set up shared ports, then non-shared + ports + +After commit a57d8c217aad ("net: dsa: flush switchdev workqueue before +tearing down CPU/DSA ports"), the port setup and teardown procedure +became asymmetric. + +The fact of the matter is that user ports need the shared ports to be up +before they can be used for CPU-initiated termination. And since we +register net devices for the user ports, those won't be functional until +we also call the setup for the shared (CPU, DSA) ports. But we may do +that later, depending on the port numbering scheme of the hardware we +are dealing with. + +It just makes sense that all shared ports are brought up before any user +port is. I can't pinpoint any issue due to the current behavior, but +let's change it nonetheless, for consistency's sake. + +Signed-off-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + net/dsa/dsa2.c | 50 +++++++++++++++++++++++++++++++++++++------------- + 1 file changed, 37 insertions(+), 13 deletions(-) + +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -999,23 +999,28 @@ static void dsa_tree_teardown_switches(s + dsa_switch_teardown(dp->ds); + } + +-static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) ++/* Bring shared ports up first, then non-shared ports */ ++static int dsa_tree_setup_ports(struct dsa_switch_tree *dst) + { + struct dsa_port *dp; +- int err; ++ int err = 0; + + list_for_each_entry(dp, &dst->ports, list) { +- err = dsa_switch_setup(dp->ds); +- if (err) +- goto teardown; ++ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) { ++ err = dsa_port_setup(dp); ++ if (err) ++ goto teardown; ++ } + } + + list_for_each_entry(dp, &dst->ports, list) { +- err = dsa_port_setup(dp); +- if (err) { +- err = dsa_port_reinit_as_unused(dp); +- if (err) +- goto teardown; ++ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) { ++ err = dsa_port_setup(dp); ++ if (err) { ++ err = dsa_port_reinit_as_unused(dp); ++ if (err) ++ goto teardown; ++ } + } + } + +@@ -1024,7 +1029,21 @@ static int dsa_tree_setup_switches(struc + teardown: + dsa_tree_teardown_ports(dst); + +- dsa_tree_teardown_switches(dst); ++ return err; ++} ++ ++static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) ++{ ++ struct dsa_port *dp; ++ int err = 0; ++ ++ list_for_each_entry(dp, &dst->ports, list) { ++ err = dsa_switch_setup(dp->ds); ++ if (err) { ++ dsa_tree_teardown_switches(dst); ++ break; ++ } ++ } + + return err; + } +@@ -1111,10 +1130,14 @@ static int dsa_tree_setup(struct dsa_swi + if (err) + goto teardown_cpu_ports; + +- err = dsa_tree_setup_master(dst); ++ err = dsa_tree_setup_ports(dst); + if (err) + goto teardown_switches; + ++ err = dsa_tree_setup_master(dst); ++ if (err) ++ goto teardown_ports; ++ + err = dsa_tree_setup_lags(dst); + if (err) + goto teardown_master; +@@ -1127,8 +1150,9 @@ static int dsa_tree_setup(struct dsa_swi + + teardown_master: + dsa_tree_teardown_master(dst); +-teardown_switches: ++teardown_ports: + dsa_tree_teardown_ports(dst); ++teardown_switches: + dsa_tree_teardown_switches(dst); + teardown_cpu_ports: + dsa_tree_teardown_cpu_ports(dst); diff --git a/target/linux/generic/backport-5.15/765-6-net-next-net-dsa-setup-master-before-ports.patch b/target/linux/generic/backport-5.15/765-6-net-next-net-dsa-setup-master-before-ports.patch new file mode 100644 index 000000000..93cad0c98 --- /dev/null +++ b/target/linux/generic/backport-5.15/765-6-net-next-net-dsa-setup-master-before-ports.patch @@ -0,0 +1,115 @@ +From 11fd667dac315ea3f2469961f6d2869271a46cae Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Thu, 6 Jan 2022 01:11:17 +0200 +Subject: [PATCH 6/6] net: dsa: setup master before ports + +It is said that as soon as a network interface is registered, all its +resources should have already been prepared, so that it is available for +sending and receiving traffic. One of the resources needed by a DSA +slave interface is the master. + +dsa_tree_setup +-> dsa_tree_setup_ports + -> dsa_port_setup + -> dsa_slave_create + -> register_netdevice +-> dsa_tree_setup_master + -> dsa_master_setup + -> sets up master->dsa_ptr, which enables reception + +Therefore, there is a short period of time after register_netdevice() +during which the master isn't prepared to pass traffic to the DSA layer +(master->dsa_ptr is checked by eth_type_trans). Same thing during +unregistration, there is a time frame in which packets might be missed. 
+ +Note that this change opens us to another race: dsa_master_find_slave() +will get invoked potentially earlier than the slave creation, and later +than the slave deletion. Since dp->slave starts off as a NULL pointer, +the earlier calls aren't a problem, but the later calls are. To avoid +use-after-free, we should zeroize dp->slave before calling +dsa_slave_destroy(). + +In practice I cannot really test real life improvements brought by this +change, since in my systems, netdevice creation races with PHY autoneg +which takes a few seconds to complete, and that masks quite a few races. +Effects might be noticeable in a setup with fixed links all the way to +an external system. + +Signed-off-by: Vladimir Oltean +Signed-off-by: David S. Miller +--- + net/dsa/dsa2.c | 23 +++++++++++++---------- + 1 file changed, 13 insertions(+), 10 deletions(-) + +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -545,6 +545,7 @@ static void dsa_port_teardown(struct dsa + struct devlink_port *dlp = &dp->devlink_port; + struct dsa_switch *ds = dp->ds; + struct dsa_mac_addr *a, *tmp; ++ struct net_device *slave; + + if (!dp->setup) + return; +@@ -566,9 +567,11 @@ static void dsa_port_teardown(struct dsa + dsa_port_link_unregister_of(dp); + break; + case DSA_PORT_TYPE_USER: +- if (dp->slave) { +- dsa_slave_destroy(dp->slave); ++ slave = dp->slave; ++ ++ if (slave) { + dp->slave = NULL; ++ dsa_slave_destroy(slave); + } + break; + } +@@ -1130,17 +1133,17 @@ static int dsa_tree_setup(struct dsa_swi + if (err) + goto teardown_cpu_ports; + +- err = dsa_tree_setup_ports(dst); ++ err = dsa_tree_setup_master(dst); + if (err) + goto teardown_switches; + +- err = dsa_tree_setup_master(dst); ++ err = dsa_tree_setup_ports(dst); + if (err) +- goto teardown_ports; ++ goto teardown_master; + + err = dsa_tree_setup_lags(dst); + if (err) +- goto teardown_master; ++ goto teardown_ports; + + dst->setup = true; + +@@ -1148,10 +1151,10 @@ static int dsa_tree_setup(struct dsa_swi + + return 0; + +-teardown_master: +- dsa_tree_teardown_master(dst); + teardown_ports: + dsa_tree_teardown_ports(dst); ++teardown_master: ++ dsa_tree_teardown_master(dst); + teardown_switches: + dsa_tree_teardown_switches(dst); + teardown_cpu_ports: +@@ -1169,10 +1172,10 @@ static void dsa_tree_teardown(struct dsa + + dsa_tree_teardown_lags(dst); + +- dsa_tree_teardown_master(dst); +- + dsa_tree_teardown_ports(dst); + ++ dsa_tree_teardown_master(dst); ++ + dsa_tree_teardown_switches(dst); + + dsa_tree_teardown_cpu_ports(dst); diff --git a/target/linux/generic/backport-5.15/766-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch b/target/linux/generic/backport-5.15/766-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch new file mode 100644 index 000000000..d73b74558 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-01-net-dsa-provide-switch-operations-for-tracking-the-m.patch @@ -0,0 +1,254 @@ +From 295ab96f478d0fa56393e85406f19a867e26ce22 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Wed, 2 Feb 2022 01:03:20 +0100 +Subject: [PATCH 01/16] net: dsa: provide switch operations for tracking the + master state + +Certain drivers may need to send management traffic to the switch for +things like register access, FDB dump, etc, to accelerate what their +slow bus (SPI, I2C, MDIO) can already do. + +Ethernet is faster (especially in bulk transactions) but is also more +unreliable, since the user may decide to bring the DSA master down (or +not bring it up), therefore severing the link between the host and the +attached switch. 
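
One way a driver might act on this tracking information, sketched with hypothetical helpers (qca8k_eth_read32() and qca8k_mdio_read32() stand in for a driver's fast Ethernet-based and slow MDIO-based accessors, and master_operational is the flag cached in the earlier sketch; none of these are defined by this patch):

	static int qca8k_read32(struct qca8k_priv *priv, u32 reg, u32 *val)
	{
		/* Try the fast path only while the master is known to be
		 * operational, instead of waiting for per-packet timeouts.
		 */
		if (priv->master_operational &&
		    !qca8k_eth_read32(priv, reg, val))
			return 0;

		/* Master down or Ethernet access failed: use the slow bus. */
		return qca8k_mdio_read32(priv, reg, val);
	}
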
+ +Drivers needing Ethernet-based register access already should have +fallback logic to the slow bus if the Ethernet method fails, but that +fallback may be based on a timeout, and the I/O to the switch may slow +down to a halt if the master is down, because every Ethernet packet will +have to time out. The driver also doesn't have the option to turn off +Ethernet-based I/O momentarily, because it wouldn't know when to turn it +back on. + +Which is where this change comes in. By tracking NETDEV_CHANGE, +NETDEV_UP and NETDEV_GOING_DOWN events on the DSA master, we should know +the exact interval of time during which this interface is reliably +available for traffic. Provide this information to switches so they can +use it as they wish. + +An helper is added dsa_port_master_is_operational() to check if a master +port is operational. + +Signed-off-by: Vladimir Oltean +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + include/net/dsa.h | 17 +++++++++++++++++ + net/dsa/dsa2.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ + net/dsa/dsa_priv.h | 13 +++++++++++++ + net/dsa/slave.c | 32 ++++++++++++++++++++++++++++++++ + net/dsa/switch.c | 15 +++++++++++++++ + 5 files changed, 123 insertions(+) + +--- a/include/net/dsa.h ++++ b/include/net/dsa.h +@@ -291,6 +291,10 @@ struct dsa_port { + struct list_head mdbs; + + bool setup; ++ /* Master state bits, valid only on CPU ports */ ++ u8 master_admin_up:1; ++ u8 master_oper_up:1; ++ + }; + + /* TODO: ideally DSA ports would have a single dp->link_dp member, +@@ -456,6 +460,12 @@ static inline bool dsa_port_is_unused(st + return dp->type == DSA_PORT_TYPE_UNUSED; + } + ++static inline bool dsa_port_master_is_operational(struct dsa_port *dp) ++{ ++ return dsa_port_is_cpu(dp) && dp->master_admin_up && ++ dp->master_oper_up; ++} ++ + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) + { + return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; +@@ -916,6 +926,13 @@ struct dsa_switch_ops { + int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid, + u16 flags); + int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); ++ ++ /* ++ * DSA master tracking operations ++ */ ++ void (*master_state_change)(struct dsa_switch *ds, ++ const struct net_device *master, ++ bool operational); + }; + + #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -1275,6 +1275,52 @@ out_unlock: + return err; + } + ++static void dsa_tree_master_state_change(struct dsa_switch_tree *dst, ++ struct net_device *master) ++{ ++ struct dsa_notifier_master_state_info info; ++ struct dsa_port *cpu_dp = master->dsa_ptr; ++ ++ info.master = master; ++ info.operational = dsa_port_master_is_operational(cpu_dp); ++ ++ dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info); ++} ++ ++void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, ++ struct net_device *master, ++ bool up) ++{ ++ struct dsa_port *cpu_dp = master->dsa_ptr; ++ bool notify = false; ++ ++ if ((dsa_port_master_is_operational(cpu_dp)) != ++ (up && cpu_dp->master_oper_up)) ++ notify = true; ++ ++ cpu_dp->master_admin_up = up; ++ ++ if (notify) ++ dsa_tree_master_state_change(dst, master); ++} ++ ++void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, ++ struct net_device *master, ++ bool up) ++{ ++ struct dsa_port *cpu_dp = master->dsa_ptr; ++ bool notify = false; ++ ++ if ((dsa_port_master_is_operational(cpu_dp)) != ++ (cpu_dp->master_admin_up && 
up)) ++ notify = true; ++ ++ cpu_dp->master_oper_up = up; ++ ++ if (notify) ++ dsa_tree_master_state_change(dst, master); ++} ++ + static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) + { + struct dsa_switch_tree *dst = ds->dst; +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -45,6 +45,7 @@ enum { + DSA_NOTIFIER_MRP_DEL_RING_ROLE, + DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, + DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, ++ DSA_NOTIFIER_MASTER_STATE_CHANGE, + }; + + /* DSA_NOTIFIER_AGEING_TIME */ +@@ -127,6 +128,12 @@ struct dsa_notifier_tag_8021q_vlan_info + u16 vid; + }; + ++/* DSA_NOTIFIER_MASTER_STATE_CHANGE */ ++struct dsa_notifier_master_state_info { ++ const struct net_device *master; ++ bool operational; ++}; ++ + struct dsa_switchdev_event_work { + struct dsa_switch *ds; + int port; +@@ -548,6 +555,12 @@ int dsa_tree_change_tag_proto(struct dsa + struct net_device *master, + const struct dsa_device_ops *tag_ops, + const struct dsa_device_ops *old_tag_ops); ++void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, ++ struct net_device *master, ++ bool up); ++void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, ++ struct net_device *master, ++ bool up); + int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); + void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num); + +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -2320,6 +2320,36 @@ static int dsa_slave_netdevice_event(str + err = dsa_port_lag_change(dp, info->lower_state_info); + return notifier_from_errno(err); + } ++ case NETDEV_CHANGE: ++ case NETDEV_UP: { ++ /* Track state of master port. ++ * DSA driver may require the master port (and indirectly ++ * the tagger) to be available for some special operation. ++ */ ++ if (netdev_uses_dsa(dev)) { ++ struct dsa_port *cpu_dp = dev->dsa_ptr; ++ struct dsa_switch_tree *dst = cpu_dp->ds->dst; ++ ++ /* Track when the master port is UP */ ++ dsa_tree_master_oper_state_change(dst, dev, ++ netif_oper_up(dev)); ++ ++ /* Track when the master port is ready and can accept ++ * packet. ++ * NETDEV_UP event is not enough to flag a port as ready. ++ * We also have to wait for linkwatch_do_dev to dev_activate ++ * and emit a NETDEV_CHANGE event. ++ * We check if a master port is ready by checking if the dev ++ * have a qdisc assigned and is not noop. 
++ */ ++ dsa_tree_master_admin_state_change(dst, dev, ++ !qdisc_tx_is_noop(dev)); ++ ++ return NOTIFY_OK; ++ } ++ ++ return NOTIFY_DONE; ++ } + case NETDEV_GOING_DOWN: { + struct dsa_port *dp, *cpu_dp; + struct dsa_switch_tree *dst; +@@ -2331,6 +2361,8 @@ static int dsa_slave_netdevice_event(str + cpu_dp = dev->dsa_ptr; + dst = cpu_dp->ds->dst; + ++ dsa_tree_master_admin_state_change(dst, dev, false); ++ + list_for_each_entry(dp, &dst->ports, list) { + if (!dsa_is_user_port(dp->ds, dp->index)) + continue; +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -722,6 +722,18 @@ dsa_switch_mrp_del_ring_role(struct dsa_ + return 0; + } + ++static int ++dsa_switch_master_state_change(struct dsa_switch *ds, ++ struct dsa_notifier_master_state_info *info) ++{ ++ if (!ds->ops->master_state_change) ++ return 0; ++ ++ ds->ops->master_state_change(ds, info->master, info->operational); ++ ++ return 0; ++} ++ + static int dsa_switch_event(struct notifier_block *nb, + unsigned long event, void *info) + { +@@ -813,6 +825,9 @@ static int dsa_switch_event(struct notif + case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL: + err = dsa_switch_tag_8021q_vlan_del(ds, info); + break; ++ case DSA_NOTIFIER_MASTER_STATE_CHANGE: ++ err = dsa_switch_master_state_change(ds, info); ++ break; + default: + err = -EOPNOTSUPP; + break; diff --git a/target/linux/generic/backport-5.15/766-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch b/target/linux/generic/backport-5.15/766-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch new file mode 100644 index 000000000..6478d580c --- /dev/null +++ b/target/linux/generic/backport-5.15/766-02-net-dsa-replay-master-state-events-in-dsa_tree_-setu.patch @@ -0,0 +1,89 @@ +From e83d56537859849f2223b90749e554831b1f3c27 Mon Sep 17 00:00:00 2001 +From: Vladimir Oltean +Date: Wed, 2 Feb 2022 01:03:21 +0100 +Subject: [PATCH 02/16] net: dsa: replay master state events in + dsa_tree_{setup,teardown}_master + +In order for switch driver to be able to make simple and reliable use of +the master tracking operations, they must also be notified of the +initial state of the DSA master, not just of the changes. This is +because they might enable certain features only during the time when +they know that the DSA master is up and running. + +Therefore, this change explicitly checks the state of the DSA master +under the same rtnl_mutex as we were holding during the +dsa_master_setup() and dsa_master_teardown() call. The idea being that +if the DSA master became operational in between the moment in which it +became a DSA master (dsa_master_setup set dev->dsa_ptr) and the moment +when we checked for the master being up, there is a chance that we +would emit a ->master_state_change() call with no actual state change. +We need to avoid that by serializing the concurrent netdevice event with +us. If the netdevice event started before, we force it to finish before +we begin, because we take rtnl_lock before making netdev_uses_dsa() +return true. So we also handle that early event and do nothing on it. +Similarly, if the dev_open() attempt is concurrent with us, it will +attempt to take the rtnl_mutex, but we're holding it. We'll see that +the master flag IFF_UP isn't set, then when we release the rtnl_mutex +we'll process the NETDEV_UP notifier. + +Signed-off-by: Vladimir Oltean +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + net/dsa/dsa2.c | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 deletions(-) + +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include "dsa_priv.h" + +@@ -1060,9 +1061,18 @@ static int dsa_tree_setup_master(struct + + list_for_each_entry(dp, &dst->ports, list) { + if (dsa_port_is_cpu(dp)) { +- err = dsa_master_setup(dp->master, dp); ++ struct net_device *master = dp->master; ++ bool admin_up = (master->flags & IFF_UP) && ++ !qdisc_tx_is_noop(master); ++ ++ err = dsa_master_setup(master, dp); + if (err) + return err; ++ ++ /* Replay master state event */ ++ dsa_tree_master_admin_state_change(dst, master, admin_up); ++ dsa_tree_master_oper_state_change(dst, master, ++ netif_oper_up(master)); + } + } + +@@ -1077,9 +1087,19 @@ static void dsa_tree_teardown_master(str + + rtnl_lock(); + +- list_for_each_entry(dp, &dst->ports, list) +- if (dsa_port_is_cpu(dp)) +- dsa_master_teardown(dp->master); ++ list_for_each_entry(dp, &dst->ports, list) { ++ if (dsa_port_is_cpu(dp)) { ++ struct net_device *master = dp->master; ++ ++ /* Synthesizing an "admin down" state is sufficient for ++ * the switches to get a notification if the master is ++ * currently up and running. ++ */ ++ dsa_tree_master_admin_state_change(dst, master, false); ++ ++ dsa_master_teardown(master); ++ } ++ } + + rtnl_unlock(); + } diff --git a/target/linux/generic/backport-5.15/766-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch b/target/linux/generic/backport-5.15/766-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch new file mode 100644 index 000000000..82c94b385 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-03-net-dsa-tag_qca-convert-to-FIELD-macro.patch @@ -0,0 +1,86 @@ +From 6b0458299297ca4ab6fb295800e29a4e501d50c1 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:22 +0100 +Subject: [PATCH 03/16] net: dsa: tag_qca: convert to FIELD macro + +Convert driver to FIELD macro to drop redundant define. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + net/dsa/tag_qca.c | 34 +++++++++++++++------------------- + 1 file changed, 15 insertions(+), 19 deletions(-) + +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -4,29 +4,24 @@ + */ + + #include ++#include + + #include "dsa_priv.h" + + #define QCA_HDR_LEN 2 + #define QCA_HDR_VERSION 0x2 + +-#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14) +-#define QCA_HDR_RECV_VERSION_S 14 +-#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11) +-#define QCA_HDR_RECV_PRIORITY_S 11 +-#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6) +-#define QCA_HDR_RECV_TYPE_S 6 ++#define QCA_HDR_RECV_VERSION GENMASK(15, 14) ++#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) ++#define QCA_HDR_RECV_TYPE GENMASK(10, 6) + #define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) +-#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) ++#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) + +-#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14) +-#define QCA_HDR_XMIT_VERSION_S 14 +-#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11) +-#define QCA_HDR_XMIT_PRIORITY_S 11 +-#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8) +-#define QCA_HDR_XMIT_CONTROL_S 8 ++#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) ++#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) ++#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) + #define QCA_HDR_XMIT_FROM_CPU BIT(7) +-#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0) ++#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) + + static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) + { +@@ -40,8 +35,9 @@ static struct sk_buff *qca_tag_xmit(stru + phdr = dsa_etype_header_pos_tx(skb); + + /* Set the version field, and set destination port information */ +- hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S | +- QCA_HDR_XMIT_FROM_CPU | BIT(dp->index); ++ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); ++ hdr |= QCA_HDR_XMIT_FROM_CPU; ++ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index)); + + *phdr = htons(hdr); + +@@ -62,7 +58,7 @@ static struct sk_buff *qca_tag_rcv(struc + hdr = ntohs(*phdr); + + /* Make sure the version is correct */ +- ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S; ++ ver = FIELD_GET(QCA_HDR_RECV_VERSION, hdr); + if (unlikely(ver != QCA_HDR_VERSION)) + return NULL; + +@@ -71,7 +67,7 @@ static struct sk_buff *qca_tag_rcv(struc + dsa_strip_etype_header(skb, QCA_HDR_LEN); + + /* Get source port information */ +- port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK); ++ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr); + + skb->dev = dsa_master_find_slave(dev, 0, port); + if (!skb->dev) diff --git a/target/linux/generic/backport-5.15/766-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch b/target/linux/generic/backport-5.15/766-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch new file mode 100644 index 000000000..c1e74ceee --- /dev/null +++ b/target/linux/generic/backport-5.15/766-04-net-dsa-tag_qca-move-define-to-include-linux-dsa.patch @@ -0,0 +1,71 @@ +From 3ec762fb13c7e7273800b94c80db1c2cc37590d1 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:23 +0100 +Subject: [PATCH 04/16] net: dsa: tag_qca: move define to include linux/dsa + +Move tag_qca define to include dir linux/dsa as the qca8k require access +to the tagger define to support in-band mdio read/write using ethernet +packet. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + include/linux/dsa/tag_qca.h | 21 +++++++++++++++++++++ + net/dsa/tag_qca.c | 16 +--------------- + 2 files changed, 22 insertions(+), 15 deletions(-) + create mode 100644 include/linux/dsa/tag_qca.h + +--- /dev/null ++++ b/include/linux/dsa/tag_qca.h +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#ifndef __TAG_QCA_H ++#define __TAG_QCA_H ++ ++#define QCA_HDR_LEN 2 ++#define QCA_HDR_VERSION 0x2 ++ ++#define QCA_HDR_RECV_VERSION GENMASK(15, 14) ++#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) ++#define QCA_HDR_RECV_TYPE GENMASK(10, 6) ++#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) ++#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) ++ ++#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) ++#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) ++#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) ++#define QCA_HDR_XMIT_FROM_CPU BIT(7) ++#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) ++ ++#endif /* __TAG_QCA_H */ +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -5,24 +5,10 @@ + + #include + #include ++#include + + #include "dsa_priv.h" + +-#define QCA_HDR_LEN 2 +-#define QCA_HDR_VERSION 0x2 +- +-#define QCA_HDR_RECV_VERSION GENMASK(15, 14) +-#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11) +-#define QCA_HDR_RECV_TYPE GENMASK(10, 6) +-#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) +-#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) +- +-#define QCA_HDR_XMIT_VERSION GENMASK(15, 14) +-#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) +-#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) +-#define QCA_HDR_XMIT_FROM_CPU BIT(7) +-#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) +- + static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct dsa_port *dp = dsa_slave_to_port(dev); diff --git a/target/linux/generic/backport-5.15/766-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch b/target/linux/generic/backport-5.15/766-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch new file mode 100644 index 000000000..9394a0dab --- /dev/null +++ b/target/linux/generic/backport-5.15/766-05-net-dsa-tag_qca-enable-promisc_on_master-flag.patch @@ -0,0 +1,27 @@ +From 101c04c3463b87061e6a3d4f72c1bc57670685a6 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:24 +0100 +Subject: [PATCH 05/16] net: dsa: tag_qca: enable promisc_on_master flag + +Ethernet MDIO packets are non-standard and DSA master expects the first +6 octets to be the MAC DA. To address these kind of packet, enable +promisc_on_master flag for the tagger. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + net/dsa/tag_qca.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -68,6 +68,7 @@ static const struct dsa_device_ops qca_n + .xmit = qca_tag_xmit, + .rcv = qca_tag_rcv, + .needed_headroom = QCA_HDR_LEN, ++ .promisc_on_master = true, + }; + + MODULE_LICENSE("GPL"); diff --git a/target/linux/generic/backport-5.15/766-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch b/target/linux/generic/backport-5.15/766-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch new file mode 100644 index 000000000..459454e03 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-06-net-dsa-tag_qca-add-define-for-handling-mgmt-Etherne.patch @@ -0,0 +1,110 @@ +From c2ee8181fddb293d296477f60b3eb4fa3ce4e1a6 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:25 +0100 +Subject: [PATCH 06/16] net: dsa: tag_qca: add define for handling mgmt + Ethernet packet + +Add all the required define to prepare support for mgmt read/write in +Ethernet packet. Any packet of this type has to be dropped as the only +use of these special packet is receive ack for an mgmt write request or +receive data for an mgmt read request. +A struct is used that emulates the Ethernet header but is used for a +different purpose. + +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + include/linux/dsa/tag_qca.h | 44 +++++++++++++++++++++++++++++++++++++ + net/dsa/tag_qca.c | 15 ++++++++++--- + 2 files changed, 56 insertions(+), 3 deletions(-) + +--- a/include/linux/dsa/tag_qca.h ++++ b/include/linux/dsa/tag_qca.h +@@ -12,10 +12,54 @@ + #define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3) + #define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0) + ++/* Packet type for recv */ ++#define QCA_HDR_RECV_TYPE_NORMAL 0x0 ++#define QCA_HDR_RECV_TYPE_MIB 0x1 ++#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2 ++ + #define QCA_HDR_XMIT_VERSION GENMASK(15, 14) + #define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11) + #define QCA_HDR_XMIT_CONTROL GENMASK(10, 8) + #define QCA_HDR_XMIT_FROM_CPU BIT(7) + #define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0) + ++/* Packet type for xmit */ ++#define QCA_HDR_XMIT_TYPE_NORMAL 0x0 ++#define QCA_HDR_XMIT_TYPE_RW_REG 0x1 ++ ++/* Check code for a valid mgmt packet. Switch will ignore the packet ++ * with this wrong. 
++ */ ++#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5 ++ ++/* Specific define for in-band MDIO read/write with Ethernet packet */ ++#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 byte for the seq */ ++#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 byte for the command */ ++#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 byte for the mdio data */ ++#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \ ++ QCA_HDR_MGMT_COMMAND_LEN + \ ++ QCA_HDR_MGMT_DATA1_LEN) ++ ++#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 byte for the mdio data */ ++#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet */ ++ ++#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \ ++ QCA_HDR_LEN + \ ++ QCA_HDR_MGMT_DATA2_LEN + \ ++ QCA_HDR_MGMT_PADDING_LEN) ++ ++#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */ ++#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */ ++#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */ ++#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */ ++#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */ ++ ++/* Special struct emulating a Ethernet header */ ++struct qca_mgmt_ethhdr { ++ u32 command; /* command bit 31:0 */ ++ u32 seq; /* seq 63:32 */ ++ u32 mdio_data; /* first 4byte mdio */ ++ __be16 hdr; /* qca hdr */ ++} __packed; ++ + #endif /* __TAG_QCA_H */ +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -32,10 +32,12 @@ static struct sk_buff *qca_tag_xmit(stru + + static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) + { +- u8 ver; +- u16 hdr; +- int port; ++ u8 ver, pk_type; + __be16 *phdr; ++ int port; ++ u16 hdr; ++ ++ BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); + + if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) + return NULL; +@@ -48,6 +50,13 @@ static struct sk_buff *qca_tag_rcv(struc + if (unlikely(ver != QCA_HDR_VERSION)) + return NULL; + ++ /* Get pk type */ ++ pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); ++ ++ /* Ethernet MDIO read/write packet */ ++ if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) ++ return NULL; ++ + /* Remove QCA tag and recalculate checksum */ + skb_pull_rcsum(skb, QCA_HDR_LEN); + dsa_strip_etype_header(skb, QCA_HDR_LEN); diff --git a/target/linux/generic/backport-5.15/766-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch b/target/linux/generic/backport-5.15/766-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch new file mode 100644 index 000000000..7e5dc6573 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-07-net-dsa-tag_qca-add-define-for-handling-MIB-packet.patch @@ -0,0 +1,45 @@ +From 18be654a4345f7d937b4bfbad74bea8093e3a93c Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:26 +0100 +Subject: [PATCH 07/16] net: dsa: tag_qca: add define for handling MIB packet + +Add struct to correctly parse a mib Ethernet packet. + +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + include/linux/dsa/tag_qca.h | 10 ++++++++++ + net/dsa/tag_qca.c | 4 ++++ + 2 files changed, 14 insertions(+) + +--- a/include/linux/dsa/tag_qca.h ++++ b/include/linux/dsa/tag_qca.h +@@ -62,4 +62,14 @@ struct qca_mgmt_ethhdr { + __be16 hdr; /* qca hdr */ + } __packed; + ++enum mdio_cmd { ++ MDIO_WRITE = 0x0, ++ MDIO_READ ++}; ++ ++struct mib_ethhdr { ++ u32 data[3]; /* first 3 mib counter */ ++ __be16 hdr; /* qca hdr */ ++} __packed; ++ + #endif /* __TAG_QCA_H */ +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -57,6 +57,10 @@ static struct sk_buff *qca_tag_rcv(struc + if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) + return NULL; + ++ /* Ethernet MIB counter packet */ ++ if (pk_type == QCA_HDR_RECV_TYPE_MIB) ++ return NULL; ++ + /* Remove QCA tag and recalculate checksum */ + skb_pull_rcsum(skb, QCA_HDR_LEN); + dsa_strip_etype_header(skb, QCA_HDR_LEN); diff --git a/target/linux/generic/backport-5.15/766-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch b/target/linux/generic/backport-5.15/766-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch new file mode 100644 index 000000000..ad25da30e --- /dev/null +++ b/target/linux/generic/backport-5.15/766-08-net-dsa-tag_qca-add-support-for-handling-mgmt-and-MI.patch @@ -0,0 +1,116 @@ +From 31eb6b4386ad91930417e3f5c8157a4b5e31cbd5 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:27 +0100 +Subject: [PATCH 08/16] net: dsa: tag_qca: add support for handling mgmt and + MIB Ethernet packet + +Add connect/disconnect helper to assign private struct to the DSA switch. +Add support for Ethernet mgmt and MIB if the DSA driver provide an handler +to correctly parse and elaborate the data. + +Signed-off-by: Ansuel Smith +Reviewed-by: Vladimir Oltean +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + include/linux/dsa/tag_qca.h | 7 +++++++ + net/dsa/tag_qca.c | 39 ++++++++++++++++++++++++++++++++++--- + 2 files changed, 43 insertions(+), 3 deletions(-) + +--- a/include/linux/dsa/tag_qca.h ++++ b/include/linux/dsa/tag_qca.h +@@ -72,4 +72,11 @@ struct mib_ethhdr { + __be16 hdr; /* qca hdr */ + } __packed; + ++struct qca_tagger_data { ++ void (*rw_reg_ack_handler)(struct dsa_switch *ds, ++ struct sk_buff *skb); ++ void (*mib_autocast_handler)(struct dsa_switch *ds, ++ struct sk_buff *skb); ++}; ++ + #endif /* __TAG_QCA_H */ +--- a/net/dsa/tag_qca.c ++++ b/net/dsa/tag_qca.c +@@ -5,6 +5,7 @@ + + #include + #include ++#include + #include + + #include "dsa_priv.h" +@@ -32,6 +33,9 @@ static struct sk_buff *qca_tag_xmit(stru + + static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) + { ++ struct qca_tagger_data *tagger_data; ++ struct dsa_port *dp = dev->dsa_ptr; ++ struct dsa_switch *ds = dp->ds; + u8 ver, pk_type; + __be16 *phdr; + int port; +@@ -39,6 +43,8 @@ static struct sk_buff *qca_tag_rcv(struc + + BUILD_BUG_ON(sizeof(struct qca_mgmt_ethhdr) != QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); + ++ tagger_data = ds->tagger_data; ++ + if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) + return NULL; + +@@ -53,13 +59,19 @@ static struct sk_buff *qca_tag_rcv(struc + /* Get pk type */ + pk_type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr); + +- /* Ethernet MDIO read/write packet */ +- if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) ++ /* Ethernet mgmt read/write packet */ ++ if (pk_type == QCA_HDR_RECV_TYPE_RW_REG_ACK) { ++ if (likely(tagger_data->rw_reg_ack_handler)) ++ tagger_data->rw_reg_ack_handler(ds, skb); + return NULL; ++ } + + /* Ethernet MIB counter packet */ +- if (pk_type == QCA_HDR_RECV_TYPE_MIB) ++ if (pk_type == QCA_HDR_RECV_TYPE_MIB) { ++ if (likely(tagger_data->mib_autocast_handler)) ++ tagger_data->mib_autocast_handler(ds, skb); + return NULL; ++ } + + /* Remove QCA tag and recalculate checksum */ + skb_pull_rcsum(skb, QCA_HDR_LEN); +@@ -75,9 +87,30 @@ static struct sk_buff *qca_tag_rcv(struc + return skb; + } + ++static int qca_tag_connect(struct dsa_switch *ds) ++{ ++ struct qca_tagger_data *tagger_data; ++ ++ tagger_data = kzalloc(sizeof(*tagger_data), GFP_KERNEL); ++ if (!tagger_data) ++ return -ENOMEM; ++ ++ ds->tagger_data = tagger_data; ++ ++ return 0; ++} ++ ++static void qca_tag_disconnect(struct dsa_switch *ds) ++{ ++ kfree(ds->tagger_data); ++ ds->tagger_data = NULL; ++} ++ + static const struct dsa_device_ops qca_netdev_ops = { + .name = "qca", + .proto = DSA_TAG_PROTO_QCA, ++ .connect = qca_tag_connect, ++ .disconnect = qca_tag_disconnect, + .xmit = qca_tag_xmit, + .rcv = qca_tag_rcv, + .needed_headroom = QCA_HDR_LEN, diff --git a/target/linux/generic/backport-5.15/766-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch b/target/linux/generic/backport-5.15/766-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch new file mode 100644 index 000000000..ff8fdca51 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-09-net-dsa-qca8k-add-tracking-state-of-master-port.patch @@ -0,0 +1,67 @@ +From cddbec19466a1dfb4d45ddd507d9f09f991d54ae Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:28 +0100 +Subject: [PATCH 09/16] net: dsa: qca8k: add tracking state of master port + +MDIO/MIB Ethernet require the master port and the tagger availabale to +correctly work. Use the new api master_state_change to track when master +is operational or not and set a bool in qca8k_priv. 
+We cache the first cached master available and we check if other cpu +port are operational when the cached one goes down. +This cached master will later be used by mdio read/write and mib request to +correctly use the working function. + +qca8k implementation for MDIO/MIB Ethernet is bad. CPU port0 is the only +one that answers with the ack packet or sends MIB Ethernet packets. For +this reason the master_state_change ignore CPU port6 and only checks +CPU port0 if it's operational and enables this mode. + +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 15 +++++++++++++++ + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 16 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -2382,6 +2382,20 @@ qca8k_port_lag_leave(struct dsa_switch * + return qca8k_lag_refresh_portmap(ds, port, lag, true); + } + ++static void ++qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, ++ bool operational) ++{ ++ struct dsa_port *dp = master->dsa_ptr; ++ struct qca8k_priv *priv = ds->priv; ++ ++ /* Ethernet MIB/MDIO is only supported for CPU port 0 */ ++ if (dp->index != 0) ++ return; ++ ++ priv->mgmt_master = operational ? (struct net_device *)master : NULL; ++} ++ + static const struct dsa_switch_ops qca8k_switch_ops = { + .get_tag_protocol = qca8k_get_tag_protocol, + .setup = qca8k_setup, +@@ -2417,6 +2431,7 @@ static const struct dsa_switch_ops qca8k + .get_phy_flags = qca8k_get_phy_flags, + .port_lag_join = qca8k_port_lag_join, + .port_lag_leave = qca8k_port_lag_leave, ++ .master_state_change = qca8k_master_change, + }; + + static int qca8k_read_switch_id(struct qca8k_priv *priv) +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -353,6 +353,7 @@ struct qca8k_priv { + struct dsa_switch_ops ops; + struct gpio_desc *reset_gpio; + unsigned int port_mtu[QCA8K_NUM_PORTS]; ++ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ + }; + + struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-5.15/766-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch b/target/linux/generic/backport-5.15/766-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch new file mode 100644 index 000000000..43656ad79 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-10-net-dsa-qca8k-add-support-for-mgmt-read-write-in-Eth.patch @@ -0,0 +1,363 @@ +From 5950c7c0a68c915b336c70f79388626e2d576ab7 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:29 +0100 +Subject: [PATCH 10/16] net: dsa: qca8k: add support for mgmt read/write in + Ethernet packet + +Add qca8k side support for mgmt read/write in Ethernet packet. +qca8k supports some specially crafted Ethernet packet that can be used +for mgmt read/write instead of the legacy method uart/internal mdio. +This add support for the qca8k side to craft the packet and enqueue it. +Each port and the qca8k_priv have a special struct to put data in it. +The completion API is used to wait for the packet to be received back +with the requested data. + +The various steps are: +1. Craft the special packet with the qca hdr set to mgmt read/write + mode. +2. Set the lock in the dedicated mgmt struct. +3. Increment the seq number and set it in the mgmt pkt +4. Reinit the completion. +5. Enqueue the packet. +6. Wait the packet to be received. +7. Use the data set by the tagger to complete the mdio operation. 
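+
+Condensed, one transaction follows the usual completion pattern (sketch of
+the steps above, not the full qca8k_read_eth()/qca8k_write_eth() bodies):
+
+	mutex_lock(&mgmt_eth_data->mutex);
+	skb->dev = priv->mgmt_master;
+	reinit_completion(&mgmt_eth_data->rw_done);
+	mgmt_eth_data->seq++;
+	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+	mgmt_eth_data->ack = false;
+	dev_queue_xmit(skb);
+	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+	mutex_unlock(&mgmt_eth_data->mutex);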
+ +If the completion timeouts or the ack value is not true, the legacy +mdio way is used. + +It has to be considered that in the initial setup mdio is still used and +mdio is still used until DSA is ready to accept and tag packet. + +tag_proto_connect() is used to fill the required handler for the tagger +to correctly parse and elaborate the special Ethernet mdio packet. + +Locking is added to qca8k_master_change() to make sure no mgmt Ethernet +are in progress. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 225 ++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 13 +++ + 2 files changed, 238 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #include "qca8k.h" + +@@ -170,6 +171,194 @@ qca8k_rmw(struct qca8k_priv *priv, u32 r + return regmap_update_bits(priv->regmap, reg, mask, write_val); + } + ++static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) ++{ ++ struct qca8k_mgmt_eth_data *mgmt_eth_data; ++ struct qca8k_priv *priv = ds->priv; ++ struct qca_mgmt_ethhdr *mgmt_ethhdr; ++ u8 len, cmd; ++ ++ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); ++ mgmt_eth_data = &priv->mgmt_eth_data; ++ ++ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); ++ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); ++ ++ /* Make sure the seq match the requested packet */ ++ if (mgmt_ethhdr->seq == mgmt_eth_data->seq) ++ mgmt_eth_data->ack = true; ++ ++ if (cmd == MDIO_READ) { ++ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; ++ ++ /* Get the rest of the 12 byte of data */ ++ if (len > QCA_HDR_MGMT_DATA1_LEN) ++ memcpy(mgmt_eth_data->data + 1, skb->data, ++ QCA_HDR_MGMT_DATA2_LEN); ++ } ++ ++ complete(&mgmt_eth_data->rw_done); ++} ++ ++static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, ++ int priority) ++{ ++ struct qca_mgmt_ethhdr *mgmt_ethhdr; ++ struct sk_buff *skb; ++ u16 hdr; ++ ++ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); ++ if (!skb) ++ return NULL; ++ ++ skb_reset_mac_header(skb); ++ skb_set_network_header(skb, skb->len); ++ ++ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN); ++ ++ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION); ++ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority); ++ hdr |= QCA_HDR_XMIT_FROM_CPU; ++ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); ++ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); ++ ++ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); ++ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, 4); ++ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); ++ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, ++ QCA_HDR_MGMT_CHECK_CODE_VAL); ++ ++ if (cmd == MDIO_WRITE) ++ mgmt_ethhdr->mdio_data = *val; ++ ++ mgmt_ethhdr->hdr = htons(hdr); ++ ++ skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); ++ ++ return skb; ++} ++ ++static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) ++{ ++ struct qca_mgmt_ethhdr *mgmt_ethhdr; ++ ++ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; ++ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); ++} ++ ++static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val) ++{ ++ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; ++ struct sk_buff *skb; ++ bool ack; ++ int ret; ++ ++ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, ++ 
QCA8K_ETHERNET_MDIO_PRIORITY); ++ if (!skb) ++ return -ENOMEM; ++ ++ mutex_lock(&mgmt_eth_data->mutex); ++ ++ /* Check mgmt_master if is operational */ ++ if (!priv->mgmt_master) { ++ kfree_skb(skb); ++ mutex_unlock(&mgmt_eth_data->mutex); ++ return -EINVAL; ++ } ++ ++ skb->dev = priv->mgmt_master; ++ ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the mdio pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(skb); ++ ++ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); ++ ++ *val = mgmt_eth_data->data[0]; ++ ack = mgmt_eth_data->ack; ++ ++ mutex_unlock(&mgmt_eth_data->mutex); ++ ++ if (ret <= 0) ++ return -ETIMEDOUT; ++ ++ if (!ack) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 val) ++{ ++ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; ++ struct sk_buff *skb; ++ bool ack; ++ int ret; ++ ++ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, &val, ++ QCA8K_ETHERNET_MDIO_PRIORITY); ++ if (!skb) ++ return -ENOMEM; ++ ++ mutex_lock(&mgmt_eth_data->mutex); ++ ++ /* Check mgmt_master if is operational */ ++ if (!priv->mgmt_master) { ++ kfree_skb(skb); ++ mutex_unlock(&mgmt_eth_data->mutex); ++ return -EINVAL; ++ } ++ ++ skb->dev = priv->mgmt_master; ++ ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the mdio pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(skb); ++ ++ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); ++ ++ ack = mgmt_eth_data->ack; ++ ++ mutex_unlock(&mgmt_eth_data->mutex); ++ ++ if (ret <= 0) ++ return -ETIMEDOUT; ++ ++ if (!ack) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int ++qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) ++{ ++ u32 val = 0; ++ int ret; ++ ++ ret = qca8k_read_eth(priv, reg, &val); ++ if (ret) ++ return ret; ++ ++ val &= ~mask; ++ val |= write_val; ++ ++ return qca8k_write_eth(priv, reg, val); ++} ++ + static int + qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) + { +@@ -178,6 +367,9 @@ qca8k_regmap_read(void *ctx, uint32_t re + u16 r1, r2, page; + int ret; + ++ if (!qca8k_read_eth(priv, reg, val)) ++ return 0; ++ + qca8k_split_addr(reg, &r1, &r2, &page); + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); +@@ -201,6 +393,9 @@ qca8k_regmap_write(void *ctx, uint32_t r + u16 r1, r2, page; + int ret; + ++ if (!qca8k_write_eth(priv, reg, val)) ++ return 0; ++ + qca8k_split_addr(reg, &r1, &r2, &page); + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); +@@ -225,6 +420,9 @@ qca8k_regmap_update_bits(void *ctx, uint + u32 val; + int ret; + ++ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val)) ++ return 0; ++ + qca8k_split_addr(reg, &r1, &r2, &page); + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); +@@ -2393,7 +2591,30 @@ qca8k_master_change(struct dsa_switch *d + if (dp->index != 0) + return; + ++ mutex_lock(&priv->mgmt_eth_data.mutex); ++ + priv->mgmt_master = operational ? 
(struct net_device *)master : NULL; ++ ++ mutex_unlock(&priv->mgmt_eth_data.mutex); ++} ++ ++static int qca8k_connect_tag_protocol(struct dsa_switch *ds, ++ enum dsa_tag_protocol proto) ++{ ++ struct qca_tagger_data *tagger_data; ++ ++ switch (proto) { ++ case DSA_TAG_PROTO_QCA: ++ tagger_data = ds->tagger_data; ++ ++ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; ++ ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; + } + + static const struct dsa_switch_ops qca8k_switch_ops = { +@@ -2432,6 +2653,7 @@ static const struct dsa_switch_ops qca8k + .port_lag_join = qca8k_port_lag_join, + .port_lag_leave = qca8k_port_lag_leave, + .master_state_change = qca8k_master_change, ++ .connect_tag_protocol = qca8k_connect_tag_protocol, + }; + + static int qca8k_read_switch_id(struct qca8k_priv *priv) +@@ -2511,6 +2733,9 @@ qca8k_sw_probe(struct mdio_device *mdiod + if (!priv->ds) + return -ENOMEM; + ++ mutex_init(&priv->mgmt_eth_data.mutex); ++ init_completion(&priv->mgmt_eth_data.rw_done); ++ + priv->ds->dev = &mdiodev->dev; + priv->ds->num_ports = QCA8K_NUM_PORTS; + priv->ds->priv = priv; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -11,6 +11,10 @@ + #include + #include + #include ++#include ++ ++#define QCA8K_ETHERNET_MDIO_PRIORITY 7 ++#define QCA8K_ETHERNET_TIMEOUT 100 + + #define QCA8K_NUM_PORTS 7 + #define QCA8K_NUM_CPU_PORTS 2 +@@ -328,6 +332,14 @@ enum { + QCA8K_CPU_PORT6, + }; + ++struct qca8k_mgmt_eth_data { ++ struct completion rw_done; ++ struct mutex mutex; /* Enforce one mdio read/write at time */ ++ bool ack; ++ u32 seq; ++ u32 data[4]; ++}; ++ + struct qca8k_ports_config { + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; +@@ -354,6 +366,7 @@ struct qca8k_priv { + struct gpio_desc *reset_gpio; + unsigned int port_mtu[QCA8K_NUM_PORTS]; + struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ ++ struct qca8k_mgmt_eth_data mgmt_eth_data; + }; + + struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-5.15/766-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch b/target/linux/generic/backport-5.15/766-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch new file mode 100644 index 000000000..c4bc2b364 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-11-net-dsa-qca8k-add-support-for-mib-autocast-in-Ethern.patch @@ -0,0 +1,226 @@ +From 5c957c7ca78cce5e4b96866722b0115bd758d945 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:30 +0100 +Subject: [PATCH 11/16] net: dsa: qca8k: add support for mib autocast in + Ethernet packet + +The switch can autocast MIB counter using Ethernet packet. +Add support for this and provide a handler for the tagger. +The switch will send packet with MIB counter for each port, the switch +will use completion API to wait for the correct packet to be received +and will complete the task only when each packet is received. +Although the handler will drop all the other packet, we still have to +consume each MIB packet to complete the request. This is done to prevent +mixed data with concurrent ethtool request. + +connect_tag_protocol() is used to add the handler to the tag_qca tagger, +master_state_change() use the MIB lock to make sure no MIB Ethernet is +in progress. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 106 +++++++++++++++++++++++++++++++++++++++- + drivers/net/dsa/qca8k.h | 17 ++++++- + 2 files changed, 121 insertions(+), 2 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -830,7 +830,10 @@ qca8k_mib_init(struct qca8k_priv *priv) + int ret; + + mutex_lock(&priv->reg_mutex); +- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY); ++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, ++ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, ++ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) | ++ QCA8K_MIB_BUSY); + if (ret) + goto exit; + +@@ -1901,6 +1904,97 @@ qca8k_get_strings(struct dsa_switch *ds, + ETH_GSTRING_LEN); + } + ++static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) ++{ ++ const struct qca8k_match_data *match_data; ++ struct qca8k_mib_eth_data *mib_eth_data; ++ struct qca8k_priv *priv = ds->priv; ++ const struct qca8k_mib_desc *mib; ++ struct mib_ethhdr *mib_ethhdr; ++ int i, mib_len, offset = 0; ++ u64 *data; ++ u8 port; ++ ++ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); ++ mib_eth_data = &priv->mib_eth_data; ++ ++ /* The switch autocast every port. Ignore other packet and ++ * parse only the requested one. ++ */ ++ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); ++ if (port != mib_eth_data->req_port) ++ goto exit; ++ ++ match_data = device_get_match_data(priv->dev); ++ data = mib_eth_data->data; ++ ++ for (i = 0; i < match_data->mib_count; i++) { ++ mib = &ar8327_mib[i]; ++ ++ /* First 3 mib are present in the skb head */ ++ if (i < 3) { ++ data[i] = mib_ethhdr->data[i]; ++ continue; ++ } ++ ++ mib_len = sizeof(uint32_t); ++ ++ /* Some mib are 64 bit wide */ ++ if (mib->size == 2) ++ mib_len = sizeof(uint64_t); ++ ++ /* Copy the mib value from packet to the */ ++ memcpy(data + i, skb->data + offset, mib_len); ++ ++ /* Set the offset for the next mib */ ++ offset += mib_len; ++ } ++ ++exit: ++ /* Complete on receiving all the mib packet */ ++ if (refcount_dec_and_test(&mib_eth_data->port_parsed)) ++ complete(&mib_eth_data->rw_done); ++} ++ ++static int ++qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) ++{ ++ struct dsa_port *dp = dsa_to_port(ds, port); ++ struct qca8k_mib_eth_data *mib_eth_data; ++ struct qca8k_priv *priv = ds->priv; ++ int ret; ++ ++ mib_eth_data = &priv->mib_eth_data; ++ ++ mutex_lock(&mib_eth_data->mutex); ++ ++ reinit_completion(&mib_eth_data->rw_done); ++ ++ mib_eth_data->req_port = dp->index; ++ mib_eth_data->data = data; ++ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); ++ ++ mutex_lock(&priv->reg_mutex); ++ ++ /* Send mib autocast request */ ++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, ++ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, ++ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | ++ QCA8K_MIB_BUSY); ++ ++ mutex_unlock(&priv->reg_mutex); ++ ++ if (ret) ++ goto exit; ++ ++ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); ++ ++exit: ++ mutex_unlock(&mib_eth_data->mutex); ++ ++ return ret; ++} ++ + static void + qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +@@ -1912,6 +2006,10 @@ qca8k_get_ethtool_stats(struct dsa_switc + u32 hi = 0; + int ret; + ++ if (priv->mgmt_master && ++ qca8k_get_ethtool_stats_eth(ds, port, data) > 0) ++ return; ++ + match_data = of_device_get_match_data(priv->dev); + + for (i = 0; i < match_data->mib_count; i++) { +@@ -2592,9 +2690,11 @@ qca8k_master_change(struct dsa_switch *d + return; + + 
mutex_lock(&priv->mgmt_eth_data.mutex); ++ mutex_lock(&priv->mib_eth_data.mutex); + + priv->mgmt_master = operational ? (struct net_device *)master : NULL; + ++ mutex_unlock(&priv->mib_eth_data.mutex); + mutex_unlock(&priv->mgmt_eth_data.mutex); + } + +@@ -2608,6 +2708,7 @@ static int qca8k_connect_tag_protocol(st + tagger_data = ds->tagger_data; + + tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler; ++ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler; + + break; + default: +@@ -2736,6 +2837,9 @@ qca8k_sw_probe(struct mdio_device *mdiod + mutex_init(&priv->mgmt_eth_data.mutex); + init_completion(&priv->mgmt_eth_data.rw_done); + ++ mutex_init(&priv->mib_eth_data.mutex); ++ init_completion(&priv->mib_eth_data.rw_done); ++ + priv->ds->dev = &mdiodev->dev; + priv->ds->num_ports = QCA8K_NUM_PORTS; + priv->ds->priv = priv; +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -67,7 +67,7 @@ + #define QCA8K_REG_MODULE_EN 0x030 + #define QCA8K_MODULE_EN_MIB BIT(0) + #define QCA8K_REG_MIB 0x034 +-#define QCA8K_MIB_FLUSH BIT(24) ++#define QCA8K_MIB_FUNC GENMASK(26, 24) + #define QCA8K_MIB_CPU_KEEP BIT(20) + #define QCA8K_MIB_BUSY BIT(17) + #define QCA8K_MDIO_MASTER_CTRL 0x3c +@@ -317,6 +317,12 @@ enum qca8k_vlan_cmd { + QCA8K_VLAN_READ = 6, + }; + ++enum qca8k_mid_cmd { ++ QCA8K_MIB_FLUSH = 1, ++ QCA8K_MIB_FLUSH_PORT = 2, ++ QCA8K_MIB_CAST = 3, ++}; ++ + struct ar8xxx_port_status { + int enabled; + }; +@@ -340,6 +346,14 @@ struct qca8k_mgmt_eth_data { + u32 data[4]; + }; + ++struct qca8k_mib_eth_data { ++ struct completion rw_done; ++ struct mutex mutex; /* Process one command at time */ ++ refcount_t port_parsed; /* Counter to track parsed port */ ++ u8 req_port; ++ u64 *data; /* pointer to ethtool data */ ++}; ++ + struct qca8k_ports_config { + bool sgmii_rx_clk_falling_edge; + bool sgmii_tx_clk_falling_edge; +@@ -367,6 +381,7 @@ struct qca8k_priv { + unsigned int port_mtu[QCA8K_NUM_PORTS]; + struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ + struct qca8k_mgmt_eth_data mgmt_eth_data; ++ struct qca8k_mib_eth_data mib_eth_data; + }; + + struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-5.15/766-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch b/target/linux/generic/backport-5.15/766-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch new file mode 100644 index 000000000..f5899eb59 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-12-net-dsa-qca8k-add-support-for-phy-read-write-with-mg.patch @@ -0,0 +1,287 @@ +From 2cd5485663847d468dc207b3ff85fb1fab44d97f Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:31 +0100 +Subject: [PATCH 12/16] net: dsa: qca8k: add support for phy read/write with + mgmt Ethernet + +Use mgmt Ethernet also for phy read/write if availabale. Use a different +seq number to make sure we receive the correct packet. +On any error, we fallback to the legacy mdio read/write. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 216 ++++++++++++++++++++++++++++++++++++++++ + drivers/net/dsa/qca8k.h | 1 + + 2 files changed, 217 insertions(+) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -867,6 +867,199 @@ qca8k_port_set_status(struct qca8k_priv + regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask); + } + ++static int ++qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, ++ struct sk_buff *read_skb, u32 *val) ++{ ++ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); ++ bool ack; ++ int ret; ++ ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the copy pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(skb); ++ ++ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ QCA8K_ETHERNET_TIMEOUT); ++ ++ ack = mgmt_eth_data->ack; ++ ++ if (ret <= 0) ++ return -ETIMEDOUT; ++ ++ if (!ack) ++ return -EINVAL; ++ ++ *val = mgmt_eth_data->data[0]; ++ ++ return 0; ++} ++ ++static int ++qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, ++ int regnum, u16 data) ++{ ++ struct sk_buff *write_skb, *clear_skb, *read_skb; ++ struct qca8k_mgmt_eth_data *mgmt_eth_data; ++ u32 write_val, clear_val = 0, val; ++ struct net_device *mgmt_master; ++ int ret, ret1; ++ bool ack; ++ ++ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) ++ return -EINVAL; ++ ++ mgmt_eth_data = &priv->mgmt_eth_data; ++ ++ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | ++ QCA8K_MDIO_MASTER_PHY_ADDR(phy) | ++ QCA8K_MDIO_MASTER_REG_ADDR(regnum); ++ ++ if (read) { ++ write_val |= QCA8K_MDIO_MASTER_READ; ++ } else { ++ write_val |= QCA8K_MDIO_MASTER_WRITE; ++ write_val |= QCA8K_MDIO_MASTER_DATA(data); ++ } ++ ++ /* Prealloc all the needed skb before the lock */ ++ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, ++ &write_val, QCA8K_ETHERNET_PHY_PRIORITY); ++ if (!write_skb) ++ return -ENOMEM; ++ ++ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, ++ &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); ++ if (!write_skb) { ++ ret = -ENOMEM; ++ goto err_clear_skb; ++ } ++ ++ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, ++ &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); ++ if (!write_skb) { ++ ret = -ENOMEM; ++ goto err_read_skb; ++ } ++ ++ /* Actually start the request: ++ * 1. Send mdio master packet ++ * 2. Busy Wait for mdio master command ++ * 3. Get the data if we are reading ++ * 4. 
Reset the mdio master (even with error) ++ */ ++ mutex_lock(&mgmt_eth_data->mutex); ++ ++ /* Check if mgmt_master is operational */ ++ mgmt_master = priv->mgmt_master; ++ if (!mgmt_master) { ++ mutex_unlock(&mgmt_eth_data->mutex); ++ ret = -EINVAL; ++ goto err_mgmt_master; ++ } ++ ++ read_skb->dev = mgmt_master; ++ clear_skb->dev = mgmt_master; ++ write_skb->dev = mgmt_master; ++ ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the write pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(write_skb); ++ ++ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ QCA8K_ETHERNET_TIMEOUT); ++ ++ ack = mgmt_eth_data->ack; ++ ++ if (ret <= 0) { ++ ret = -ETIMEDOUT; ++ kfree_skb(read_skb); ++ goto exit; ++ } ++ ++ if (!ack) { ++ ret = -EINVAL; ++ kfree_skb(read_skb); ++ goto exit; ++ } ++ ++ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1, ++ !(val & QCA8K_MDIO_MASTER_BUSY), 0, ++ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, ++ mgmt_eth_data, read_skb, &val); ++ ++ if (ret < 0 && ret1 < 0) { ++ ret = ret1; ++ goto exit; ++ } ++ ++ if (read) { ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the read pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(read_skb); ++ ++ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ QCA8K_ETHERNET_TIMEOUT); ++ ++ ack = mgmt_eth_data->ack; ++ ++ if (ret <= 0) { ++ ret = -ETIMEDOUT; ++ goto exit; ++ } ++ ++ if (!ack) { ++ ret = -EINVAL; ++ goto exit; ++ } ++ ++ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK; ++ } else { ++ kfree_skb(read_skb); ++ } ++exit: ++ reinit_completion(&mgmt_eth_data->rw_done); ++ ++ /* Increment seq_num and set it in the clear pkt */ ++ mgmt_eth_data->seq++; ++ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq); ++ mgmt_eth_data->ack = false; ++ ++ dev_queue_xmit(clear_skb); ++ ++ wait_for_completion_timeout(&mgmt_eth_data->rw_done, ++ QCA8K_ETHERNET_TIMEOUT); ++ ++ mutex_unlock(&mgmt_eth_data->mutex); ++ ++ return ret; ++ ++ /* Error handling before lock */ ++err_mgmt_master: ++ kfree_skb(read_skb); ++err_read_skb: ++ kfree_skb(clear_skb); ++err_clear_skb: ++ kfree_skb(write_skb); ++ ++ return ret; ++} ++ + static u32 + qca8k_port_to_phy(int port) + { +@@ -989,6 +1182,12 @@ qca8k_internal_mdio_write(struct mii_bus + { + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; ++ int ret; ++ ++ /* Use mdio Ethernet when available, fallback to legacy one on error */ ++ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data); ++ if (!ret) ++ return 0; + + return qca8k_mdio_write(bus, phy, regnum, data); + } +@@ -998,6 +1197,12 @@ qca8k_internal_mdio_read(struct mii_bus + { + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; ++ int ret; ++ ++ /* Use mdio Ethernet when available, fallback to legacy one on error */ ++ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0); ++ if (ret >= 0) ++ return ret; + + return qca8k_mdio_read(bus, phy, regnum); + } +@@ -1006,6 +1211,7 @@ static int + qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) + { + struct qca8k_priv *priv = ds->priv; ++ int ret; + + /* Check if the legacy mapping should be used and the + * port is not correctly mapped to the right PHY in the +@@ -1014,6 +1220,11 @@ qca8k_phy_write(struct 
dsa_switch *ds, i + if (priv->legacy_phy_port_mapping) + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + ++ /* Use mdio Ethernet when available, fallback to legacy one on error */ ++ ret = qca8k_phy_eth_command(priv, false, port, regnum, 0); ++ if (!ret) ++ return ret; ++ + return qca8k_mdio_write(priv->bus, port, regnum, data); + } + +@@ -1030,6 +1241,11 @@ qca8k_phy_read(struct dsa_switch *ds, in + if (priv->legacy_phy_port_mapping) + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + ++ /* Use mdio Ethernet when available, fallback to legacy one on error */ ++ ret = qca8k_phy_eth_command(priv, true, port, regnum, 0); ++ if (ret >= 0) ++ return ret; ++ + ret = qca8k_mdio_read(priv->bus, port, regnum); + + if (ret < 0) +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -14,6 +14,7 @@ + #include + + #define QCA8K_ETHERNET_MDIO_PRIORITY 7 ++#define QCA8K_ETHERNET_PHY_PRIORITY 6 + #define QCA8K_ETHERNET_TIMEOUT 100 + + #define QCA8K_NUM_PORTS 7 diff --git a/target/linux/generic/backport-5.15/766-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch b/target/linux/generic/backport-5.15/766-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch new file mode 100644 index 000000000..dc8160966 --- /dev/null +++ b/target/linux/generic/backport-5.15/766-13-net-dsa-qca8k-move-page-cache-to-driver-priv.patch @@ -0,0 +1,208 @@ +From 4264350acb75430d5021a1d7de56a33faf69a097 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:32 +0100 +Subject: [PATCH 13/16] net: dsa: qca8k: move page cache to driver priv + +There can be multiple qca8k switch on the same system. Move the static +qca8k_current_page to qca8k_priv and make it specific for each switch. + +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 42 ++++++++++++++++++++--------------------- + drivers/net/dsa/qca8k.h | 9 +++++++++ + 2 files changed, 29 insertions(+), 22 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -75,12 +75,6 @@ static const struct qca8k_mib_desc ar832 + MIB_DESC(1, 0xac, "TXUnicast"), + }; + +-/* The 32bit switch registers are accessed indirectly. To achieve this we need +- * to set the page of the register. 
Track the last page that was set to reduce +- * mdio writes +- */ +-static u16 qca8k_current_page = 0xffff; +- + static void + qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) + { +@@ -134,11 +128,13 @@ qca8k_mii_write32(struct mii_bus *bus, i + } + + static int +-qca8k_set_page(struct mii_bus *bus, u16 page) ++qca8k_set_page(struct qca8k_priv *priv, u16 page) + { ++ u16 *cached_page = &priv->mdio_cache.page; ++ struct mii_bus *bus = priv->bus; + int ret; + +- if (page == qca8k_current_page) ++ if (page == *cached_page) + return 0; + + ret = bus->write(bus, 0x18, 0, page); +@@ -148,7 +144,7 @@ qca8k_set_page(struct mii_bus *bus, u16 + return ret; + } + +- qca8k_current_page = page; ++ *cached_page = page; + usleep_range(1000, 2000); + return 0; + } +@@ -374,7 +370,7 @@ qca8k_regmap_read(void *ctx, uint32_t re + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(bus, page); ++ ret = qca8k_set_page(priv, page); + if (ret < 0) + goto exit; + +@@ -400,7 +396,7 @@ qca8k_regmap_write(void *ctx, uint32_t r + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(bus, page); ++ ret = qca8k_set_page(priv, page); + if (ret < 0) + goto exit; + +@@ -427,7 +423,7 @@ qca8k_regmap_update_bits(void *ctx, uint + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(bus, page); ++ ret = qca8k_set_page(priv, page); + if (ret < 0) + goto exit; + +@@ -1098,8 +1094,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus + } + + static int +-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) ++qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data) + { ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -1116,7 +1113,7 @@ qca8k_mdio_write(struct mii_bus *bus, in + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(bus, page); ++ ret = qca8k_set_page(priv, page); + if (ret) + goto exit; + +@@ -1135,8 +1132,9 @@ exit: + } + + static int +-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) ++qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum) + { ++ struct mii_bus *bus = priv->bus; + u16 r1, r2, page; + u32 val; + int ret; +@@ -1152,7 +1150,7 @@ qca8k_mdio_read(struct mii_bus *bus, int + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + +- ret = qca8k_set_page(bus, page); ++ ret = qca8k_set_page(priv, page); + if (ret) + goto exit; + +@@ -1181,7 +1179,6 @@ static int + qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) + { + struct qca8k_priv *priv = slave_bus->priv; +- struct mii_bus *bus = priv->bus; + int ret; + + /* Use mdio Ethernet when available, fallback to legacy one on error */ +@@ -1189,14 +1186,13 @@ qca8k_internal_mdio_write(struct mii_bus + if (!ret) + return 0; + +- return qca8k_mdio_write(bus, phy, regnum, data); ++ return qca8k_mdio_write(priv, phy, regnum, data); + } + + static int + qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) + { + struct qca8k_priv *priv = slave_bus->priv; +- struct mii_bus *bus = priv->bus; + int ret; + + /* Use mdio Ethernet when available, fallback to legacy one on error */ +@@ -1204,7 +1200,7 @@ qca8k_internal_mdio_read(struct mii_bus + if (ret >= 0) + return ret; + +- return qca8k_mdio_read(bus, phy, regnum); ++ return qca8k_mdio_read(priv, phy, regnum); + } + + static int +@@ -1225,7 +1221,7 @@ qca8k_phy_write(struct dsa_switch *ds, i + if (!ret) + return ret; + +- return qca8k_mdio_write(priv->bus, 
port, regnum, data); ++ return qca8k_mdio_write(priv, port, regnum, data); + } + + static int +@@ -1246,7 +1242,7 @@ qca8k_phy_read(struct dsa_switch *ds, in + if (ret >= 0) + return ret; + +- ret = qca8k_mdio_read(priv->bus, port, regnum); ++ ret = qca8k_mdio_read(priv, port, regnum); + + if (ret < 0) + return 0xffff; +@@ -3041,6 +3037,8 @@ qca8k_sw_probe(struct mdio_device *mdiod + return PTR_ERR(priv->regmap); + } + ++ priv->mdio_cache.page = 0xffff; ++ + /* Check the detected switch id */ + ret = qca8k_read_switch_id(priv); + if (ret) +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -363,6 +363,14 @@ struct qca8k_ports_config { + u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */ + }; + ++struct qca8k_mdio_cache { ++/* The 32bit switch registers are accessed indirectly. To achieve this we need ++ * to set the page of the register. Track the last page that was set to reduce ++ * mdio writes ++ */ ++ u16 page; ++}; ++ + struct qca8k_priv { + u8 switch_id; + u8 switch_revision; +@@ -383,6 +391,7 @@ struct qca8k_priv { + struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ + struct qca8k_mgmt_eth_data mgmt_eth_data; + struct qca8k_mib_eth_data mib_eth_data; ++ struct qca8k_mdio_cache mdio_cache; + }; + + struct qca8k_mib_desc { diff --git a/target/linux/generic/backport-5.15/766-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch b/target/linux/generic/backport-5.15/766-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch new file mode 100644 index 000000000..2d483730c --- /dev/null +++ b/target/linux/generic/backport-5.15/766-14-net-dsa-qca8k-cache-lo-and-hi-for-mdio-write.patch @@ -0,0 +1,164 @@ +From 2481d206fae7884cd07014fd1318e63af35e99eb Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:33 +0100 +Subject: [PATCH 14/16] net: dsa: qca8k: cache lo and hi for mdio write + +From Documentation, we can cache lo and hi the same way we do with the +page. This massively reduce the mdio write as 3/4 of the time as we only +require to write the lo or hi part for a mdio write. + +Signed-off-by: Ansuel Smith +Reviewed-by: Florian Fainelli +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 61 +++++++++++++++++++++++++++++++++-------- + drivers/net/dsa/qca8k.h | 5 ++++ + 2 files changed, 54 insertions(+), 12 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -89,6 +89,44 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u + } + + static int ++qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo) ++{ ++ u16 *cached_lo = &priv->mdio_cache.lo; ++ struct mii_bus *bus = priv->bus; ++ int ret; ++ ++ if (lo == *cached_lo) ++ return 0; ++ ++ ret = bus->write(bus, phy_id, regnum, lo); ++ if (ret < 0) ++ dev_err_ratelimited(&bus->dev, ++ "failed to write qca8k 32bit lo register\n"); ++ ++ *cached_lo = lo; ++ return 0; ++} ++ ++static int ++qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi) ++{ ++ u16 *cached_hi = &priv->mdio_cache.hi; ++ struct mii_bus *bus = priv->bus; ++ int ret; ++ ++ if (hi == *cached_hi) ++ return 0; ++ ++ ret = bus->write(bus, phy_id, regnum, hi); ++ if (ret < 0) ++ dev_err_ratelimited(&bus->dev, ++ "failed to write qca8k 32bit hi register\n"); ++ ++ *cached_hi = hi; ++ return 0; ++} ++ ++static int + qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val) + { + int ret; +@@ -111,7 +149,7 @@ qca8k_mii_read32(struct mii_bus *bus, in + } + + static void +-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val) ++qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val) + { + u16 lo, hi; + int ret; +@@ -119,12 +157,9 @@ qca8k_mii_write32(struct mii_bus *bus, i + lo = val & 0xffff; + hi = (u16)(val >> 16); + +- ret = bus->write(bus, phy_id, regnum, lo); ++ ret = qca8k_set_lo(priv, phy_id, regnum, lo); + if (ret >= 0) +- ret = bus->write(bus, phy_id, regnum + 1, hi); +- if (ret < 0) +- dev_err_ratelimited(&bus->dev, +- "failed to write qca8k 32bit register\n"); ++ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi); + } + + static int +@@ -400,7 +435,7 @@ qca8k_regmap_write(void *ctx, uint32_t r + if (ret < 0) + goto exit; + +- qca8k_mii_write32(bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, val); + + exit: + mutex_unlock(&bus->mdio_lock); +@@ -433,7 +468,7 @@ qca8k_regmap_update_bits(void *ctx, uint + + val &= ~mask; + val |= write_val; +- qca8k_mii_write32(bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, val); + + exit: + mutex_unlock(&bus->mdio_lock); +@@ -1117,14 +1152,14 @@ qca8k_mdio_write(struct qca8k_priv *priv + if (ret) + goto exit; + +- qca8k_mii_write32(bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, val); + + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); + + exit: + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_mii_write32(bus, 0x10 | r2, r1, 0); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); + + mutex_unlock(&bus->mdio_lock); + +@@ -1154,7 +1189,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, + if (ret) + goto exit; + +- qca8k_mii_write32(bus, 0x10 | r2, r1, val); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, val); + + ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL, + QCA8K_MDIO_MASTER_BUSY); +@@ -1165,7 +1200,7 @@ qca8k_mdio_read(struct qca8k_priv *priv, + + exit: + /* even if the busy_wait timeouts try to clear the MASTER_EN */ +- qca8k_mii_write32(bus, 0x10 | r2, r1, 0); ++ qca8k_mii_write32(priv, 0x10 | r2, r1, 0); + + mutex_unlock(&bus->mdio_lock); + +@@ -3038,6 +3073,8 @@ qca8k_sw_probe(struct mdio_device *mdiod + } + + priv->mdio_cache.page = 0xffff; ++ priv->mdio_cache.lo = 0xffff; 
++ priv->mdio_cache.hi = 0xffff; + + /* Check the detected switch id */ + ret = qca8k_read_switch_id(priv); +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -369,6 +369,11 @@ struct qca8k_mdio_cache { + * mdio writes + */ + u16 page; ++/* lo and hi can also be cached and from Documentation we can skip one ++ * extra mdio write if lo or hi is didn't change. ++ */ ++ u16 lo; ++ u16 hi; + }; + + struct qca8k_priv { diff --git a/target/linux/generic/backport-5.15/766-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch b/target/linux/generic/backport-5.15/766-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch new file mode 100644 index 000000000..5acd13dba --- /dev/null +++ b/target/linux/generic/backport-5.15/766-15-net-dsa-qca8k-add-support-for-larger-read-write-size.patch @@ -0,0 +1,206 @@ +From 90386223f44e2a751d7e9e9ac8f78ea33358a891 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:34 +0100 +Subject: [PATCH 15/16] net: dsa: qca8k: add support for larger read/write size + with mgmt Ethernet + +mgmt Ethernet packet can read/write up to 16byte at times. The len reg +is limited to 15 (0xf). The switch actually sends and accepts data in 4 +different steps of len values. +Len steps: +- 0: nothing +- 1-4: first 4 byte +- 5-6: first 12 byte +- 7-15: all 16 byte + +In the alloc skb function we check if the len is 16 and we fix it to a +len of 15. It the read/write function interest to extract the real asked +data. The tagger handler will always copy the fully 16byte with a READ +command. This is useful for some big regs like the fdb reg that are +more than 4byte of data. This permits to introduce a bulk function that +will send and request the entire entry in one go. +Write function is changed and it does now require to pass the pointer to +val to also handle array val. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. Miller +--- + drivers/net/dsa/qca8k.c | 61 +++++++++++++++++++++++++++-------------- + 1 file changed, 41 insertions(+), 20 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -222,7 +222,9 @@ static void qca8k_rw_reg_ack_handler(str + if (cmd == MDIO_READ) { + mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; + +- /* Get the rest of the 12 byte of data */ ++ /* Get the rest of the 12 byte of data. ++ * The read/write function will extract the requested data. 
++ */ + if (len > QCA_HDR_MGMT_DATA1_LEN) + memcpy(mgmt_eth_data->data + 1, skb->data, + QCA_HDR_MGMT_DATA2_LEN); +@@ -232,16 +234,30 @@ static void qca8k_rw_reg_ack_handler(str + } + + static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val, +- int priority) ++ int priority, unsigned int len) + { + struct qca_mgmt_ethhdr *mgmt_ethhdr; ++ unsigned int real_len; + struct sk_buff *skb; ++ u32 *data2; + u16 hdr; + + skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); + if (!skb) + return NULL; + ++ /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte ++ * Actually for some reason the steps are: ++ * 0: nothing ++ * 1-4: first 4 byte ++ * 5-6: first 12 byte ++ * 7-15: all 16 byte ++ */ ++ if (len == 16) ++ real_len = 15; ++ else ++ real_len = len; ++ + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb->len); + +@@ -254,7 +270,7 @@ static struct sk_buff *qca8k_alloc_mdio_ + hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); + + mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); +- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, 4); ++ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); + mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, + QCA_HDR_MGMT_CHECK_CODE_VAL); +@@ -264,7 +280,9 @@ static struct sk_buff *qca8k_alloc_mdio_ + + mgmt_ethhdr->hdr = htons(hdr); + +- skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); ++ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); ++ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) ++ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); + + return skb; + } +@@ -277,7 +295,7 @@ static void qca8k_mdio_header_fill_seq_n + mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); + } + +-static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val) ++static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) + { + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; + struct sk_buff *skb; +@@ -285,7 +303,7 @@ static int qca8k_read_eth(struct qca8k_p + int ret; + + skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL, +- QCA8K_ETHERNET_MDIO_PRIORITY); ++ QCA8K_ETHERNET_MDIO_PRIORITY, len); + if (!skb) + return -ENOMEM; + +@@ -313,6 +331,9 @@ static int qca8k_read_eth(struct qca8k_p + msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)); + + *val = mgmt_eth_data->data[0]; ++ if (len > QCA_HDR_MGMT_DATA1_LEN) ++ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN); ++ + ack = mgmt_eth_data->ack; + + mutex_unlock(&mgmt_eth_data->mutex); +@@ -326,15 +347,15 @@ static int qca8k_read_eth(struct qca8k_p + return 0; + } + +-static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 val) ++static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) + { + struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data; + struct sk_buff *skb; + bool ack; + int ret; + +- skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, &val, +- QCA8K_ETHERNET_MDIO_PRIORITY); ++ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val, ++ QCA8K_ETHERNET_MDIO_PRIORITY, len); + if (!skb) + return -ENOMEM; + +@@ -380,14 +401,14 @@ qca8k_regmap_update_bits_eth(struct qca8 + u32 val = 0; + int ret; + +- ret = qca8k_read_eth(priv, reg, &val); ++ ret = qca8k_read_eth(priv, reg, &val, sizeof(val)); + if (ret) + return ret; + + val &= ~mask; + val |= write_val; + +- return qca8k_write_eth(priv, reg, val); ++ 
return qca8k_write_eth(priv, reg, &val, sizeof(val)); + } + + static int +@@ -398,7 +419,7 @@ qca8k_regmap_read(void *ctx, uint32_t re + u16 r1, r2, page; + int ret; + +- if (!qca8k_read_eth(priv, reg, val)) ++ if (!qca8k_read_eth(priv, reg, val, sizeof(val))) + return 0; + + qca8k_split_addr(reg, &r1, &r2, &page); +@@ -424,7 +445,7 @@ qca8k_regmap_write(void *ctx, uint32_t r + u16 r1, r2, page; + int ret; + +- if (!qca8k_write_eth(priv, reg, val)) ++ if (!qca8k_write_eth(priv, reg, &val, sizeof(val))) + return 0; + + qca8k_split_addr(reg, &r1, &r2, &page); +@@ -959,21 +980,21 @@ qca8k_phy_eth_command(struct qca8k_priv + } + + /* Prealloc all the needed skb before the lock */ +- write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, +- &write_val, QCA8K_ETHERNET_PHY_PRIORITY); ++ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, ++ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val)); + if (!write_skb) + return -ENOMEM; + +- clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, +- &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); ++ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, ++ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); + if (!write_skb) { + ret = -ENOMEM; + goto err_clear_skb; + } + +- read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, +- &clear_val, QCA8K_ETHERNET_PHY_PRIORITY); +- if (!write_skb) { ++ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, ++ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); ++ if (!read_skb) { + ret = -ENOMEM; + goto err_read_skb; + } diff --git a/target/linux/generic/backport-5.15/766-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch b/target/linux/generic/backport-5.15/766-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch new file mode 100644 index 000000000..f26c6b91a --- /dev/null +++ b/target/linux/generic/backport-5.15/766-16-net-dsa-qca8k-introduce-qca8k_bulk_read-write-functi.patch @@ -0,0 +1,104 @@ +From 4f3701fc599820568ba4395070d34e4248800fc0 Mon Sep 17 00:00:00 2001 +From: Ansuel Smith +Date: Wed, 2 Feb 2022 01:03:35 +0100 +Subject: [PATCH 16/16] net: dsa: qca8k: introduce qca8k_bulk_read/write + function + +Introduce qca8k_bulk_read/write() function to use mgmt Ethernet way to +read/write packet in bulk. Make use of this new function in the fdb +function and while at it reduce the reg for fdb_read from 4 to 3 as the +max bit for the ARL(fdb) table is 83 bits. + +Signed-off-by: Ansuel Smith +Signed-off-by: David S. 
Miller +--- + drivers/net/dsa/qca8k.c | 55 ++++++++++++++++++++++++++++++++--------- + 1 file changed, 43 insertions(+), 12 deletions(-) + +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -412,6 +412,43 @@ qca8k_regmap_update_bits_eth(struct qca8 + } + + static int ++qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len) ++{ ++ int i, count = len / sizeof(u32), ret; ++ ++ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len)) ++ return 0; ++ ++ for (i = 0; i < count; i++) { ++ ret = regmap_read(priv->regmap, reg + (i * 4), val + i); ++ if (ret < 0) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len) ++{ ++ int i, count = len / sizeof(u32), ret; ++ u32 tmp; ++ ++ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len)) ++ return 0; ++ ++ for (i = 0; i < count; i++) { ++ tmp = val[i]; ++ ++ ret = regmap_write(priv->regmap, reg + (i * 4), tmp); ++ if (ret < 0) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int + qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; +@@ -546,17 +583,13 @@ qca8k_busy_wait(struct qca8k_priv *priv, + static int + qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) + { +- u32 reg[4], val; +- int i, ret; ++ u32 reg[3]; ++ int ret; + + /* load the ARL table into an array */ +- for (i = 0; i < 4; i++) { +- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val); +- if (ret < 0) +- return ret; +- +- reg[i] = val; +- } ++ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); ++ if (ret) ++ return ret; + + /* vid - 83:72 */ + fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); +@@ -580,7 +613,6 @@ qca8k_fdb_write(struct qca8k_priv *priv, + u8 aging) + { + u32 reg[3] = { 0 }; +- int i; + + /* vid - 83:72 */ + reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid); +@@ -597,8 +629,7 @@ qca8k_fdb_write(struct qca8k_priv *priv, + reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]); + + /* load the array into the ARL table */ +- for (i = 0; i < 3; i++) +- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]); ++ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg)); + } + + static int diff --git a/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch b/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch new file mode 100644 index 000000000..44093eab9 --- /dev/null +++ b/target/linux/generic/backport-5.15/773-v5.18-1-net-dsa-Move-VLAN-filtering-syncing-out-of-dsa_switc.patch @@ -0,0 +1,77 @@ +From 381a730182f1d174e1950cd4e63e885b1c302051 Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz +Date: Mon, 24 Jan 2022 22:09:43 +0100 +Subject: net: dsa: Move VLAN filtering syncing out of dsa_switch_bridge_leave + +Most of dsa_switch_bridge_leave was, in fact, dealing with the syncing +of VLAN filtering for switches on which that is a global +setting. Separate the two phases to prepare for the cross-chip related +bugfix in the following commit. + +Signed-off-by: Tobias Waldekranz +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + net/dsa/switch.c | 38 +++++++++++++++++++++++++------------- + 1 file changed, 25 insertions(+), 13 deletions(-) + +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -113,25 +113,14 @@ static int dsa_switch_bridge_join(struct + return dsa_tag_8021q_bridge_join(ds, info); + } + +-static int dsa_switch_bridge_leave(struct dsa_switch *ds, +- struct dsa_notifier_bridge_info *info) ++static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds, ++ struct dsa_notifier_bridge_info *info) + { +- struct dsa_switch_tree *dst = ds->dst; + struct netlink_ext_ack extack = {0}; + bool change_vlan_filtering = false; + bool vlan_filtering; + int err, port; + +- if (dst->index == info->tree_index && ds->index == info->sw_index && +- ds->ops->port_bridge_leave) +- ds->ops->port_bridge_leave(ds, info->port, info->br); +- +- if ((dst->index != info->tree_index || ds->index != info->sw_index) && +- ds->ops->crosschip_bridge_leave) +- ds->ops->crosschip_bridge_leave(ds, info->tree_index, +- info->sw_index, info->port, +- info->br); +- + if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) { + change_vlan_filtering = true; + vlan_filtering = true; +@@ -172,6 +161,29 @@ static int dsa_switch_bridge_leave(struc + return err; + } + ++ return 0; ++} ++ ++static int dsa_switch_bridge_leave(struct dsa_switch *ds, ++ struct dsa_notifier_bridge_info *info) ++{ ++ struct dsa_switch_tree *dst = ds->dst; ++ int err; ++ ++ if (dst->index == info->tree_index && ds->index == info->sw_index && ++ ds->ops->port_bridge_leave) ++ ds->ops->port_bridge_leave(ds, info->port, info->br); ++ ++ if ((dst->index != info->tree_index || ds->index != info->sw_index) && ++ ds->ops->crosschip_bridge_leave) ++ ds->ops->crosschip_bridge_leave(ds, info->tree_index, ++ info->sw_index, info->port, ++ info->br); ++ ++ err = dsa_switch_sync_vlan_filtering(ds, info); ++ if (err) ++ return err; ++ + return dsa_tag_8021q_bridge_leave(ds, info); + } + diff --git a/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch b/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch new file mode 100644 index 000000000..cdddbcf14 --- /dev/null +++ b/target/linux/generic/backport-5.15/773-v5.18-2-net-dsa-Avoid-cross-chip-syncing-of-VLAN-filtering.patch @@ -0,0 +1,52 @@ +From 108dc8741c203e9d6ce4e973367f1bac20c7192b Mon Sep 17 00:00:00 2001 +From: Tobias Waldekranz +Date: Mon, 24 Jan 2022 22:09:44 +0100 +Subject: net: dsa: Avoid cross-chip syncing of VLAN filtering + +Changes to VLAN filtering are not applicable to cross-chip +notifications. + +On a system like this: + +.-----. .-----. .-----. +| sw1 +---+ sw2 +---+ sw3 | +'-1-2-' '-1-2-' '-1-2-' + +Before this change, upon sw1p1 leaving a bridge, a call to +dsa_port_vlan_filtering would also be made to sw2p1 and sw3p1. + +In this scenario: + +.---------. .-----. .-----. +| sw1 +---+ sw2 +---+ sw3 | +'-1-2-3-4-' '-1-2-' '-1-2-' + +When sw1p4 would leave a bridge, dsa_port_vlan_filtering would be +called for sw2 and sw3 with a non-existing port - leading to array +out-of-bounds accesses and crashes on mv88e6xxx. + +Fixes: d371b7c92d19 ("net: dsa: Unset vlan_filtering when ports leave the bridge") +Signed-off-by: Tobias Waldekranz +Reviewed-by: Vladimir Oltean +Signed-off-by: David S. 
Miller +--- + net/dsa/switch.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/net/dsa/switch.c ++++ b/net/dsa/switch.c +@@ -180,9 +180,11 @@ static int dsa_switch_bridge_leave(struc + info->sw_index, info->port, + info->br); + +- err = dsa_switch_sync_vlan_filtering(ds, info); +- if (err) +- return err; ++ if (ds->dst->index == info->tree_index && ds->index == info->sw_index) { ++ err = dsa_switch_sync_vlan_filtering(ds, info); ++ if (err) ++ return err; ++ } + + return dsa_tag_8021q_bridge_leave(ds, info); + } diff --git a/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch b/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch new file mode 100644 index 000000000..19a4be2a9 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0001-PCI-pci-bridge-emul-Add-description-for-class_revisi.patch @@ -0,0 +1,44 @@ +From 9319230ac147067652b58fe849ffe0ceec098665 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:03 +0100 +Subject: [PATCH] PCI: pci-bridge-emul: Add description for class_revision + field +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +The current assignment to the class_revision member + + class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16); + +can make the reader think that class is at high 16 bits of the member and +revision at low 16 bits. + +In reality, class is at high 24 bits, but the class for PCI Bridge Normal +Decode is PCI_CLASS_BRIDGE_PCI << 8. + +Change the assignment and add a comment to make this clearer. + +Link: https://lore.kernel.org/r/20211130172913.9727-2-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/pci-bridge-emul.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -284,7 +284,11 @@ int pci_bridge_emul_init(struct pci_brid + { + BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END); + +- bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16); ++ /* ++ * class_revision: Class is high 24 bits and revision is low 8 bit of this member, ++ * while class for PCI Bridge Normal Decode has the 24-bit value: PCI_CLASS_BRIDGE_PCI << 8 ++ */ ++ bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8); + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; + bridge->conf.cache_line_size = 0x10; + bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch b/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch new file mode 100644 index 000000000..3dd82710e --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0002-PCI-pci-bridge-emul-Add-definitions-for-missing-capa.patch @@ -0,0 +1,73 @@ +From 8ea673a8b30b4a32516b8adabb15e2a68ff02ec8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:04 +0100 +Subject: [PATCH] PCI: pci-bridge-emul: Add definitions for missing + capabilities registers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +pci-bridge-emul driver already allocates buffer for capabilities up to the +PCI_EXP_SLTSTA2 register, but does not define bit access 
behavior for these +registers. Add these missing definitions. + +Link: https://lore.kernel.org/r/20211130172913.9727-3-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/pci-bridge-emul.c | 43 +++++++++++++++++++++++++++++++++++ + 1 file changed, 43 insertions(+) + +--- a/drivers/pci/pci-bridge-emul.c ++++ b/drivers/pci/pci-bridge-emul.c +@@ -270,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_ + .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING, + .w1c = PCI_EXP_RTSTA_PME, + }, ++ ++ [PCI_EXP_DEVCAP2 / 4] = { ++ /* ++ * Device capabilities 2 register has reserved bits [30:27]. ++ * Also bits [26:24] are reserved for non-upstream ports. ++ */ ++ .ro = BIT(31) | GENMASK(23, 0), ++ }, ++ ++ [PCI_EXP_DEVCTL2 / 4] = { ++ /* ++ * Device control 2 register is RW. Bit 11 is reserved for ++ * non-upstream ports. ++ * ++ * Device status 2 register is reserved. ++ */ ++ .rw = GENMASK(15, 12) | GENMASK(10, 0), ++ }, ++ ++ [PCI_EXP_LNKCAP2 / 4] = { ++ /* Link capabilities 2 register has reserved bits [30:25] and 0. */ ++ .ro = BIT(31) | GENMASK(24, 1), ++ }, ++ ++ [PCI_EXP_LNKCTL2 / 4] = { ++ /* ++ * Link control 2 register is RW. ++ * ++ * Link status 2 register has bits 5, 15 W1C; ++ * bits 10, 11 reserved and others are RO. ++ */ ++ .rw = GENMASK(15, 0), ++ .w1c = (BIT(15) | BIT(5)) << 16, ++ .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16, ++ }, ++ ++ [PCI_EXP_SLTCAP2 / 4] = { ++ /* Slot capabilities 2 register is reserved. */ ++ }, ++ ++ [PCI_EXP_SLTCTL2 / 4] = { ++ /* Both Slot control 2 and Slot status 2 registers are reserved. */ ++ }, + }; + + /* diff --git a/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch b/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch new file mode 100644 index 000000000..b84eb442a --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0003-PCI-aardvark-Add-support-for-DEVCAP2-DEVCTL2-LNKCAP2.patch @@ -0,0 +1,61 @@ +From 1d3e170344dff2cef8827db6c09909b78cbc11d7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:05 +0100 +Subject: [PATCH] PCI: aardvark: Add support for DEVCAP2, DEVCTL2, LNKCAP2 and + LNKCTL2 registers on emulated bridge +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +PCI aardvark hardware supports access to DEVCAP2, DEVCTL2, LNKCAP2 and +LNKCTL2 configuration registers of PCIe core via PCIE_CORE_PCIEXP_CAP. +Export them via emulated software root bridge. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-4-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 15 +++++++++++---- + 1 file changed, 11 insertions(+), 4 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -874,8 +874,13 @@ advk_pci_bridge_emul_pcie_conf_read(stru + + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVCAP2: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCAP2: ++ case PCI_EXP_LNKCTL2: + *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); + return PCI_BRIDGE_EMUL_HANDLED; ++ + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } +@@ -889,10 +894,6 @@ advk_pci_bridge_emul_pcie_conf_write(str + struct advk_pcie *pcie = bridge->data; + + switch (reg) { +- case PCI_EXP_DEVCTL: +- advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); +- break; +- + case PCI_EXP_LNKCTL: + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); + if (new & PCI_EXP_LNKCTL_RL) +@@ -914,6 +915,12 @@ advk_pci_bridge_emul_pcie_conf_write(str + advk_writel(pcie, new, PCIE_ISR0_REG); + break; + ++ case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCTL2: ++ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); ++ break; ++ + default: + break; + } diff --git a/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch b/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch new file mode 100644 index 000000000..a22aacf17 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0004-PCI-aardvark-Clear-all-MSIs-at-setup.patch @@ -0,0 +1,59 @@ +From 7d8dc1f7cd007a7ce94c5b4c20d63a8b8d6d7751 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:06 +0100 +Subject: [PATCH] PCI: aardvark: Clear all MSIs at setup +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We already clear all the other interrupts (ISR0, ISR1, HOST_CTRL_INT). + +Define a new macro PCIE_MSI_ALL_MASK and do the same clearing for MSIs, +to ensure that we don't start receiving spurious interrupts. 
+ +Use this new mask in advk_pcie_handle_msi(); + +Link: https://lore.kernel.org/r/20211130172913.9727-5-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -115,6 +115,7 @@ + #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) + #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) + #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) ++#define PCIE_MSI_ALL_MASK GENMASK(31, 0) + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) + #define PCIE_MSI_DATA_MASK GENMASK(15, 0) + +@@ -570,6 +571,7 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Clear all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); +@@ -582,7 +584,7 @@ static void advk_pcie_setup_hw(struct ad + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSIs */ +- advk_writel(pcie, 0, PCIE_MSI_MASK_REG); ++ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); +@@ -1392,7 +1394,7 @@ static void advk_pcie_handle_msi(struct + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); +- msi_status = msi_val & ~msi_mask; ++ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK); + + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { + if (!(BIT(msi_idx) & msi_status)) diff --git a/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch b/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch new file mode 100644 index 000000000..16e09bc1f --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0005-PCI-aardvark-Comment-actions-in-driver-remove-method.patch @@ -0,0 +1,34 @@ +From a4ca7948e1d47275f8f3e5023243440c40561916 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:07 +0100 +Subject: [PATCH] PCI: aardvark: Comment actions in driver remove method +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Add two more comments into the advk_pcie_remove() method. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-6-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1688,11 +1688,13 @@ static int advk_pcie_remove(struct platf + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + int i; + ++ /* Remove PCI bus with all devices */ + pci_lock_rescan_remove(); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + pci_unlock_rescan_remove(); + ++ /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + diff --git a/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch b/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch new file mode 100644 index 000000000..6c48b3bc2 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0006-PCI-aardvark-Disable-bus-mastering-when-unbinding-dr.patch @@ -0,0 +1,41 @@ +From a46f2f6dd4093438d9615dfbf5c0fea2a9835dba Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:08 +0100 +Subject: [PATCH] PCI: aardvark: Disable bus mastering when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Ensure that after driver unbind PCIe cards are not able to forward +memory and I/O requests in the upstream direction. + +Link: https://lore.kernel.org/r/20211130172913.9727-7-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1686,6 +1686,7 @@ static int advk_pcie_remove(struct platf + { + struct advk_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); ++ u32 val; + int i; + + /* Remove PCI bus with all devices */ +@@ -1694,6 +1695,11 @@ static int advk_pcie_remove(struct platf + pci_remove_root_bus(bridge->bus); + pci_unlock_rescan_remove(); + ++ /* Disable Root Bridge I/O space, memory space and bus mastering */ ++ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); ++ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); ++ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); ++ + /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch b/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch new file mode 100644 index 000000000..3cc5ba667 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0007-PCI-aardvark-Mask-all-interrupts-when-unbinding-driv.patch @@ -0,0 +1,48 @@ +From 13bcdf07cb2ecff5d45d2c141df2539b15211448 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:09 +0100 +Subject: [PATCH] PCI: aardvark: Mask all interrupts when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Ensure that no interrupt can be triggered after driver unbind. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-8-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1700,6 +1700,27 @@ static int advk_pcie_remove(struct platf + val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG); + ++ /* Disable MSI */ ++ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG); ++ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE; ++ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG); ++ ++ /* Clear MSI address */ ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG); ++ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG); ++ ++ /* Mask all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG); ++ ++ /* Clear all interrupts */ ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); ++ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); ++ + /* Remove IRQ domains */ + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch b/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch new file mode 100644 index 000000000..fdd049b97 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0008-PCI-aardvark-Fix-memory-leak-in-driver-unbind.patch @@ -0,0 +1,33 @@ +From 2f040a17f5061457ae95035326d3159eddc1e5cc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:10 +0100 +Subject: [PATCH] PCI: aardvark: Fix memory leak in driver unbind +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Free config space for emulated root bridge when unbinding driver to fix +memory leak. Do it after disabling and masking all interrupts, since +aardvark interrupt handler accesses config space of emulated root +bridge. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-9-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1725,6 +1725,9 @@ static int advk_pcie_remove(struct platf + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + ++ /* Free config space for emulated root bridge */ ++ pci_bridge_emul_cleanup(&pcie->bridge); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch b/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch new file mode 100644 index 000000000..2e95d26ce --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0009-PCI-aardvark-Assert-PERST-when-unbinding-driver.patch @@ -0,0 +1,33 @@ +From 1f54391be8ce0c981d312cb93acdc5608def576a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:11 +0100 +Subject: [PATCH] PCI: aardvark: Assert PERST# when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Put the PCIe card into reset by asserting PERST# signal when unbinding +driver. It doesn't make sense to leave the card working if it can't +communicate with the host. This should also save some power. + +Link: https://lore.kernel.org/r/20211130172913.9727-10-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1728,6 +1728,10 @@ static int advk_pcie_remove(struct platf + /* Free config space for emulated root bridge */ + pci_bridge_emul_cleanup(&pcie->bridge); + ++ /* Assert PERST# signal which prepares PCIe card for power down */ ++ if (pcie->reset_gpio) ++ gpiod_set_value_cansleep(pcie->reset_gpio, 1); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch b/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch new file mode 100644 index 000000000..826f10a35 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0010-PCI-aardvark-Disable-link-training-when-unbinding-dr.patch @@ -0,0 +1,34 @@ +From 759dec2e3dfdbd261c41d2279f04f2351c971a49 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:12 +0100 +Subject: [PATCH] PCI: aardvark: Disable link training when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Disable link training circuit in driver unbind sequence. We want to +leave link training in the same state as it was before the driver was +probed. 
+ +Link: https://lore.kernel.org/r/20211130172913.9727-11-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1732,6 +1732,11 @@ static int advk_pcie_remove(struct platf + if (pcie->reset_gpio) + gpiod_set_value_cansleep(pcie->reset_gpio, 1); + ++ /* Disable link training */ ++ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG); ++ val &= ~LINK_TRAINING_EN; ++ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG); ++ + /* Disable outbound address windows mapping */ + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); diff --git a/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch b/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch new file mode 100644 index 000000000..c01377a88 --- /dev/null +++ b/target/linux/generic/backport-5.15/850-v5.17-0011-PCI-aardvark-Disable-common-PHY-when-unbinding-drive.patch @@ -0,0 +1,30 @@ +From fdbbe242c15a8f2cd0e3ad8a56cd0a447b771d0d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pali=20Roh=C3=A1r?= +Date: Tue, 30 Nov 2021 18:29:13 +0100 +Subject: [PATCH] PCI: aardvark: Disable common PHY when unbinding driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Disable the PCIe PHY when unbinding driver. This should save some power. + +Link: https://lore.kernel.org/r/20211130172913.9727-12-kabel@kernel.org +Signed-off-by: Pali Rohár +Signed-off-by: Marek Behún +Signed-off-by: Lorenzo Pieralisi +--- + drivers/pci/controller/pci-aardvark.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1741,6 +1741,9 @@ static int advk_pcie_remove(struct platf + for (i = 0; i < OB_WIN_COUNT; i++) + advk_pcie_disable_ob_win(pcie, i); + ++ /* Disable phy */ ++ advk_pcie_disable_phy(pcie); ++ + return 0; + } + diff --git a/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch b/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch new file mode 100644 index 000000000..e9d692b65 --- /dev/null +++ b/target/linux/generic/backport-5.15/860-v5.17-MIPS-ath79-drop-_machine_restart-again.patch @@ -0,0 +1,49 @@ +From d3115128bdafb62628ab41861a4f06f6d02ac320 Mon Sep 17 00:00:00 2001 +From: Lech Perczak +Date: Mon, 10 Jan 2022 23:48:44 +0100 +Subject: MIPS: ath79: drop _machine_restart again + +Commit 81424d0ad0d4 ("MIPS: ath79: Use the reset controller to restart +OF machines") removed setup of _machine_restart on OF machines to use +reset handler in reset controller driver. +While removing remnants of non-OF machines in commit 3a77e0d75eed +("MIPS: ath79: drop machfiles"), this was introduced again, making it +impossible to use additional restart handlers registered through device +tree. Drop setting _machine_restart altogether, and ath79_restart +function, which is no longer used after this. 
+ +Fixes: 3a77e0d75eed ("MIPS: ath79: drop machfiles") +Cc: John Crispin +Cc: Florian Fainelli +Signed-off-by: Lech Perczak +Signed-off-by: Thomas Bogendoerfer +--- + arch/mips/ath79/setup.c | 10 ---------- + 1 file changed, 10 deletions(-) + +--- a/arch/mips/ath79/setup.c ++++ b/arch/mips/ath79/setup.c +@@ -34,15 +34,6 @@ + + static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; + +-static void ath79_restart(char *command) +-{ +- local_irq_disable(); +- ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); +- for (;;) +- if (cpu_wait) +- cpu_wait(); +-} +- + static void ath79_halt(void) + { + while (1) +@@ -234,7 +225,6 @@ void __init plat_mem_setup(void) + + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); + +- _machine_restart = ath79_restart; + _machine_halt = ath79_halt; + pm_power_off = ath79_halt; + } diff --git a/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch b/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch new file mode 100644 index 000000000..fabf17762 --- /dev/null +++ b/target/linux/generic/backport-5.15/870-hwmon-next-hwmon-lm70-Add-ti-tmp125-support.patch @@ -0,0 +1,71 @@ +From 31d8f414e1596ba54a4315418e4c0086fda9e428 Mon Sep 17 00:00:00 2001 +From: Christian Lamparter +Date: Fri, 18 Feb 2022 10:06:43 +0100 +Subject: hwmon: (lm70) Add ti,tmp125 support + +The TMP125 is a 2 degree Celsius accurate Digital +Temperature Sensor with a SPI interface. + +The temperature register is a 16-bit, read-only register. +The MSB (Bit 15) is a leading zero and never set. Bits 14 +to 5 are the 1+9 temperature data bits in a two's +complement format. Bits 4 to 0 are useless copies of +Bit 5 value and therefore ignored. + +Signed-off-by: Christian Lamparter +Link: https://lore.kernel.org/r/43b19cbd4e7f51e9509e561b02b5d8d0e7079fac.1645175187.git.chunkeey@gmail.com +Signed-off-by: Guenter Roeck +--- +--- a/drivers/hwmon/lm70.c ++++ b/drivers/hwmon/lm70.c +@@ -34,6 +34,7 @@ + #define LM70_CHIP_LM71 2 /* NS LM71 */ + #define LM70_CHIP_LM74 3 /* NS LM74 */ + #define LM70_CHIP_TMP122 4 /* TI TMP122/TMP124 */ ++#define LM70_CHIP_TMP125 5 /* TI TMP125 */ + + struct lm70 { + struct spi_device *spi; +@@ -87,6 +88,12 @@ static ssize_t temp1_input_show(struct d + * LM71: + * 14 bits of 2's complement data, discard LSB 2 bits, + * resolution 0.0312 degrees celsius. ++ * ++ * TMP125: ++ * MSB/D15 is a leading zero. D14 is the sign-bit. This is ++ * followed by 9 temperature bits (D13..D5) in 2's complement ++ * data format with a resolution of 0.25 degrees celsius per unit. ++ * LSB 5 bits (D4..D0) share the same value as D5 and get discarded. 
+ */ + switch (p_lm70->chip) { + case LM70_CHIP_LM70: +@@ -102,6 +109,10 @@ static ssize_t temp1_input_show(struct d + case LM70_CHIP_LM71: + val = ((int)raw / 4) * 3125 / 100; + break; ++ ++ case LM70_CHIP_TMP125: ++ val = (sign_extend32(raw, 14) / 32) * 250; ++ break; + } + + status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */ +@@ -136,6 +147,10 @@ static const struct of_device_id lm70_of + .data = (void *) LM70_CHIP_TMP122, + }, + { ++ .compatible = "ti,tmp125", ++ .data = (void *) LM70_CHIP_TMP125, ++ }, ++ { + .compatible = "ti,lm71", + .data = (void *) LM70_CHIP_LM71, + }, +@@ -184,6 +199,7 @@ static const struct spi_device_id lm70_i + { "lm70", LM70_CHIP_LM70 }, + { "tmp121", LM70_CHIP_TMP121 }, + { "tmp122", LM70_CHIP_TMP122 }, ++ { "tmp125", LM70_CHIP_TMP125 }, + { "lm71", LM70_CHIP_LM71 }, + { "lm74", LM70_CHIP_LM74 }, + { }, diff --git a/target/linux/x86/patches-5.15/011-tune_lzma_options.patch b/target/linux/x86/patches-5.15/011-tune_lzma_options.patch deleted file mode 100644 index fe7772aa3..000000000 --- a/target/linux/x86/patches-5.15/011-tune_lzma_options.patch +++ /dev/null @@ -1,22 +0,0 @@ ---- a/scripts/Makefile.lib -+++ b/scripts/Makefile.lib -@@ -413,7 +413,7 @@ quiet_cmd_bzip2 = BZIP2 $@ - # --------------------------------------------------------------------------- - - quiet_cmd_lzma = LZMA $@ -- cmd_lzma = { cat $(real-prereqs) | $(LZMA) e -d20 -lc1 -lp2 -pb2 -eos -si -so; $(size_append); } > $@ -+ cmd_lzma = { cat $(real-prereqs) | $(LZMA) e -lc8 -eos -si -so; $(size_append); } > $@ - - quiet_cmd_lzo = LZO $@ - cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@ ---- a/arch/x86/include/asm/boot.h -+++ b/arch/x86/include/asm/boot.h -@@ -24,7 +24,7 @@ - # error "Invalid value for CONFIG_PHYSICAL_ALIGN" - #endif - --#if defined(CONFIG_KERNEL_BZIP2) -+#if defined(CONFIG_KERNEL_BZIP2) || defined(CONFIG_KERNEL_LZMA) - # define BOOT_HEAP_SIZE 0x400000 - #elif defined(CONFIG_KERNEL_ZSTD) - /* diff --git a/target/linux/x86/patches-5.15/012-pcengines-apu2-detect-apuv4-board.patch b/target/linux/x86/patches-5.15/012-pcengines-apu2-detect-apuv4-board.patch new file mode 100644 index 000000000..520c66d29 --- /dev/null +++ b/target/linux/x86/patches-5.15/012-pcengines-apu2-detect-apuv4-board.patch @@ -0,0 +1,50 @@ +From 3d00da1de3ea36ba44f4a7ba76c8c8b16f98204b Mon Sep 17 00:00:00 2001 +From: "Enrico Weigelt, metux IT consult" +Date: Thu, 12 Dec 2019 14:27:56 +0100 +Subject: [PATCH] platform/x86: pcengines-apuv2: detect apuv4 board + +GPIO stuff on APUv4 seems to be the same as on APUv2, so we just +need to match on DMI data. 
+ +Signed-off-by: Enrico Weigelt, metux IT consult +Signed-off-by: Andy Shevchenko +--- + drivers/platform/x86/pcengines-apuv2.c | 27 ++++++++++++++++++++++++++ + 1 file changed, 27 insertions(+) + +--- a/drivers/platform/x86/pcengines-apuv2.c ++++ b/drivers/platform/x86/pcengines-apuv2.c +@@ -215,6 +215,33 @@ static const struct dmi_system_id apu_gp + }, + .driver_data = (void *)&board_apu2, + }, ++ /* APU4 w/ legacy bios < 4.0.8 */ ++ { ++ .ident = "apu4", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), ++ DMI_MATCH(DMI_BOARD_NAME, "APU4") ++ }, ++ .driver_data = (void *)&board_apu2, ++ }, ++ /* APU4 w/ legacy bios >= 4.0.8 */ ++ { ++ .ident = "apu4", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), ++ DMI_MATCH(DMI_BOARD_NAME, "apu4") ++ }, ++ .driver_data = (void *)&board_apu2, ++ }, ++ /* APU4 w/ mainline bios */ ++ { ++ .ident = "apu4", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), ++ DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4") ++ }, ++ .driver_data = (void *)&board_apu2, ++ }, + {} + }; + diff --git a/target/linux/x86/patches-5.15/090-fix-hv.patch b/target/linux/x86/patches-5.15/090-fix-hv.patch deleted file mode 100644 index f1f9435c2..000000000 --- a/target/linux/x86/patches-5.15/090-fix-hv.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- a/drivers/hv/Kconfig -+++ b/drivers/hv/Kconfig -@@ -18,6 +18,7 @@ config HYPERV_TIMER - config HYPERV_UTILS - tristate "Microsoft Hyper-V Utilities driver" - depends on HYPERV && CONNECTOR && NLS -+ depends on PTP_1588_CLOCK_OPTIONAL - help - Select this option to enable the Hyper-V Utilities. - diff --git a/target/linux/x86/patches-5.15/100-fix_cs5535_clockevt.patch b/target/linux/x86/patches-5.15/100-fix_cs5535_clockevt.patch new file mode 100644 index 000000000..d4de2027b --- /dev/null +++ b/target/linux/x86/patches-5.15/100-fix_cs5535_clockevt.patch @@ -0,0 +1,13 @@ +--- a/drivers/clocksource/timer-cs5535.c ++++ b/drivers/clocksource/timer-cs5535.c +@@ -127,7 +127,9 @@ static irqreturn_t mfgpt_tick(int irq, v + cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, + MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2); + +- cs5535_clockevent.event_handler(&cs5535_clockevent); ++ if (cs5535_clockevent.event_handler) ++ cs5535_clockevent.event_handler(&cs5535_clockevent); ++ + return IRQ_HANDLED; + } +