From 177dd50bc647b2792dff128fc68993a389491b97 Mon Sep 17 00:00:00 2001 From: AmadeusGhost <42570690+AmadeusGhost@users.noreply.github.com> Date: Wed, 25 May 2022 17:18:08 +0800 Subject: [PATCH] ramips: add MediaTek Vendor Ethernet driver --- .../files/drivers/net/ethernet/mtk/Kconfig | 30 + .../files/drivers/net/ethernet/mtk/Makefile | 8 + .../drivers/net/ethernet/mtk/mtk_eth_dbg.c | 1640 +++++++ .../drivers/net/ethernet/mtk/mtk_eth_dbg.h | 287 ++ .../drivers/net/ethernet/mtk/mtk_eth_path.c | 304 ++ .../drivers/net/ethernet/mtk/mtk_eth_reset.c | 405 ++ .../drivers/net/ethernet/mtk/mtk_eth_reset.h | 68 + .../drivers/net/ethernet/mtk/mtk_eth_soc.c | 3850 +++++++++++++++++ .../drivers/net/ethernet/mtk/mtk_eth_soc.h | 1318 ++++++ .../net/ethernet/mtk/mtk_hnat/Makefile | 5 + .../drivers/net/ethernet/mtk/mtk_hnat/hnat.c | 912 ++++ .../drivers/net/ethernet/mtk/mtk_hnat/hnat.h | 975 +++++ .../net/ethernet/mtk/mtk_hnat/hnat_debugfs.c | 2351 ++++++++++ .../net/ethernet/mtk/mtk_hnat/hnat_mcast.c | 354 ++ .../net/ethernet/mtk/mtk_hnat/hnat_mcast.h | 69 + .../net/ethernet/mtk/mtk_hnat/hnat_nf_hook.c | 2379 ++++++++++ .../net/ethernet/mtk/mtk_hnat/hnat_stag.c | 63 + .../net/ethernet/mtk/mtk_hnat/nf_hnat_mtk.h | 129 + .../drivers/net/ethernet/mtk/mtk_sgmii.c | 135 + .../files/drivers/net/phy/mtk/mt753x/Kconfig | 2 + .../files/drivers/net/phy/mtk/mt753x/Makefile | 11 + .../files/drivers/net/phy/mtk/mt753x/mt7530.c | 644 +++ .../files/drivers/net/phy/mtk/mt753x/mt7530.h | 13 + .../files/drivers/net/phy/mtk/mt753x/mt7531.c | 1058 +++++ .../files/drivers/net/phy/mtk/mt753x/mt7531.h | 13 + .../files/drivers/net/phy/mtk/mt753x/mt753x.h | 224 + .../net/phy/mtk/mt753x/mt753x_common.c | 90 + .../drivers/net/phy/mtk/mt753x/mt753x_mdio.c | 861 ++++ .../drivers/net/phy/mtk/mt753x/mt753x_nl.c | 381 ++ .../drivers/net/phy/mtk/mt753x/mt753x_nl.h | 43 + .../drivers/net/phy/mtk/mt753x/mt753x_regs.h | 345 ++ .../net/phy/mtk/mt753x/mt753x_swconfig.c | 517 +++ .../net/phy/mtk/mt753x/mt753x_swconfig.h | 29 + .../drivers/net/phy/mtk/mt753x/mt753x_vlan.c | 193 + .../drivers/net/phy/mtk/mt753x/mt753x_vlan.h | 40 + .../linux/ramips/files/include/net/ra_nat.h | 558 +++ ...ipv6-fix-pskb-expand-head-limitation.patch | 22 + ...t-for-virtual-interface-acceleration.patch | 127 + ...k_eth_soc-add-mtk-dsa-tag-rx-offload.patch | 176 + .../980-add-mtk-vendor-ethernet-drivers.patch | 41 + 40 files changed, 20670 insertions(+) create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/Kconfig create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/Makefile create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_path.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/Makefile create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_debugfs.c create mode 100644 
target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_nf_hook.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_stag.c create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/nf_hnat_mtk.h create mode 100644 target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_sgmii.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Kconfig create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Makefile create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_common.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_mdio.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_regs.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.h create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.c create mode 100644 target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.h create mode 100644 target/linux/ramips/files/include/net/ra_nat.h create mode 100644 target/linux/ramips/patches-5.4/901-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch create mode 100644 target/linux/ramips/patches-5.4/902-mtkhnat-add-support-for-virtual-interface-acceleration.patch create mode 100644 target/linux/ramips/patches-5.4/903-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch create mode 100644 target/linux/ramips/patches-5.4/980-add-mtk-vendor-ethernet-drivers.patch diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/Kconfig b/target/linux/ramips/files/drivers/net/ethernet/mtk/Kconfig new file mode 100644 index 000000000..e33e3de40 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NET_VENDOR_RAW_MEDIATEK + bool "MediaTek ethernet driver" + depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 + ---help--- + If you have a MediaTek SoC with ethernet, say Y. + +if NET_VENDOR_RAW_MEDIATEK + +config NET_MEDIATEK_SOC_GE + tristate "MediaTek SoC Gigabit Ethernet support" + select PHYLINK + ---help--- + This driver supports the gigabit ethernet MACs in the + MediaTek SoC family. + +config MEDIATEK_NETSYS_V2 + tristate "MediaTek Ethernet NETSYS V2 support" + depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC_GE + ---help--- + This option enables MTK Ethernet NETSYS V2 support. + +config NET_MEDIATEK_HNAT + tristate "MediaTek HW NAT support" + depends on NET_MEDIATEK_SOC_GE && NF_CONNTRACK && IP_NF_NAT + ---help--- + This driver supports the hardware Network Address Translation + in the MediaTek MT2701/MT7622/MT7629/MT7621 chipset family.
+ +endif #NET_VENDOR_RAW_MEDIATEK diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/Makefile b/target/linux/ramips/files/drivers/net/ethernet/mtk/Makefile new file mode 100644 index 000000000..d932067c2 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Mediatek SoCs built-in ethernet macs +# + +obj-$(CONFIG_NET_MEDIATEK_SOC_GE) += mtk_eth.o +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o +obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/ diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.c new file mode 100644 index 000000000..50f31af53 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.c @@ -0,0 +1,1640 @@ +/* + * Copyright (C) 2018 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_eth_soc.h" +#include "mtk_eth_dbg.h" +#include "mtk_eth_reset.h" + +u32 hw_lro_agg_num_cnt[MTK_HW_LRO_RING_NUM][MTK_HW_LRO_MAX_AGG_CNT + 1]; +u32 hw_lro_agg_size_cnt[MTK_HW_LRO_RING_NUM][16]; +u32 hw_lro_tot_agg_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_tot_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_agg_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_age_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_seq_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_timestamp_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 hw_lro_norule_flush_cnt[MTK_HW_LRO_RING_NUM]; +u32 mtk_hwlro_stats_ebl; +static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb; +typedef int (*mtk_lro_dbg_func) (int par); + +struct mtk_eth_debug { + struct dentry *root; +}; + +struct mtk_eth *g_eth; + +struct mtk_eth_debug eth_debug; + +void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val) +{ + mutex_lock(ð->mii_bus->mdio_lock); + + _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + _mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff); + _mtk_mdio_write(eth, 0x1f, 0x10, val >> 16); + + mutex_unlock(ð->mii_bus->mdio_lock); +} + +u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg) +{ + u16 high, low; + + mutex_lock(ð->mii_bus->mdio_lock); + + _mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf); + high = _mtk_mdio_read(eth, 0x1f, 0x10); + + mutex_unlock(ð->mii_bus->mdio_lock); + + return (high << 16) | (low & 0xffff); +} + +void mtk_switch_w32(struct mtk_eth *eth, u32 val, unsigned reg) +{ + mtk_w32(eth, val, reg + 0x10000); +} +EXPORT_SYMBOL(mtk_switch_w32); + +u32 mtk_switch_r32(struct mtk_eth *eth, unsigned reg) +{ + return mtk_r32(eth, reg + 0x10000); +} +EXPORT_SYMBOL(mtk_switch_r32); + +static int mtketh_debug_show(struct seq_file *m, void *private) +{ + struct mtk_eth *eth = m->private; + struct mtk_mac *mac = 0; + int i = 0; + + for (i = 0 ; i < MTK_MAX_DEVS ; 
i++) { + if (!eth->mac[i] || + of_phy_is_fixed_link(eth->mac[i]->of_node)) + continue; + mac = eth->mac[i]; + } + return 0; +} + +static int mtketh_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, mtketh_debug_show, inode->i_private); +} + +static const struct file_operations mtketh_debug_fops = { + .open = mtketh_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int mtketh_mt7530sw_debug_show(struct seq_file *m, void *private) +{ + struct mtk_eth *eth = m->private; + u32 offset, data; + int i; + struct mt7530_ranges { + u32 start; + u32 end; + } ranges[] = { + {0x0, 0xac}, + {0x1000, 0x10e0}, + {0x1100, 0x1140}, + {0x1200, 0x1240}, + {0x1300, 0x1340}, + {0x1400, 0x1440}, + {0x1500, 0x1540}, + {0x1600, 0x1640}, + {0x1800, 0x1848}, + {0x1900, 0x1948}, + {0x1a00, 0x1a48}, + {0x1b00, 0x1b48}, + {0x1c00, 0x1c48}, + {0x1d00, 0x1d48}, + {0x1e00, 0x1e48}, + {0x1f60, 0x1ffc}, + {0x2000, 0x212c}, + {0x2200, 0x222c}, + {0x2300, 0x232c}, + {0x2400, 0x242c}, + {0x2500, 0x252c}, + {0x2600, 0x262c}, + {0x3000, 0x3014}, + {0x30c0, 0x30f8}, + {0x3100, 0x3114}, + {0x3200, 0x3214}, + {0x3300, 0x3314}, + {0x3400, 0x3414}, + {0x3500, 0x3514}, + {0x3600, 0x3614}, + {0x4000, 0x40d4}, + {0x4100, 0x41d4}, + {0x4200, 0x42d4}, + {0x4300, 0x43d4}, + {0x4400, 0x44d4}, + {0x4500, 0x45d4}, + {0x4600, 0x46d4}, + {0x4f00, 0x461c}, + {0x7000, 0x7038}, + {0x7120, 0x7124}, + {0x7800, 0x7804}, + {0x7810, 0x7810}, + {0x7830, 0x7830}, + {0x7a00, 0x7a7c}, + {0x7b00, 0x7b04}, + {0x7e00, 0x7e04}, + {0x7ffc, 0x7ffc}, + }; + + if (!mt7530_exist(eth)) + return -EOPNOTSUPP; + + if ((!eth->mac[0] || !of_phy_is_fixed_link(eth->mac[0]->of_node)) && + (!eth->mac[1] || !of_phy_is_fixed_link(eth->mac[1]->of_node))) { + seq_puts(m, "no switch found\n"); + return 0; + } + + for (i = 0 ; i < ARRAY_SIZE(ranges) ; i++) { + for (offset = ranges[i].start; + offset <= ranges[i].end; offset += 4) { + data = mt7530_mdio_r32(eth, offset); + seq_printf(m, "mt7530 switch reg=0x%08x, data=0x%08x\n", + offset, data); + } + } + + return 0; +} + +static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file) +{ + return single_open(file, mtketh_mt7530sw_debug_show, inode->i_private); +} + +static const struct file_operations mtketh_debug_mt7530sw_fops = { + .open = mtketh_debug_mt7530sw_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t mtketh_mt7530sw_debugfs_write(struct file *file, + const char __user *ptr, + size_t len, loff_t *off) +{ + struct mtk_eth *eth = file->private_data; + char buf[32], *token, *p = buf; + u32 reg, value, phy; + int ret; + + if (!mt7530_exist(eth)) + return -EOPNOTSUPP; + + if (*off != 0) + return 0; + + if (len > sizeof(buf) - 1) + len = sizeof(buf) - 1; + + ret = strncpy_from_user(buf, ptr, len); + if (ret < 0) + return ret; + buf[len] = '\0'; + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)&phy)) + return -EINVAL; + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)®)) + return -EINVAL; + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)&value)) + return -EINVAL; + + pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__, + 0x1f, reg, value); + mt7530_mdio_w32(eth, reg, value); + pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__, + 0x1f, reg, mt7530_mdio_r32(eth, reg)); + + return len; +} + +static ssize_t 
mtketh_debugfs_write(struct file *file, const char __user *ptr, + size_t len, loff_t *off) +{ + struct mtk_eth *eth = file->private_data; + char buf[32], *token, *p = buf; + u32 reg, value, phy; + int ret; + + if (*off != 0) + return 0; + + if (len > sizeof(buf) - 1) + len = sizeof(buf) - 1; + + ret = strncpy_from_user(buf, ptr, len); + if (ret < 0) + return ret; + buf[len] = '\0'; + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)&phy)) + return -EINVAL; + + token = strsep(&p, " "); + + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)®)) + return -EINVAL; + + token = strsep(&p, " "); + + if (!token) + return -EINVAL; + if (kstrtoul(token, 16, (unsigned long *)&value)) + return -EINVAL; + + pr_info("%s:phy=%d, reg=0x%x, val=0x%x\n", __func__, + phy, reg, value); + + _mtk_mdio_write(eth, phy, reg, value); + + pr_info("%s:phy=%d, reg=0x%x, val=0x%x confirm..\n", __func__, + phy, reg, _mtk_mdio_read(eth, phy, reg)); + + return len; +} + +static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr, + size_t len, loff_t *off) +{ + struct mtk_eth *eth = file->private_data; + + atomic_inc(&force); + schedule_work(ð->pending_work); + return len; +} + +static const struct file_operations fops_reg_w = { + .owner = THIS_MODULE, + .open = simple_open, + .write = mtketh_debugfs_write, + .llseek = noop_llseek, +}; + +static const struct file_operations fops_eth_reset = { + .owner = THIS_MODULE, + .open = simple_open, + .write = mtketh_debugfs_reset, + .llseek = noop_llseek, +}; + +static const struct file_operations fops_mt7530sw_reg_w = { + .owner = THIS_MODULE, + .open = simple_open, + .write = mtketh_mt7530sw_debugfs_write, + .llseek = noop_llseek, +}; + +void mtketh_debugfs_exit(struct mtk_eth *eth) +{ + debugfs_remove_recursive(eth_debug.root); +} + +int mtketh_debugfs_init(struct mtk_eth *eth) +{ + int ret = 0; + + eth_debug.root = debugfs_create_dir("mtketh", NULL); + if (!eth_debug.root) { + dev_notice(eth->dev, "%s:err at %d\n", __func__, __LINE__); + ret = -ENOMEM; + } + + debugfs_create_file("phy_regs", S_IRUGO, + eth_debug.root, eth, &mtketh_debug_fops); + debugfs_create_file("phy_reg_w", S_IFREG | S_IWUSR, + eth_debug.root, eth, &fops_reg_w); + debugfs_create_file("reset", S_IFREG | S_IWUSR, + eth_debug.root, eth, &fops_eth_reset); + if (mt7530_exist(eth)) { + debugfs_create_file("mt7530sw_regs", S_IRUGO, + eth_debug.root, eth, + &mtketh_debug_mt7530sw_fops); + debugfs_create_file("mt7530sw_reg_w", S_IFREG | S_IWUSR, + eth_debug.root, eth, + &fops_mt7530sw_reg_w); + } + return ret; +} + +void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register, + u32 *read_data) +{ + if (mt7530_exist(eth) && phy_addr == 31) + *read_data = mt7530_mdio_r32(eth, phy_register); + + else + *read_data = _mtk_mdio_read(eth, phy_addr, phy_register); +} + +void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register, + u32 write_data) +{ + if (mt7530_exist(eth) && phy_addr == 31) + mt7530_mdio_w32(eth, phy_register, write_data); + + else + _mtk_mdio_write(eth, phy_addr, phy_register, write_data); +} + +static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data) +{ + mtk_cl45_ind_read(eth, port, devad, reg, data); +} + +static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data) +{ + mtk_cl45_ind_write(eth, port, devad, reg, data); +} + +int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + 
struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_mii_ioctl_data mii; + struct mtk_esw_reg reg; + u16 val; + + switch (cmd) { + case MTKETH_MII_READ: + if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) + goto err_copy; + mii_mgr_read_combine(eth, mii.phy_id, mii.reg_num, + &mii.val_out); + if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii))) + goto err_copy; + + return 0; + case MTKETH_MII_WRITE: + if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) + goto err_copy; + mii_mgr_write_combine(eth, mii.phy_id, mii.reg_num, + mii.val_in); + return 0; + case MTKETH_MII_READ_CL45: + if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) + goto err_copy; + mii_mgr_read_cl45(eth, + mdio_phy_id_prtad(mii.phy_id), + mdio_phy_id_devad(mii.phy_id), + mii.reg_num, + &val); + mii.val_out = val; + if (copy_to_user(ifr->ifr_data, &mii, sizeof(mii))) + goto err_copy; + + return 0; + case MTKETH_MII_WRITE_CL45: + if (copy_from_user(&mii, ifr->ifr_data, sizeof(mii))) + goto err_copy; + val = mii.val_in; + mii_mgr_write_cl45(eth, + mdio_phy_id_prtad(mii.phy_id), + mdio_phy_id_devad(mii.phy_id), + mii.reg_num, + val); + return 0; + case MTKETH_ESW_REG_READ: + if (!mt7530_exist(eth)) + return -EOPNOTSUPP; + if (copy_from_user(®, ifr->ifr_data, sizeof(reg))) + goto err_copy; + if (reg.off > REG_ESW_MAX) + return -EINVAL; + reg.val = mtk_switch_r32(eth, reg.off); + + if (copy_to_user(ifr->ifr_data, ®, sizeof(reg))) + goto err_copy; + + return 0; + case MTKETH_ESW_REG_WRITE: + if (!mt7530_exist(eth)) + return -EOPNOTSUPP; + if (copy_from_user(®, ifr->ifr_data, sizeof(reg))) + goto err_copy; + if (reg.off > REG_ESW_MAX) + return -EINVAL; + mtk_switch_w32(eth, reg.val, reg.off); + + return 0; + default: + break; + } + + return -EOPNOTSUPP; +err_copy: + return -EFAULT; +} + +int esw_cnt_read(struct seq_file *seq, void *v) +{ + unsigned int pkt_cnt = 0; + int i = 0; + struct mtk_eth *eth = g_eth; + unsigned int mib_base = MTK_GDM1_TX_GBCNT; + + seq_puts(seq, "\n <>\n"); + seq_puts(seq, " |\n"); + seq_puts(seq, "+-----------------------------------------------+\n"); + seq_puts(seq, "| <> |\n"); + seq_puts(seq, "+-----------------------------------------------+\n"); + seq_puts(seq, " |\n"); + seq_puts(seq, "+-----------------------------------------------+\n"); + seq_puts(seq, "| <> |\n"); + seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n", + mtk_r32(eth, mib_base)); + seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", + mtk_r32(eth, mib_base+0x08)); + seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n", + mtk_r32(eth, mib_base+0x10)); + seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", + mtk_r32(eth, mib_base+0x14)); + seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", + mtk_r32(eth, mib_base+0x18)); + seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", + mtk_r32(eth, mib_base+0x1C)); + seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n", + mtk_r32(eth, mib_base+0x20)); + seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", + mtk_r32(eth, mib_base+0x24)); + seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n", + mtk_r32(eth, mib_base+0x28)); + seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n", + mtk_r32(eth, mib_base+0x2C)); + seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n", + mtk_r32(eth, mib_base+0x30)); + seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", + mtk_r32(eth, mib_base+0x38)); + seq_puts(seq, "| |\n"); + seq_printf(seq, "| 
GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n", + mtk_r32(eth, mib_base+0x40)); + seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", + mtk_r32(eth, mib_base+0x48)); + seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n", + mtk_r32(eth, mib_base+0x50)); + seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", + mtk_r32(eth, mib_base+0x54)); + seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", + mtk_r32(eth, mib_base+0x58)); + seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", + mtk_r32(eth, mib_base+0x5C)); + seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n", + mtk_r32(eth, mib_base+0x60)); + seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", + mtk_r32(eth, mib_base+0x64)); + seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", + mtk_r32(eth, mib_base+0x68)); + seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", + mtk_r32(eth, mib_base+0x6C)); + seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n", + mtk_r32(eth, mib_base+0x70)); + seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", + mtk_r32(eth, mib_base+0x78)); + seq_puts(seq, "+-----------------------------------------------+\n"); + + if (!mt7530_exist(eth)) + return 0; + +#define DUMP_EACH_PORT(base) \ + do { \ + for (i = 0; i < 7; i++) { \ + pkt_cnt = mt7530_mdio_r32(eth, (base) + (i * 0x100));\ + seq_printf(seq, "%8u ", pkt_cnt); \ + } \ + seq_puts(seq, "\n"); \ + } while (0) + + seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n", + "Port0", "Port1", "Port2", "Port3", "Port4", "Port5", + "Port6"); + seq_puts(seq, "Tx Drop Packet :"); + DUMP_EACH_PORT(0x4000); + seq_puts(seq, "Tx CRC Error :"); + DUMP_EACH_PORT(0x4004); + seq_puts(seq, "Tx Unicast Packet :"); + DUMP_EACH_PORT(0x4008); + seq_puts(seq, "Tx Multicast Packet :"); + DUMP_EACH_PORT(0x400C); + seq_puts(seq, "Tx Broadcast Packet :"); + DUMP_EACH_PORT(0x4010); + seq_puts(seq, "Tx Collision Event :"); + DUMP_EACH_PORT(0x4014); + seq_puts(seq, "Tx Pause Packet :"); + DUMP_EACH_PORT(0x402C); + seq_puts(seq, "Rx Drop Packet :"); + DUMP_EACH_PORT(0x4060); + seq_puts(seq, "Rx Filtering Packet :"); + DUMP_EACH_PORT(0x4064); + seq_puts(seq, "Rx Unicast Packet :"); + DUMP_EACH_PORT(0x4068); + seq_puts(seq, "Rx Multicast Packet :"); + DUMP_EACH_PORT(0x406C); + seq_puts(seq, "Rx Broadcast Packet :"); + DUMP_EACH_PORT(0x4070); + seq_puts(seq, "Rx Alignment Error :"); + DUMP_EACH_PORT(0x4074); + seq_puts(seq, "Rx CRC Error :"); + DUMP_EACH_PORT(0x4078); + seq_puts(seq, "Rx Undersize Error :"); + DUMP_EACH_PORT(0x407C); + seq_puts(seq, "Rx Fragment Error :"); + DUMP_EACH_PORT(0x4080); + seq_puts(seq, "Rx Oversize Error :"); + DUMP_EACH_PORT(0x4084); + seq_puts(seq, "Rx Jabber Error :"); + DUMP_EACH_PORT(0x4088); + seq_puts(seq, "Rx Pause Packet :"); + DUMP_EACH_PORT(0x408C); + mt7530_mdio_w32(eth, 0x4fe0, 0xf0); + mt7530_mdio_w32(eth, 0x4fe0, 0x800000f0); + + seq_puts(seq, "\n"); + + return 0; +} + +static int switch_count_open(struct inode *inode, struct file *file) +{ + return single_open(file, esw_cnt_read, 0); +} + +static const struct file_operations switch_count_fops = { + .owner = THIS_MODULE, + .open = switch_count_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring; + +int tx_ring_read(struct seq_file *seq, void *v) +{ + struct mtk_tx_ring *ring = &g_eth->tx_ring; + struct mtk_tx_dma *tx_ring; + int i = 0; + + tx_ring = + kmalloc(sizeof(struct 
mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL); + if (!tx_ring) { + seq_puts(seq, " allocate temp tx_ring fail.\n"); + return 0; + } + + for (i = 0; i < MTK_DMA_SIZE; i++) + tx_ring[i] = ring->dma[i]; + + seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count)); + seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma)); + seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma)); + for (i = 0; i < MTK_DMA_SIZE; i++) { + dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring); + + seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp, + *(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2, + *(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + seq_printf(seq, " %08x %08x %08x %08x", + *(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6, + *(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8); +#endif + seq_printf(seq, "\n"); + } + + kfree(tx_ring); + return 0; +} + +static int tx_ring_open(struct inode *inode, struct file *file) +{ + return single_open(file, tx_ring_read, NULL); +} + +static const struct file_operations tx_ring_fops = { + .owner = THIS_MODULE, + .open = tx_ring_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +int hwtx_ring_read(struct seq_file *seq, void *v) +{ + struct mtk_eth *eth = g_eth; + struct mtk_tx_dma *hwtx_ring; + int i = 0; + + hwtx_ring = + kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL); + if (!hwtx_ring) { + seq_puts(seq, " allocate temp hwtx_ring fail.\n"); + return 0; + } + + for (i = 0; i < MTK_DMA_SIZE; i++) + hwtx_ring[i] = eth->scratch_ring[i]; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + dma_addr_t addr = eth->phy_scratch_ring + i * sizeof(*hwtx_ring); + + seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr, + *(int *)&hwtx_ring[i].txd1, *(int *)&hwtx_ring[i].txd2, + *(int *)&hwtx_ring[i].txd3, *(int *)&hwtx_ring[i].txd4); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + seq_printf(seq, " %08x %08x %08x %08x", + *(int *)&hwtx_ring[i].txd5, *(int *)&hwtx_ring[i].txd6, + *(int *)&hwtx_ring[i].txd7, *(int *)&hwtx_ring[i].txd8); +#endif + seq_printf(seq, "\n"); + } + + kfree(hwtx_ring); + return 0; +} + +static int hwtx_ring_open(struct inode *inode, struct file *file) +{ + return single_open(file, hwtx_ring_read, NULL); +} + +static const struct file_operations hwtx_ring_fops = { + .owner = THIS_MODULE, + .open = hwtx_ring_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +int rx_ring_read(struct seq_file *seq, void *v) +{ + struct mtk_rx_ring *ring = &g_eth->rx_ring[0]; + struct mtk_rx_dma *rx_ring; + + int i = 0; + + rx_ring = + kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL); + if (!rx_ring) { + seq_puts(seq, " allocate temp rx_ring fail.\n"); + return 0; + } + + for (i = 0; i < MTK_DMA_SIZE; i++) + rx_ring[i] = ring->dma[i]; + + seq_printf(seq, "next to read: %d\n", + NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE)); + for (i = 0; i < MTK_DMA_SIZE; i++) { + seq_printf(seq, "%d: %08x %08x %08x %08x", i, + *(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2, + *(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + seq_printf(seq, " %08x %08x %08x %08x", + *(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6, + *(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8); +#endif + seq_printf(seq, "\n"); + } + + kfree(rx_ring); + return 0; +} + +static int rx_ring_open(struct inode *inode, struct file *file) +{ + return single_open(file, rx_ring_read, NULL); 
+} + +static const struct file_operations rx_ring_fops = { + .owner = THIS_MODULE, + .open = rx_ring_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +static inline u32 mtk_dbg_r32(u32 reg) +{ + void __iomem *virt_reg; + u32 val; + + virt_reg = ioremap(reg, 32); + val = __raw_readl(virt_reg); + iounmap(virt_reg); + + return val; +} + +int dbg_regs_read(struct seq_file *seq, void *v) +{ + struct mtk_eth *eth = g_eth; + + seq_puts(seq, " <>\n"); + + seq_printf(seq, "| FE_INT_STA : %08x |\n", + mtk_r32(eth, MTK_FE_INT_STATUS)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + seq_printf(seq, "| FE_INT_STA2 : %08x |\n", + mtk_r32(eth, MTK_FE_INT_STATUS2)); + + seq_printf(seq, "| PSE_FQFC_CFG : %08x |\n", + mtk_r32(eth, MTK_PSE_FQFC_CFG)); + seq_printf(seq, "| PSE_IQ_STA1 : %08x |\n", + mtk_r32(eth, MTK_PSE_IQ_STA(0))); + seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n", + mtk_r32(eth, MTK_PSE_IQ_STA(1))); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n", + mtk_r32(eth, MTK_PSE_IQ_STA(2))); + seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n", + mtk_r32(eth, MTK_PSE_IQ_STA(3))); + seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n", + mtk_r32(eth, MTK_PSE_IQ_STA(4))); + } + + seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n", + mtk_r32(eth, MTK_PSE_OQ_STA(0))); + seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n", + mtk_r32(eth, MTK_PSE_OQ_STA(1))); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n", + mtk_r32(eth, MTK_PSE_OQ_STA(2))); + seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n", + mtk_r32(eth, MTK_PSE_OQ_STA(3))); + seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n", + mtk_r32(eth, MTK_PSE_OQ_STA(4))); + } + + seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n", + mtk_r32(eth, MTK_PRX_CRX_IDX0)); + seq_printf(seq, "| PDMA_DRX_IDX : %08x |\n", + mtk_r32(eth, MTK_PRX_DRX_IDX0)); + seq_printf(seq, "| QDMA_CTX_IDX : %08x |\n", + mtk_r32(eth, MTK_QTX_CTX_PTR)); + seq_printf(seq, "| QDMA_DTX_IDX : %08x |\n", + mtk_r32(eth, MTK_QTX_DTX_PTR)); + seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n", + mtk_r32(eth, MTK_QDMA_FQ_CNT)); + seq_printf(seq, "| FE_PSE_FREE : %08x |\n", + mtk_r32(eth, MTK_FE_PSE_FREE)); + seq_printf(seq, "| FE_DROP_FQ : %08x |\n", + mtk_r32(eth, MTK_FE_DROP_FQ)); + seq_printf(seq, "| FE_DROP_FC : %08x |\n", + mtk_r32(eth, MTK_FE_DROP_FC)); + seq_printf(seq, "| FE_DROP_PPE : %08x |\n", + mtk_r32(eth, MTK_FE_DROP_PPE)); + seq_printf(seq, "| GDM1_IG_CTRL : %08x |\n", + mtk_r32(eth, MTK_GDMA_FWD_CFG(0))); + seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n", + mtk_r32(eth, MTK_GDMA_FWD_CFG(1))); + seq_printf(seq, "| MAC_P1_MCR : %08x |\n", + mtk_r32(eth, MTK_MAC_MCR(0))); + seq_printf(seq, "| MAC_P2_MCR : %08x |\n", + mtk_r32(eth, MTK_MAC_MCR(1))); + seq_printf(seq, "| MAC_P1_FSM : %08x |\n", + mtk_r32(eth, MTK_MAC_FSM(0))); + seq_printf(seq, "| MAC_P2_FSM : %08x |\n", + mtk_r32(eth, MTK_MAC_FSM(1))); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + seq_printf(seq, "| FE_CDM1_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_CDM1_FSM)); + seq_printf(seq, "| FE_CDM2_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_CDM2_FSM)); + seq_printf(seq, "| FE_CDM3_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_CDM3_FSM)); + seq_printf(seq, "| FE_CDM4_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_CDM4_FSM)); + seq_printf(seq, "| FE_GDM1_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_GDM1_FSM)); + seq_printf(seq, "| FE_GDM2_FSM : %08x |\n", + mtk_r32(eth, MTK_FE_GDM2_FSM)); + seq_printf(seq, "| SGMII_EFUSE : %08x |\n", + mtk_dbg_r32(MTK_SGMII_EFUSE)); + 
seq_printf(seq, "| SGMII0_RX_CNT : %08x |\n", + mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(0))); + seq_printf(seq, "| SGMII1_RX_CNT : %08x |\n", + mtk_dbg_r32(MTK_SGMII_FALSE_CARRIER_CNT(1))); + seq_printf(seq, "| WED_RTQM_GLO : %08x |\n", + mtk_dbg_r32(MTK_WED_RTQM_GLO_CFG)); + } + + mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2); + + return 0; +} + +static int dbg_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, dbg_regs_read, 0); +} + +static const struct file_operations dbg_regs_fops = { + .owner = THIS_MODULE, + .open = dbg_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; + +void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd) +{ + u32 idx, agg_cnt, agg_size; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + idx = ring_no - 4; + agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6); +#else + idx = ring_no - 1; + agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2); +#endif + + agg_size = RX_DMA_GET_PLEN0(rxd->rxd2); + + hw_lro_agg_size_cnt[idx][agg_size / 5000]++; + hw_lro_agg_num_cnt[idx][agg_cnt]++; + hw_lro_tot_flush_cnt[idx]++; + hw_lro_tot_agg_cnt[idx] += agg_cnt; +} + +void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd) +{ + u32 idx, flush_reason; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + idx = ring_no - 4; + flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6); +#else + idx = ring_no - 1; + flush_reason = RX_DMA_GET_REV(rxd->rxd2); +#endif + + if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH) + hw_lro_agg_flush_cnt[idx]++; + else if ((flush_reason & 0x7) == MTK_HW_LRO_AGE_FLUSH) + hw_lro_age_flush_cnt[idx]++; + else if ((flush_reason & 0x7) == MTK_HW_LRO_NOT_IN_SEQ_FLUSH) + hw_lro_seq_flush_cnt[idx]++; + else if ((flush_reason & 0x7) == MTK_HW_LRO_TIMESTAMP_FLUSH) + hw_lro_timestamp_flush_cnt[idx]++; + else if ((flush_reason & 0x7) == MTK_HW_LRO_NON_RULE_FLUSH) + hw_lro_norule_flush_cnt[idx]++; +} + +ssize_t hw_lro_stats_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt)); + memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt)); + memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt)); + memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt)); + memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt)); + memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt)); + memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt)); + memset(hw_lro_timestamp_flush_cnt, 0, + sizeof(hw_lro_timestamp_flush_cnt)); + memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt)); + + pr_info("clear hw lro cnt table\n"); + + return count; +} + +int hw_lro_stats_read_v1(struct seq_file *seq, void *v) +{ + int i; + + seq_puts(seq, "HW LRO statistic dump:\n"); + + /* Agg number count */ + seq_puts(seq, "Cnt: RING1 | RING2 | RING3 | Total\n"); + for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) { + seq_printf(seq, " %d : %d %d %d %d\n", + i, hw_lro_agg_num_cnt[0][i], + hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i], + hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] + + hw_lro_agg_num_cnt[2][i]); + } + + /* Total agg count */ + seq_puts(seq, "Total agg: RING1 | RING2 | RING3 | Total\n"); + seq_printf(seq, " %d %d %d %d\n", + hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], + hw_lro_tot_agg_cnt[2], + hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] + + hw_lro_tot_agg_cnt[2]); + + /* Total flush count */ + seq_puts(seq, "Total 
flush: RING1 | RING2 | RING3 | Total\n"); + seq_printf(seq, " %d %d %d %d\n", + hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1], + hw_lro_tot_flush_cnt[2], + hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] + + hw_lro_tot_flush_cnt[2]); + + /* Avg agg count */ + seq_puts(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n"); + seq_printf(seq, " %d %d %d %d\n", + (hw_lro_tot_flush_cnt[0]) ? + hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0, + (hw_lro_tot_flush_cnt[1]) ? + hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0, + (hw_lro_tot_flush_cnt[2]) ? + hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0, + (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] + + hw_lro_tot_flush_cnt[2]) ? + ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] + + hw_lro_tot_agg_cnt[2]) / (hw_lro_tot_flush_cnt[0] + + hw_lro_tot_flush_cnt[1] + hw_lro_tot_flush_cnt[2])) : 0); + + /* Statistics of aggregation size counts */ + seq_puts(seq, "HW LRO flush pkt len:\n"); + seq_puts(seq, " Length | RING1 | RING2 | RING3 | Total\n"); + for (i = 0; i < 15; i++) { + seq_printf(seq, "%d~%d: %d %d %d %d\n", i * 5000, + (i + 1) * 5000, hw_lro_agg_size_cnt[0][i], + hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i], + hw_lro_agg_size_cnt[0][i] + + hw_lro_agg_size_cnt[1][i] + + hw_lro_agg_size_cnt[2][i]); + } + + seq_puts(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n"); + seq_printf(seq, "AGG timeout: %d %d %d %d\n", + hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], + hw_lro_agg_flush_cnt[2], + (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] + + hw_lro_agg_flush_cnt[2])); + + seq_printf(seq, "AGE timeout: %d %d %d %d\n", + hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], + hw_lro_age_flush_cnt[2], + (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] + + hw_lro_age_flush_cnt[2])); + + seq_printf(seq, "Not in-sequence: %d %d %d %d\n", + hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], + hw_lro_seq_flush_cnt[2], + (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] + + hw_lro_seq_flush_cnt[2])); + + seq_printf(seq, "Timestamp: %d %d %d %d\n", + hw_lro_timestamp_flush_cnt[0], + hw_lro_timestamp_flush_cnt[1], + hw_lro_timestamp_flush_cnt[2], + (hw_lro_timestamp_flush_cnt[0] + + hw_lro_timestamp_flush_cnt[1] + + hw_lro_timestamp_flush_cnt[2])); + + seq_printf(seq, "No LRO rule: %d %d %d %d\n", + hw_lro_norule_flush_cnt[0], + hw_lro_norule_flush_cnt[1], + hw_lro_norule_flush_cnt[2], + (hw_lro_norule_flush_cnt[0] + + hw_lro_norule_flush_cnt[1] + + hw_lro_norule_flush_cnt[2])); + + return 0; +} + +int hw_lro_stats_read_v2(struct seq_file *seq, void *v) +{ + int i; + + seq_puts(seq, "HW LRO statistic dump:\n"); + + /* Agg number count */ + seq_puts(seq, "Cnt: RING4 | RING5 | RING6 | RING7 Total\n"); + for (i = 0; i <= MTK_HW_LRO_MAX_AGG_CNT; i++) { + seq_printf(seq, + " %d : %d %d %d %d %d\n", + i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i], + hw_lro_agg_num_cnt[2][i], hw_lro_agg_num_cnt[3][i], + hw_lro_agg_num_cnt[0][i] + hw_lro_agg_num_cnt[1][i] + + hw_lro_agg_num_cnt[2][i] + hw_lro_agg_num_cnt[3][i]); + } + + /* Total agg count */ + seq_puts(seq, "Total agg: RING4 | RING5 | RING6 | RING7 Total\n"); + seq_printf(seq, " %d %d %d %d %d\n", + hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], + hw_lro_tot_agg_cnt[2], hw_lro_tot_agg_cnt[3], + hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] + + hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]); + + /* Total flush count */ + seq_puts(seq, "Total flush: RING4 | RING5 | RING6 | RING7 Total\n"); + seq_printf(seq, " %d %d %d %d %d\n", + hw_lro_tot_flush_cnt[0], 
hw_lro_tot_flush_cnt[1], + hw_lro_tot_flush_cnt[2], hw_lro_tot_flush_cnt[3], + hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] + + hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]); + + /* Avg agg count */ + seq_puts(seq, "Avg agg: RING4 | RING5 | RING6 | RING7 Total\n"); + seq_printf(seq, " %d %d %d %d %d\n", + (hw_lro_tot_flush_cnt[0]) ? + hw_lro_tot_agg_cnt[0] / hw_lro_tot_flush_cnt[0] : 0, + (hw_lro_tot_flush_cnt[1]) ? + hw_lro_tot_agg_cnt[1] / hw_lro_tot_flush_cnt[1] : 0, + (hw_lro_tot_flush_cnt[2]) ? + hw_lro_tot_agg_cnt[2] / hw_lro_tot_flush_cnt[2] : 0, + (hw_lro_tot_flush_cnt[3]) ? + hw_lro_tot_agg_cnt[3] / hw_lro_tot_flush_cnt[3] : 0, + (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] + + hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3]) ? + ((hw_lro_tot_agg_cnt[0] + hw_lro_tot_agg_cnt[1] + + hw_lro_tot_agg_cnt[2] + hw_lro_tot_agg_cnt[3]) / + (hw_lro_tot_flush_cnt[0] + hw_lro_tot_flush_cnt[1] + + hw_lro_tot_flush_cnt[2] + hw_lro_tot_flush_cnt[3])) : 0); + + /* Statistics of aggregation size counts */ + seq_puts(seq, "HW LRO flush pkt len:\n"); + seq_puts(seq, " Length | RING4 | RING5 | RING6 | RING7 Total\n"); + for (i = 0; i < 15; i++) { + seq_printf(seq, "%d~%d: %d %d %d %d %d\n", + i * 5000, (i + 1) * 5000, + hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i], + hw_lro_agg_size_cnt[2][i], hw_lro_agg_size_cnt[3][i], + hw_lro_agg_size_cnt[0][i] + + hw_lro_agg_size_cnt[1][i] + + hw_lro_agg_size_cnt[2][i] + + hw_lro_agg_size_cnt[3][i]); + } + + seq_puts(seq, "Flush reason: RING4 | RING5 | RING6 | RING7 Total\n"); + seq_printf(seq, "AGG timeout: %d %d %d %d %d\n", + hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], + hw_lro_agg_flush_cnt[2], hw_lro_agg_flush_cnt[3], + (hw_lro_agg_flush_cnt[0] + hw_lro_agg_flush_cnt[1] + + hw_lro_agg_flush_cnt[2] + hw_lro_agg_flush_cnt[3])); + + seq_printf(seq, "AGE timeout: %d %d %d %d %d\n", + hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], + hw_lro_age_flush_cnt[2], hw_lro_age_flush_cnt[3], + (hw_lro_age_flush_cnt[0] + hw_lro_age_flush_cnt[1] + + hw_lro_age_flush_cnt[2] + hw_lro_age_flush_cnt[3])); + + seq_printf(seq, "Not in-sequence: %d %d %d %d %d\n", + hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], + hw_lro_seq_flush_cnt[2], hw_lro_seq_flush_cnt[3], + (hw_lro_seq_flush_cnt[0] + hw_lro_seq_flush_cnt[1] + + hw_lro_seq_flush_cnt[2] + hw_lro_seq_flush_cnt[3])); + + seq_printf(seq, "Timestamp: %d %d %d %d %d\n", + hw_lro_timestamp_flush_cnt[0], + hw_lro_timestamp_flush_cnt[1], + hw_lro_timestamp_flush_cnt[2], + hw_lro_timestamp_flush_cnt[3], + (hw_lro_timestamp_flush_cnt[0] + + hw_lro_timestamp_flush_cnt[1] + + hw_lro_timestamp_flush_cnt[2] + + hw_lro_timestamp_flush_cnt[3])); + + seq_printf(seq, "No LRO rule: %d %d %d %d %d\n", + hw_lro_norule_flush_cnt[0], + hw_lro_norule_flush_cnt[1], + hw_lro_norule_flush_cnt[2], + hw_lro_norule_flush_cnt[3], + (hw_lro_norule_flush_cnt[0] + + hw_lro_norule_flush_cnt[1] + + hw_lro_norule_flush_cnt[2] + + hw_lro_norule_flush_cnt[3])); + + return 0; +} + +int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v) +{ + struct mtk_eth *eth = g_eth; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + hw_lro_stats_read_v2(seq, v); + else + hw_lro_stats_read_v1(seq, v); + + return 0; +} + +static int hw_lro_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, hw_lro_stats_read_wrapper, NULL); +} + +static const struct file_operations hw_lro_stats_fops = { + .owner = THIS_MODULE, + .open = hw_lro_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .write = 
hw_lro_stats_write, + .release = single_release +}; + +int hwlro_agg_cnt_ctrl(int cnt) +{ + int i; + + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) + SET_PDMA_RXRING_MAX_AGG_CNT(g_eth, i, cnt); + + return 0; +} + +int hwlro_agg_time_ctrl(int time) +{ + int i; + + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) + SET_PDMA_RXRING_AGG_TIME(g_eth, i, time); + + return 0; +} + +int hwlro_age_time_ctrl(int time) +{ + int i; + + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) + SET_PDMA_RXRING_AGE_TIME(g_eth, i, time); + + return 0; +} + +int hwlro_threshold_ctrl(int bandwidth) +{ + SET_PDMA_LRO_BW_THRESHOLD(g_eth, bandwidth); + + return 0; +} + +int hwlro_ring_enable_ctrl(int enable) +{ + int i; + + pr_info("[%s] %s HW LRO rings\n", __func__, (enable) ? "Enable" : "Disable"); + + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) + SET_PDMA_RXRING_VALID(g_eth, i, enable); + + return 0; +} + +int hwlro_stats_enable_ctrl(int enable) +{ + pr_info("[%s] %s HW LRO statistics\n", __func__, (enable) ? "Enable" : "Disable"); + mtk_hwlro_stats_ebl = enable; + + return 0; +} + +static const mtk_lro_dbg_func lro_dbg_func[] = { + [0] = hwlro_agg_cnt_ctrl, + [1] = hwlro_agg_time_ctrl, + [2] = hwlro_age_time_ctrl, + [3] = hwlro_threshold_ctrl, + [4] = hwlro_ring_enable_ctrl, + [5] = hwlro_stats_enable_ctrl, +}; + +ssize_t hw_lro_auto_tlb_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[32]; + char *p_buf; + char *p_token = NULL; + char *p_delimiter = " \t"; + long x = 0, y = 0; + u32 len = count; + int ret; + + if (len >= sizeof(buf)) { + pr_info("Input handling fail!\n"); + return -1; + } + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + p_buf = buf; + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + x = 0; + else + ret = kstrtol(p_token, 10, &x); + + p_token = strsep(&p_buf, "\t\n "); + if (p_token) + ret = kstrtol(p_token, 10, &y); + + if (lro_dbg_func[x] && (ARRAY_SIZE(lro_dbg_func) > x)) + (*lro_dbg_func[x]) (y); + + return count; +} + +void hw_lro_auto_tlb_dump_v1(struct seq_file *seq, u32 index) +{ + int i; + struct mtk_lro_alt_v1 alt; + __be32 addr; + u32 tlb_info[9]; + u32 dw_len, cnt, priority; + u32 entry; + + if (index > 4) + index = index - 1; + entry = (index * 9) + 1; + + /* read valid entries of the auto-learn table */ + mtk_w32(g_eth, entry, MTK_FE_ALT_CF8); + + for (i = 0; i < 9; i++) + tlb_info[i] = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC); + + memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v1)); + + dw_len = alt.alt_info7.dw_len; + cnt = alt.alt_info6.cnt; + + if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE) + priority = cnt; /* packet count */ + else + priority = dw_len; /* byte count */ + + /* dump valid entries of the auto-learn table */ + if (index >= 4) + seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index); + else + seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index); + + if (alt.alt_info8.ipv4) { + addr = htonl(alt.alt_info1.sip0); + seq_printf(seq, "SIP = %pI4 (IPv4)\n", &addr); + } else { + seq_printf(seq, "SIP = %08X:%08X:%08X:%08X (IPv6)\n", + alt.alt_info4.sip3, alt.alt_info3.sip2, + alt.alt_info2.sip1, alt.alt_info1.sip0); + } + + seq_printf(seq, "DIP_ID = %d\n", alt.alt_info8.dip_id); + seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n", + alt.alt_info0.stp, alt.alt_info0.dtp); + seq_printf(seq, "VLAN_VID_VLD = %d\n", alt.alt_info6.vlan_vid_vld); + seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d\n", + (alt.alt_info5.vlan_vid0 & 0xfff), + 
((alt.alt_info5.vlan_vid0 >> 12) & 0xfff), + ((alt.alt_info6.vlan_vid1 << 8) | + ((alt.alt_info5.vlan_vid0 >> 24) & 0xfff)), + ((alt.alt_info6.vlan_vid1 >> 4) & 0xfff)); + seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt); + seq_printf(seq, "PRIORITY = %d\n", priority); +} + +void hw_lro_auto_tlb_dump_v2(struct seq_file *seq, u32 index) +{ + int i; + struct mtk_lro_alt_v2 alt; + u32 score = 0, ipv4 = 0; + u32 ipv6[4] = { 0 }; + u32 tlb_info[12]; + + /* read valid entries of the auto-learn table */ + mtk_w32(g_eth, index << MTK_LRO_ALT_INDEX_OFFSET, MTK_LRO_ALT_DBG); + + for (i = 0; i < 11; i++) + tlb_info[i] = mtk_r32(g_eth, MTK_LRO_ALT_DBG_DATA); + + memcpy(&alt, tlb_info, sizeof(struct mtk_lro_alt_v2)); + + if (mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW0) & MTK_LRO_ALT_PKT_CNT_MODE) + score = 1; /* packet count */ + else + score = 0; /* byte count */ + + /* dump valid entries of the auto-learn table */ + if (alt.alt_info0.valid) { + if (index < 5) + seq_printf(seq, + "\n===== TABLE Entry: %d (onging) =====\n", + index); + else + seq_printf(seq, + "\n===== TABLE Entry: %d (candidate) =====\n", + index); + + if (alt.alt_info1.v4_valid) { + ipv4 = (alt.alt_info4.sip0_h << 23) | + alt.alt_info5.sip0_l; + seq_printf(seq, "SIP = 0x%x: (IPv4)\n", ipv4); + + ipv4 = (alt.alt_info8.dip0_h << 23) | + alt.alt_info9.dip0_l; + seq_printf(seq, "DIP = 0x%x: (IPv4)\n", ipv4); + } else if (alt.alt_info1.v6_valid) { + ipv6[3] = (alt.alt_info1.sip3_h << 23) | + (alt.alt_info2.sip3_l << 9); + ipv6[2] = (alt.alt_info2.sip2_h << 23) | + (alt.alt_info3.sip2_l << 9); + ipv6[1] = (alt.alt_info3.sip1_h << 23) | + (alt.alt_info4.sip1_l << 9); + ipv6[0] = (alt.alt_info4.sip0_h << 23) | + (alt.alt_info5.sip0_l << 9); + seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n", + ipv6[3], ipv6[2], ipv6[1], ipv6[0]); + + ipv6[3] = (alt.alt_info5.dip3_h << 23) | + (alt.alt_info6.dip3_l << 9); + ipv6[2] = (alt.alt_info6.dip2_h << 23) | + (alt.alt_info7.dip2_l << 9); + ipv6[1] = (alt.alt_info7.dip1_h << 23) | + (alt.alt_info8.dip1_l << 9); + ipv6[0] = (alt.alt_info8.dip0_h << 23) | + (alt.alt_info9.dip0_l << 9); + seq_printf(seq, "DIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n", + ipv6[3], ipv6[2], ipv6[1], ipv6[0]); + } + + seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n", + (alt.alt_info9.sp_h << 7) | (alt.alt_info10.sp_l), + alt.alt_info10.dp); + } +} + +int hw_lro_auto_tlb_read(struct seq_file *seq, void *v) +{ + int i; + u32 reg_val; + u32 reg_op1, reg_op2, reg_op3, reg_op4; + u32 agg_cnt, agg_time, age_time; + + seq_puts(seq, "Usage of /proc/mtketh/hw_lro_auto_tlb:\n"); + seq_puts(seq, "echo [function] [setting] > /proc/mtketh/hw_lro_auto_tlb\n"); + seq_puts(seq, "Functions:\n"); + seq_puts(seq, "[0] = hwlro_agg_cnt_ctrl\n"); + seq_puts(seq, "[1] = hwlro_agg_time_ctrl\n"); + seq_puts(seq, "[2] = hwlro_age_time_ctrl\n"); + seq_puts(seq, "[3] = hwlro_threshold_ctrl\n"); + seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n"); + seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n"); + + if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) { + for (i = 1; i <= 8; i++) + hw_lro_auto_tlb_dump_v2(seq, i); + } else { + /* Read valid entries of the auto-learn table */ + mtk_w32(g_eth, 0, MTK_FE_ALT_CF8); + reg_val = mtk_r32(g_eth, MTK_FE_ALT_SEQ_CFC); + + seq_printf(seq, + "HW LRO Auto-learn Table: (MTK_FE_ALT_SEQ_CFC=0x%x)\n", + reg_val); + + for (i = 7; i >= 0; i--) { + if (reg_val & (1 << i)) + hw_lro_auto_tlb_dump_v1(seq, i); + } + } + + /* Read the agg_time/age_time/agg_cnt of LRO rings */ + seq_puts(seq, "\nHW LRO Ring Settings\n"); + + for (i 
= 1; i <= MTK_HW_LRO_RING_NUM; i++) { + reg_op1 = mtk_r32(g_eth, MTK_LRO_CTRL_DW1_CFG(i)); + reg_op2 = mtk_r32(g_eth, MTK_LRO_CTRL_DW2_CFG(i)); + reg_op3 = mtk_r32(g_eth, MTK_LRO_CTRL_DW3_CFG(i)); + reg_op4 = mtk_r32(g_eth, MTK_PDMA_LRO_CTRL_DW2); + + agg_cnt = + ((reg_op3 & 0x3) << 6) | + ((reg_op2 >> MTK_LRO_RING_AGG_CNT_L_OFFSET) & 0x3f); + agg_time = (reg_op2 >> MTK_LRO_RING_AGG_TIME_OFFSET) & 0xffff; + age_time = + ((reg_op2 & 0x3f) << 10) | + ((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff); + seq_printf(seq, + "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n", + (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i, + agg_cnt, agg_time, age_time, reg_op4); + } + + seq_puts(seq, "\n"); + + return 0; +} + +static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file) +{ + return single_open(file, hw_lro_auto_tlb_read, NULL); +} + +static const struct file_operations hw_lro_auto_tlb_fops = { + .owner = THIS_MODULE, + .open = hw_lro_auto_tlb_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hw_lro_auto_tlb_write, + .release = single_release +}; + +int reset_event_read(struct seq_file *seq, void *v) +{ + struct mtk_eth *eth = g_eth; + struct mtk_reset_event reset_event = eth->reset_event; + + seq_printf(seq, "[Event] [Count]\n"); + seq_printf(seq, " FQ Empty: %d\n", + reset_event.count[MTK_EVENT_FQ_EMPTY]); + seq_printf(seq, " TSO Fail: %d\n", + reset_event.count[MTK_EVENT_TSO_FAIL]); + seq_printf(seq, " TSO Illegal: %d\n", + reset_event.count[MTK_EVENT_TSO_ILLEGAL]); + seq_printf(seq, " TSO Align: %d\n", + reset_event.count[MTK_EVENT_TSO_ALIGN]); + seq_printf(seq, " RFIFO OV: %d\n", + reset_event.count[MTK_EVENT_RFIFO_OV]); + seq_printf(seq, " RFIFO UF: %d\n", + reset_event.count[MTK_EVENT_RFIFO_UF]); + seq_printf(seq, " Force: %d\n", + reset_event.count[MTK_EVENT_FORCE]); + seq_printf(seq, "----------------------------\n"); + seq_printf(seq, " Warm Cnt: %d\n", + reset_event.count[MTK_EVENT_WARM_CNT]); + seq_printf(seq, " Cold Cnt: %d\n", + reset_event.count[MTK_EVENT_COLD_CNT]); + seq_printf(seq, " Total Cnt: %d\n", + reset_event.count[MTK_EVENT_TOTAL_CNT]); + + return 0; +} + +static int reset_event_open(struct inode *inode, struct file *file) +{ + return single_open(file, reset_event_read, 0); +} + +ssize_t reset_event_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + struct mtk_eth *eth = g_eth; + struct mtk_reset_event *reset_event = ð->reset_event; + + memset(reset_event, 0, sizeof(struct mtk_reset_event)); + pr_info("MTK reset event counter is cleared !\n"); + + return count; +} + +static const struct file_operations reset_event_fops = { + .owner = THIS_MODULE, + .open = reset_event_open, + .read = seq_read, + .llseek = seq_lseek, + .write = reset_event_write, + .release = single_release +}; + +struct proc_dir_entry *proc_reg_dir; +static struct proc_dir_entry *proc_esw_cnt, *proc_dbg_regs, *proc_reset_event; + +int debug_proc_init(struct mtk_eth *eth) +{ + g_eth = eth; + + if (!proc_reg_dir) + proc_reg_dir = proc_mkdir(PROCREG_DIR, NULL); + + proc_tx_ring = + proc_create(PROCREG_TXRING, 0, proc_reg_dir, &tx_ring_fops); + if (!proc_tx_ring) + pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_TXRING); + + proc_hwtx_ring = + proc_create(PROCREG_HWTXRING, 0, proc_reg_dir, &hwtx_ring_fops); + if (!proc_hwtx_ring) + pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_HWTXRING); + + proc_rx_ring = + proc_create(PROCREG_RXRING, 0, proc_reg_dir, &rx_ring_fops); + if (!proc_rx_ring) + pr_notice("!! 
FAIL to create %s PROC !!\n", PROCREG_RXRING); + + proc_esw_cnt = + proc_create(PROCREG_ESW_CNT, 0, proc_reg_dir, &switch_count_fops); + if (!proc_esw_cnt) + pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT); + + proc_dbg_regs = + proc_create(PROCREG_DBG_REGS, 0, proc_reg_dir, &dbg_regs_fops); + if (!proc_dbg_regs) + pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_DBG_REGS); + + if (g_eth->hwlro) { + proc_hw_lro_stats = + proc_create(PROCREG_HW_LRO_STATS, 0, proc_reg_dir, + &hw_lro_stats_fops); + if (!proc_hw_lro_stats) + pr_info("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS); + + proc_hw_lro_auto_tlb = + proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, proc_reg_dir, + &hw_lro_auto_tlb_fops); + if (!proc_hw_lro_auto_tlb) + pr_info("!! FAIL to create %s PROC !!\n", + PROCREG_HW_LRO_AUTO_TLB); + } + + proc_reset_event = + proc_create(PROCREG_RESET_EVENT, 0, proc_reg_dir, &reset_event_fops); + if (!proc_reset_event) + pr_notice("!! FAIL to create %s PROC !!\n", PROCREG_RESET_EVENT); + + return 0; +} + +void debug_proc_exit(void) +{ + if (proc_tx_ring) + remove_proc_entry(PROCREG_TXRING, proc_reg_dir); + if (proc_hwtx_ring) + remove_proc_entry(PROCREG_HWTXRING, proc_reg_dir); + if (proc_rx_ring) + remove_proc_entry(PROCREG_RXRING, proc_reg_dir); + + if (proc_esw_cnt) + remove_proc_entry(PROCREG_ESW_CNT, proc_reg_dir); + + if (proc_reg_dir) + remove_proc_entry(PROCREG_DIR, 0); + + if (proc_dbg_regs) + remove_proc_entry(PROCREG_DBG_REGS, proc_reg_dir); + + if (g_eth->hwlro) { + if (proc_hw_lro_stats) + remove_proc_entry(PROCREG_HW_LRO_STATS, proc_reg_dir); + + if (proc_hw_lro_auto_tlb) + remove_proc_entry(PROCREG_HW_LRO_AUTO_TLB, proc_reg_dir); + } + + if (proc_reset_event) + remove_proc_entry(PROCREG_RESET_EVENT, proc_reg_dir); +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.h new file mode 100644 index 000000000..43f483826 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_dbg.h @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2018 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#ifndef MTK_ETH_DBG_H +#define MTK_ETH_DBG_H + +/* Debug Purpose Register */ +#define MTK_PSE_FQFC_CFG 0x100 +#define MTK_FE_CDM1_FSM 0x220 +#define MTK_FE_CDM2_FSM 0x224 +#define MTK_FE_CDM3_FSM 0x238 +#define MTK_FE_CDM4_FSM 0x298 +#define MTK_FE_GDM1_FSM 0x228 +#define MTK_FE_GDM2_FSM 0x22C +#define MTK_FE_PSE_FREE 0x240 +#define MTK_FE_DROP_FQ 0x244 +#define MTK_FE_DROP_FC 0x248 +#define MTK_FE_DROP_PPE 0x24C +#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100)) +#define MTK_SGMII_FALSE_CARRIER_CNT(x) (0x10060028 + ((x) * 0x10000)) +#define MTK_SGMII_EFUSE 0x11D008C8 +#define MTK_WED_RTQM_GLO_CFG 0x15010B00 + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_PSE_IQ_STA(x) (0x180 + (x) * 0x4) +#define MTK_PSE_OQ_STA(x) (0x1A0 + (x) * 0x4) +#else +#define MTK_PSE_IQ_STA(x) (0x110 + (x) * 0x4) +#define MTK_PSE_OQ_STA(x) (0x118 + (x) * 0x4) +#endif + +#define MTKETH_MII_READ 0x89F3 +#define MTKETH_MII_WRITE 0x89F4 +#define MTKETH_ESW_REG_READ 0x89F1 +#define MTKETH_ESW_REG_WRITE 0x89F2 +#define MTKETH_MII_READ_CL45 0x89FC +#define MTKETH_MII_WRITE_CL45 0x89FD +#define REG_ESW_MAX 0xFC + +#define PROCREG_ESW_CNT "esw_cnt" +#define PROCREG_TXRING "tx_ring" +#define PROCREG_HWTXRING "hwtx_ring" +#define PROCREG_RXRING "rx_ring" +#define PROCREG_DIR "mtketh" +#define PROCREG_DBG_REGS "dbg_regs" +#define PROCREG_HW_LRO_STATS "hw_lro_stats" +#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb" +#define PROCREG_RESET_EVENT "reset_event" + +/* HW LRO flush reason */ +#define MTK_HW_LRO_AGG_FLUSH (1) +#define MTK_HW_LRO_AGE_FLUSH (2) +#define MTK_HW_LRO_NOT_IN_SEQ_FLUSH (3) +#define MTK_HW_LRO_TIMESTAMP_FLUSH (4) +#define MTK_HW_LRO_NON_RULE_FLUSH (5) + +#define SET_PDMA_RXRING_MAX_AGG_CNT(eth, x, y) \ +{ \ + u32 reg_val1 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \ + u32 reg_val2 = mtk_r32(eth, MTK_LRO_CTRL_DW3_CFG(x)); \ + reg_val1 &= ~MTK_LRO_RING_AGG_CNT_L_MASK; \ + reg_val2 &= ~MTK_LRO_RING_AGG_CNT_H_MASK; \ + reg_val1 |= ((y) & 0x3f) << MTK_LRO_RING_AGG_CNT_L_OFFSET; \ + reg_val2 |= (((y) >> 6) & 0x03) << \ + MTK_LRO_RING_AGG_CNT_H_OFFSET; \ + mtk_w32(eth, reg_val1, MTK_LRO_CTRL_DW2_CFG(x)); \ + mtk_w32(eth, reg_val2, MTK_LRO_CTRL_DW3_CFG(x)); \ +} + +#define SET_PDMA_RXRING_AGG_TIME(eth, x, y) \ +{ \ + u32 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \ + reg_val &= ~MTK_LRO_RING_AGG_TIME_MASK; \ + reg_val |= ((y) & 0xffff) << MTK_LRO_RING_AGG_TIME_OFFSET; \ + mtk_w32(eth, reg_val, MTK_LRO_CTRL_DW2_CFG(x)); \ +} + +#define SET_PDMA_RXRING_AGE_TIME(eth, x, y) \ +{ \ + u32 reg_val1 = mtk_r32(eth, MTK_LRO_CTRL_DW1_CFG(x)); \ + u32 reg_val2 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \ + reg_val1 &= ~MTK_LRO_RING_AGE_TIME_L_MASK; \ + reg_val2 &= ~MTK_LRO_RING_AGE_TIME_H_MASK; \ + reg_val1 |= ((y) & 0x3ff) << MTK_LRO_RING_AGE_TIME_L_OFFSET; \ + reg_val2 |= (((y) >> 10) & 0x03f) << \ + MTK_LRO_RING_AGE_TIME_H_OFFSET; \ + mtk_w32(eth, reg_val1, MTK_LRO_CTRL_DW1_CFG(x)); \ + mtk_w32(eth, reg_val2, MTK_LRO_CTRL_DW2_CFG(x)); \ +} + +#define SET_PDMA_LRO_BW_THRESHOLD(eth, x) \ +{ \ + u32 reg_val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW2); \ + reg_val = (x); \ + mtk_w32(eth, reg_val, MTK_PDMA_LRO_CTRL_DW2); \ +} + +#define SET_PDMA_RXRING_VALID(eth, x, y) \ +{ \ + u32 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \ + reg_val &= ~(0x1 << MTK_RX_PORT_VALID_OFFSET); \ + reg_val |= ((y) & 0x1) << MTK_RX_PORT_VALID_OFFSET; \ + mtk_w32(eth, reg_val, MTK_LRO_CTRL_DW2_CFG(x)); \ +} + 
+struct mtk_lro_alt_v1_info0 { + u32 dtp : 16; + u32 stp : 16; +}; + +struct mtk_lro_alt_v1_info1 { + u32 sip0 : 32; +}; + +struct mtk_lro_alt_v1_info2 { + u32 sip1 : 32; +}; + +struct mtk_lro_alt_v1_info3 { + u32 sip2 : 32; +}; + +struct mtk_lro_alt_v1_info4 { + u32 sip3 : 32; +}; + +struct mtk_lro_alt_v1_info5 { + u32 vlan_vid0 : 32; +}; + +struct mtk_lro_alt_v1_info6 { + u32 vlan_vid1 : 16; + u32 vlan_vid_vld : 4; + u32 cnt : 12; +}; + +struct mtk_lro_alt_v1_info7 { + u32 dw_len : 32; +}; + +struct mtk_lro_alt_v1_info8 { + u32 dip_id : 2; + u32 ipv6 : 1; + u32 ipv4 : 1; + u32 resv : 27; + u32 valid : 1; +}; + +struct mtk_lro_alt_v1 { + struct mtk_lro_alt_v1_info0 alt_info0; + struct mtk_lro_alt_v1_info1 alt_info1; + struct mtk_lro_alt_v1_info2 alt_info2; + struct mtk_lro_alt_v1_info3 alt_info3; + struct mtk_lro_alt_v1_info4 alt_info4; + struct mtk_lro_alt_v1_info5 alt_info5; + struct mtk_lro_alt_v1_info6 alt_info6; + struct mtk_lro_alt_v1_info7 alt_info7; + struct mtk_lro_alt_v1_info8 alt_info8; +}; + +struct mtk_lro_alt_v2_info0 { + u32 v2_id_h:3; + u32 v1_id:12; + u32 v0_id:12; + u32 v3_valid:1; + u32 v2_valid:1; + u32 v1_valid:1; + u32 v0_valid:1; + u32 valid:1; +}; + +struct mtk_lro_alt_v2_info1 { + u32 sip3_h:9; + u32 v6_valid:1; + u32 v4_valid:1; + u32 v3_id:12; + u32 v2_id_l:9; +}; + +struct mtk_lro_alt_v2_info2 { + u32 sip2_h:9; + u32 sip3_l:23; +}; +struct mtk_lro_alt_v2_info3 { + u32 sip1_h:9; + u32 sip2_l:23; +}; +struct mtk_lro_alt_v2_info4 { + u32 sip0_h:9; + u32 sip1_l:23; +}; +struct mtk_lro_alt_v2_info5 { + u32 dip3_h:9; + u32 sip0_l:23; +}; +struct mtk_lro_alt_v2_info6 { + u32 dip2_h:9; + u32 dip3_l:23; +}; +struct mtk_lro_alt_v2_info7 { + u32 dip1_h:9; + u32 dip2_l:23; +}; +struct mtk_lro_alt_v2_info8 { + u32 dip0_h:9; + u32 dip1_l:23; +}; +struct mtk_lro_alt_v2_info9 { + u32 sp_h:9; + u32 dip0_l:23; +}; +struct mtk_lro_alt_v2_info10 { + u32 resv:9; + u32 dp:16; + u32 sp_l:7; +}; + +struct mtk_lro_alt_v2 { + struct mtk_lro_alt_v2_info0 alt_info0; + struct mtk_lro_alt_v2_info1 alt_info1; + struct mtk_lro_alt_v2_info2 alt_info2; + struct mtk_lro_alt_v2_info3 alt_info3; + struct mtk_lro_alt_v2_info4 alt_info4; + struct mtk_lro_alt_v2_info5 alt_info5; + struct mtk_lro_alt_v2_info6 alt_info6; + struct mtk_lro_alt_v2_info7 alt_info7; + struct mtk_lro_alt_v2_info8 alt_info8; + struct mtk_lro_alt_v2_info9 alt_info9; + struct mtk_lro_alt_v2_info10 alt_info10; +}; + +struct mtk_esw_reg { + unsigned int off; + unsigned int val; +}; + +struct mtk_mii_ioctl_data { + u16 phy_id; + u16 reg_num; + unsigned int val_in; + unsigned int val_out; +}; + +#if defined(CONFIG_NET_DSA_MT7530) || defined(CONFIG_MT753X_GSW) +static inline bool mt7530_exist(struct mtk_eth *eth) +{ + return true; +} +#else +static inline bool mt7530_exist(struct mtk_eth *eth) +{ + return false; +} +#endif + +extern u32 _mtk_mdio_read(struct mtk_eth *eth, u16 phy_addr, u16 phy_reg); +extern u32 _mtk_mdio_write(struct mtk_eth *eth, u16 phy_addr, + u16 phy_register, u16 write_data); + +extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data); +extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data); +extern atomic_t force; + +int debug_proc_init(struct mtk_eth *eth); +void debug_proc_exit(void); + +int mtketh_debugfs_init(struct mtk_eth *eth); +void mtketh_debugfs_exit(struct mtk_eth *eth); +int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd); +void 
hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd); + +#endif /* MTK_ETH_DBG_H */ diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_path.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_path.c new file mode 100644 index 000000000..5234a34b0 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_path.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. + +/* A library for configuring path from GMAC/GDM to target PHY + * + * Author: Sean Wang + * + */ + +#include +#include + +#include "mtk_eth_soc.h" + +struct mtk_eth_muxc { + const char *name; + int cap_bit; + int (*set_path)(struct mtk_eth *eth, int path); +}; + +static const char *mtk_eth_path_name(int path) +{ + switch (path) { + case MTK_ETH_PATH_GMAC1_RGMII: + return "gmac1_rgmii"; + case MTK_ETH_PATH_GMAC1_TRGMII: + return "gmac1_trgmii"; + case MTK_ETH_PATH_GMAC1_SGMII: + return "gmac1_sgmii"; + case MTK_ETH_PATH_GMAC2_RGMII: + return "gmac2_rgmii"; + case MTK_ETH_PATH_GMAC2_SGMII: + return "gmac2_sgmii"; + case MTK_ETH_PATH_GMAC2_GEPHY: + return "gmac2_gephy"; + case MTK_ETH_PATH_GDM1_ESW: + return "gdm1_esw"; + default: + return "unknown path"; + } +} + +static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path) +{ + bool updated = true; + u32 val, mask, set; + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + mask = ~(u32)MTK_MUX_TO_ESW; + set = 0; + break; + case MTK_ETH_PATH_GDM1_ESW: + mask = ~(u32)MTK_MUX_TO_ESW; + set = MTK_MUX_TO_ESW; + break; + default: + updated = false; + break; + }; + + if (updated) { + val = mtk_r32(eth, MTK_MAC_MISC); + val = (val & mask) | set; + mtk_w32(eth, val, MTK_MAC_MISC); + } + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + switch (path) { + case MTK_ETH_PATH_GMAC2_GEPHY: + val = ~(u32)GEPHY_MAC_SEL; + break; + default: + updated = false; + break; + } + + if (updated) + regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path) +{ + unsigned int val = 0,mask=0,reg=0; + bool updated = true; + + switch (path) { + case MTK_ETH_PATH_GMAC2_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) { + reg = USB_PHY_SWITCH_REG; + val = SGMII_QPHY_SEL; + mask = QPHY_SEL_MASK; + } else { + reg = INFRA_MISC2; + val = CO_QPHY_SEL; + mask = val; + } + break; + default: + updated = false; + break; + } + + if (updated) + regmap_update_bits(eth->infra, reg, mask, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + spin_lock(ð->syscfg0_lock); + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + val = SYSCFG0_SGMII_GMAC1; + break; + case MTK_ETH_PATH_GMAC2_SGMII: + val = SYSCFG0_SGMII_GMAC2; + break; + case MTK_ETH_PATH_GMAC1_RGMII: + case MTK_ETH_PATH_GMAC2_RGMII: + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= SYSCFG0_SGMII_MASK; + + if ((path == MTK_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) || + (path == MTK_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2)) + val = 0; + else + updated 
= false; + break; + default: + updated = false; + break; + }; + + if (updated) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + + spin_unlock(ð->syscfg0_lock); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + spin_lock(ð->syscfg0_lock); + + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + val |= SYSCFG0_SGMII_GMAC1_V2; + break; + case MTK_ETH_PATH_GMAC2_GEPHY: + val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2; + break; + case MTK_ETH_PATH_GMAC2_SGMII: + val |= SYSCFG0_SGMII_GMAC2_V2; + break; + default: + updated = false; + }; + + if (updated) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + + spin_unlock(ð->syscfg0_lock); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static const struct mtk_eth_muxc mtk_eth_muxc[] = { + { + .name = "mux_gdm1_to_gmac1_esw", + .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW, + .set_path = set_mux_gdm1_to_gmac1_esw, + }, { + .name = "mux_gmac2_gmac0_to_gephy", + .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY, + .set_path = set_mux_gmac2_gmac0_to_gephy, + }, { + .name = "mux_u3_gmac2_to_qphy", + .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY, + .set_path = set_mux_u3_gmac2_to_qphy, + }, { + .name = "mux_gmac1_gmac2_to_sgmii_rgmii", + .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII, + .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, + }, { + .name = "mux_gmac12_to_gephy_sgmii", + .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII, + .set_path = set_mux_gmac12_to_gephy_sgmii, + }, +}; + +static int mtk_eth_mux_setup(struct mtk_eth *eth, int path) +{ + int i, err = 0; + + if (!MTK_HAS_CAPS(eth->soc->caps, path)) { + dev_err(eth->dev, "path %s isn't support on the SoC\n", + mtk_eth_path_name(path)); + return -EINVAL; + } + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX)) + return 0; + + /* Setup MUX in path fabric */ + for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) { + if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) { + err = mtk_eth_muxc[i].set_path(eth, path); + if (err) + goto out; + } else { + dev_dbg(eth->dev, "mux %s isn't present on the SoC\n", + mtk_eth_muxc[i].name); + } + } + +out: + return err; +} + +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) +{ + int err, path; + + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII : + MTK_ETH_PATH_GMAC2_SGMII; + + /* Setup proper MUXes along the path */ + err = mtk_eth_mux_setup(eth, path); + if (err) + return err; + + return 0; +} + +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) +{ + int err, path = 0; + + if (mac_id == 1) + path = MTK_ETH_PATH_GMAC2_GEPHY; + + if (!path) + return -EINVAL; + + /* Setup proper MUXes along the path */ + err = mtk_eth_mux_setup(eth, path); + if (err) + return err; + + return 0; +} + +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) +{ + int err, path; + + path = (mac_id == 0) ? 
MTK_ETH_PATH_GMAC1_RGMII : + MTK_ETH_PATH_GMAC2_RGMII; + + /* Setup proper MUXes along the path */ + err = mtk_eth_mux_setup(eth, path); + if (err) + return err; + + return 0; +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.c new file mode 100644 index 000000000..a8aeccc11 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.c @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022 MediaTek Inc. + * Author: Henry Yen + */ + +#include +#include "mtk_eth_soc.h" +#include "mtk_eth_dbg.h" +#include "mtk_eth_reset.h" + +char* mtk_reset_event_name[32] = { + [MTK_EVENT_FORCE] = "Force", + [MTK_EVENT_WARM_CNT] = "Warm", + [MTK_EVENT_COLD_CNT] = "Cold", + [MTK_EVENT_TOTAL_CNT] = "Total", + [MTK_EVENT_FQ_EMPTY] = "FQ Empty", + [MTK_EVENT_TSO_FAIL] = "TSO Fail", + [MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal", + [MTK_EVENT_TSO_ALIGN] = "TSO Align", + [MTK_EVENT_RFIFO_OV] = "RFIFO OV", + [MTK_EVENT_RFIFO_UF] = "RFIFO UF", +}; + +void mtk_reset_event_update(struct mtk_eth *eth, u32 id) +{ + struct mtk_reset_event *reset_event = ð->reset_event; + reset_event->count[id]++; +} + +int mtk_eth_cold_reset(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1); + else + ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff); + + return 0; +} + +int mtk_eth_warm_reset(struct mtk_eth *eth) +{ + u32 reset_bits = 0, i = 0, done = 0; + u32 val1 = 0, val2 = 0, val3 = 0; + + mdelay(100); + + reset_bits |= RSTCTRL_FE; + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, reset_bits); + + while (i < 1000) { + regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1); + if (val1 & RSTCTRL_FE) + break; + i++; + udelay(1); + } + + if (i < 1000) { + reset_bits = 0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1; + else + reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0; + + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, reset_bits); + + udelay(1); + regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2); + if (!(val2 & reset_bits)) + pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n", + __func__, val2, reset_bits); + reset_bits |= RSTCTRL_FE; + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, ~reset_bits); + + udelay(1); + regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3); + if (val3 & reset_bits) + pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n", + __func__, val3, reset_bits); + done = 1; + mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT); + } + + pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n", + __func__, val1, val2, val3); + + if (!done) + mtk_eth_cold_reset(eth); + + return 0; +} + +u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status) +{ + u32 ret = 0, val = 0; + + if ((status & MTK_FE_INT_FQ_EMPTY) || + (status & MTK_FE_INT_RFIFO_UF) || + (status & MTK_FE_INT_RFIFO_OV) || + (status & MTK_FE_INT_TSO_FAIL) || + (status & MTK_FE_INT_TSO_ALIGN) || + (status & MTK_FE_INT_TSO_ILLEGAL)) { + while (status) { + val = ffs((unsigned int)status) - 1; + mtk_reset_event_update(eth, val); + status &= ~(1 << val); + } + ret = 1; + } + + if 
(atomic_read(&force)) { + mtk_reset_event_update(eth, MTK_EVENT_FORCE); + ret = 1; + } + + if (ret) { + mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT); + mtk_dump_netsys_info(eth); + } + + return ret; +} + +irqreturn_t mtk_handle_fe_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + u32 status = 0, val = 0; + + status = mtk_r32(eth, MTK_FE_INT_STATUS); + pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status); + + while (status) { + val = ffs((unsigned int)status) - 1; + status &= ~(1 << val); + + if ((val == MTK_EVENT_FQ_EMPTY) || + (val == MTK_EVENT_TSO_FAIL) || + (val == MTK_EVENT_TSO_ILLEGAL) || + (val == MTK_EVENT_TSO_ALIGN) || + (val == MTK_EVENT_RFIFO_OV) || + (val == MTK_EVENT_RFIFO_UF)) + pr_info("[%s] Detect reset event: %s !\n", __func__, + mtk_reset_event_name[val]); + } + mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS); + + return IRQ_HANDLED; +} + +static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range) +{ + struct mtk_eth *eth = _eth; + u32 cur = offset; + + pr_info("\n============ %s ============\n", name); + while(cur < offset + range) { + pr_info("0x%x: %08x %08x %08x %08x\n", + cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4), + mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc)); + cur += 0x10; + } +} + +void mtk_dump_netsys_info(void *_eth) +{ + struct mtk_eth *eth = _eth; + + mtk_dump_reg(eth, "FE", 0x0, 0x500); + mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300); + mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400); + mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600); + mtk_dump_reg(eth, "PPE", 0x2200, 0x200); + mtk_dump_reg(eth, "GMAC", 0x10000, 0x300); +} + +void mtk_dma_monitor(struct timer_list *t) +{ + struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer); + static u32 timestamp = 0; + static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0; + static u32 prev_wdidx = 0; + u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0)); + u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY; + u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) && + ((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) && + ((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0); + u32 is_cdm_full = + !(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY); + u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0; + u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0; + u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) != + ((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE); + u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0; + u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0; + u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) && + ((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0x3F0000) == 0) && + ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0); + + if (cur_wdidx == prev_wdidx && is_wtx_busy && + is_oq_free && is_cdm_full) { + err_cnt1++; + if (err_cnt1 == 3) { + pr_info("WDMA CDM Hang !\n"); + pr_info("============== Time: %d ================\n", + timestamp); + pr_info("err_cnt1 = %d", err_cnt1); + pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n", + prev_wdidx, cur_wdidx); + pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n", + is_wtx_busy, is_oq_free, is_cdm_full); + pr_info("-- -- -- -- -- -- --\n"); + pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808)); + pr_info("WDMA_DTX_PTR = 0x%x\n", + mtk_r32(eth, MTK_WDMA_DTX_PTR(0))); + pr_info("WDMA_GLO_CFG = 0x%x\n", + mtk_r32(eth, MTK_WDMA_GLO_CFG(0))); + pr_info("WDMA_TX_DBG_MON0 = 0x%x\n", + mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0))); + 
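+			/* PSE output-queue status words sampled by is_oq_free above */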
pr_info("PSE_OQ_STA1 = 0x%x\n", + mtk_r32(eth, MTK_PSE_OQ_STA(0))); + pr_info("PSE_OQ_STA2 = 0x%x\n", + mtk_r32(eth, MTK_PSE_OQ_STA(1))); + pr_info("PSE_OQ_STA5 = 0x%x\n", + mtk_r32(eth, MTK_PSE_OQ_STA(4))); + pr_info("==============================\n"); + + if ((atomic_read(&reset_lock) == 0) && + (atomic_read(&force) == 0)){ + atomic_inc(&force); + schedule_work(ð->pending_work); + } + } + } else if (is_qfsm_hang && is_qfwd_hang) { + err_cnt2++; + if (err_cnt2 == 3) { + pr_info("QDMA Tx Hang !\n"); + pr_info("============== Time: %d ================\n", + timestamp); + pr_info("err_cnt2 = %d", err_cnt2); + pr_info("is_qfsm_hang = %d\n", is_qfsm_hang); + pr_info("is_qfwd_hang = %d\n", is_qfwd_hang); + pr_info("is_qfq_hang = %d\n", is_qfq_hang); + pr_info("-- -- -- -- -- -- --\n"); + pr_info("MTK_QDMA_FSM = 0x%x\n", + mtk_r32(eth, MTK_QDMA_FSM)); + pr_info("MTK_QDMA_FWD_CNT = 0x%x\n", + mtk_r32(eth, MTK_QDMA_FWD_CNT)); + pr_info("MTK_QDMA_FQ_CNT = 0x%x\n", + mtk_r32(eth, MTK_QDMA_FQ_CNT)); + pr_info("==============================\n"); + + if ((atomic_read(&reset_lock) == 0) && + (atomic_read(&force) == 0)){ + atomic_inc(&force); + schedule_work(ð->pending_work); + } + } + } else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) { + err_cnt3++; + if (err_cnt3 == 3) { + pr_info("ADMA Rx Hang !\n"); + pr_info("============== Time: %d ================\n", + timestamp); + pr_info("err_cnt3 = %d", err_cnt3); + pr_info("is_oq0_stuck = %d\n", is_oq0_stuck); + pr_info("is_cdm1_busy = %d\n", is_cdm1_busy); + pr_info("is_adma_busy = %d\n", is_adma_busy); + pr_info("-- -- -- -- -- -- --\n"); + pr_info("MTK_PSE_OQ_STA1 = 0x%x\n", + mtk_r32(eth, MTK_PSE_OQ_STA(0))); + pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n", + mtk_r32(eth, MTK_ADMA_RX_DBG0)); + pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n", + mtk_r32(eth, MTK_ADMA_RX_DBG1)); + pr_info("==============================\n"); + if ((atomic_read(&reset_lock) == 0) && + (atomic_read(&force) == 0)){ + atomic_inc(&force); + schedule_work(ð->pending_work); + } + } + } else { + err_cnt1 = 0; + err_cnt2 = 0; + err_cnt3 = 0; + } + + prev_wdidx = cur_wdidx; + mod_timer(ð->mtk_dma_monitor_timer, jiffies + 1 * HZ); +} + +void mtk_prepare_reset_fe(struct mtk_eth *eth) +{ + u32 i = 0, val = 0; + + /* Disable NETSYS Interrupt */ + mtk_w32(eth, 0, MTK_FE_INT_ENABLE); + mtk_w32(eth, 0, MTK_PDMA_INT_MASK); + mtk_w32(eth, 0, MTK_QDMA_INT_MASK); + + /* Disable Linux netif Tx path */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_tx_disable(eth->netdev[i]); + } + + /* Disable QDMA Tx */ + val = mtk_r32(eth, MTK_QDMA_GLO_CFG); + mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG); + + /* Power down sgmii */ + regmap_read(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val |= SGMII_PHYA_PWD; + regmap_write(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, val); + regmap_read(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val |= SGMII_PHYA_PWD; + regmap_write(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, val); + + /* Force link down GMAC */ + val = mtk_r32(eth, MTK_MAC_MCR(0)); + mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0)); + val = mtk_r32(eth, MTK_MAC_MCR(1)); + mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1)); + + /* Disable GMAC Rx */ + val = mtk_r32(eth, MTK_MAC_MCR(0)); + mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0)); + val = mtk_r32(eth, MTK_MAC_MCR(1)); + mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1)); + + /* Enable GDM drop */ + mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + + /* 
Disable ADMA Rx */ + val = mtk_r32(eth, MTK_PDMA_GLO_CFG); + mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG); +} + +void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id) +{ + u32 i = 0, poll_time = 5000, val; + + /* Disable KA */ + mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id)); + mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id)); + mtk_w32(eth, 0, MTK_PPE_KA(ppe_id)); + mdelay(10); + + /* Set KA timer to maximum */ + mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id)); + mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id)); + + /* Set KA tick select */ + mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id)); + mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id)); + mdelay(10); + + /* Disable scan mode */ + mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id)); + mdelay(10); + + /* Check PPE idle */ + while (i++ < poll_time) { + val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id)); + if (!(val & MTK_PPE_BUSY)) + break; + mdelay(1); + } + + if (i >= poll_time) { + pr_info("[%s] PPE keeps busy !\n", __func__); + mtk_dump_reg(eth, "FE", 0x0, 0x500); + mtk_dump_reg(eth, "PPE", 0x2200, 0x200); + } +} + +static int mtk_eth_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + switch (event) { + case MTK_WIFI_RESET_DONE: + complete(&wait_ser_done); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +struct notifier_block mtk_eth_netdevice_nb __read_mostly = { + .notifier_call = mtk_eth_netdevice_event, +}; diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.h new file mode 100644 index 000000000..9abd2f51e --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_reset.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Henry Yen + */ + +#ifndef MTK_ETH_RESET_H +#define MTK_ETH_RESET_H + +/* Frame Engine Reset FSM */ +#define MTK_FE_START_RESET 0x2000 +#define MTK_FE_RESET_DONE 0x2001 +#define MTK_WIFI_RESET_DONE 0x2002 +#define MTK_NAT_DISABLE 0x3000 +#define MTK_FE_RESET_NAT_DONE 0x4001 + +/* ADMA Rx Debug Monitor */ +#define MTK_ADMA_RX_DBG0 (PDMA_BASE + 0x238) +#define MTK_ADMA_RX_DBG1 (PDMA_BASE + 0x23C) + +/* PPE Configurations */ +#define MTK_PPE_GLO_CFG(x) (PPE_BASE(x) + 0x00) +#define MTK_PPE_TB_CFG(x) (PPE_BASE(x) + 0x1C) +#define MTK_PPE_BIND_LMT_1(x) (PPE_BASE(x) + 0x30) +#define MTK_PPE_KA(x) (PPE_BASE(x) + 0x34) +#define MTK_PPE_KA_CFG_MASK (0x3 << 12) +#define MTK_PPE_NTU_KA_MASK (0xFF << 16) +#define MTK_PPE_KA_T_MASK (0xFFFF << 0) +#define MTK_PPE_TCP_KA_MASK (0xFF << 16) +#define MTK_PPE_UDP_KA_MASK (0xFF << 24) +#define MTK_PPE_TICK_SEL_MASK (0x1 << 24) +#define MTK_PPE_SCAN_MODE_MASK (0x3 << 16) +#define MTK_PPE_BUSY BIT(31) + +enum mtk_reset_type { + MTK_TYPE_COLD_RESET = 0, + MTK_TYPE_WARM_RESET, +}; + +enum mtk_reset_event_id { + MTK_EVENT_FORCE = 0, + MTK_EVENT_WARM_CNT = 1, + MTK_EVENT_COLD_CNT = 2, + MTK_EVENT_TOTAL_CNT = 3, + MTK_EVENT_FQ_EMPTY = 8, + MTK_EVENT_TSO_FAIL = 12, + MTK_EVENT_TSO_ILLEGAL = 13, + MTK_EVENT_TSO_ALIGN = 14, + MTK_EVENT_RFIFO_OV = 18, + MTK_EVENT_RFIFO_UF = 19, +}; + +extern struct notifier_block mtk_eth_netdevice_nb __read_mostly; +extern struct completion wait_ser_done; +extern char* mtk_reset_event_name[32]; +extern atomic_t reset_lock; + +irqreturn_t mtk_handle_fe_irq(int irq, void *_eth); +u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status); +int mtk_eth_cold_reset(struct mtk_eth *eth); +int mtk_eth_warm_reset(struct mtk_eth *eth); +void mtk_reset_event_update(struct mtk_eth *eth, u32 id); +void mtk_dump_netsys_info(void *_eth); +void mtk_dma_monitor(struct timer_list *t); +void mtk_prepare_reset_fe(struct mtk_eth *eth); +void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id); + +#endif /* MTK_ETH_RESET_H */ diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.c new file mode 100644 index 000000000..b3ab3f603 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.c @@ -0,0 +1,3850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_eth_soc.h" +#include "mtk_eth_dbg.h" +#include "mtk_eth_reset.h" + +#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) +#include "mtk_hnat/nf_hnat_mtk.h" +#endif + +static int mtk_msg_level = -1; +atomic_t reset_lock = ATOMIC_INIT(0); +atomic_t force = ATOMIC_INIT(0); + +module_param_named(msg_level, mtk_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); +DECLARE_COMPLETION(wait_ser_done); + +#define MTK_ETHTOOL_STAT(x) { #x, \ + offsetof(struct mtk_hw_stats, x) / sizeof(u64) } + +/* strings used by ethtool */ +static const struct mtk_ethtool_stats { + char str[ETH_GSTRING_LEN]; + u32 offset; +} mtk_ethtool_stats[] = { + MTK_ETHTOOL_STAT(tx_bytes), + MTK_ETHTOOL_STAT(tx_packets), + MTK_ETHTOOL_STAT(tx_skip), + MTK_ETHTOOL_STAT(tx_collisions), + MTK_ETHTOOL_STAT(rx_bytes), + MTK_ETHTOOL_STAT(rx_packets), + 
MTK_ETHTOOL_STAT(rx_overflow), + MTK_ETHTOOL_STAT(rx_fcs_errors), + MTK_ETHTOOL_STAT(rx_short_errors), + MTK_ETHTOOL_STAT(rx_long_errors), + MTK_ETHTOOL_STAT(rx_checksum_errors), + MTK_ETHTOOL_STAT(rx_flow_control_packets), +}; + +static const char * const mtk_clks_source_name[] = { + "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll", + "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", + "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", + "sgmii_ck", "eth2pll", "wocpu0","wocpu1", +}; + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) +{ + __raw_writel(val, eth->base + reg); +} + +u32 mtk_r32(struct mtk_eth *eth, unsigned reg) +{ + return __raw_readl(eth->base + reg); +} + +u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) +{ + u32 val; + + val = mtk_r32(eth, reg); + val &= ~mask; + val |= set; + mtk_w32(eth, val, reg); + return reg; +} + +static int mtk_mdio_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) + return 0; + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) + break; + cond_resched(); + } + + dev_err(eth->dev, "mdio: MDIO timeout\n"); + return -1; +} + +u32 _mtk_mdio_write(struct mtk_eth *eth, u16 phy_addr, + u16 phy_register, u16 write_data) +{ + if (mtk_mdio_busy_wait(eth)) + return -1; + + write_data &= 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | + ((phy_register & 0x1f) << PHY_IAC_REG_SHIFT) | + ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT) | write_data, + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return -1; + + return 0; +} + +u32 _mtk_mdio_read(struct mtk_eth *eth, u16 phy_addr, u16 phy_reg) +{ + u32 d; + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | + ((phy_reg & 0x1f) << PHY_IAC_REG_SHIFT) | + ((phy_addr & 0x1f) << PHY_IAC_ADDR_SHIFT), + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; + + return d; +} + +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, + int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write(eth, phy_addr, phy_reg, val); +} + +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_read(eth, phy_addr, phy_reg); +} + +u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data) +{ + mutex_lock(ð->mii_bus->mdio_lock); + + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad); + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg); + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad); + *data = _mtk_mdio_read(eth, port, MII_MMD_ADDR_DATA_REG); + + mutex_unlock(ð->mii_bus->mdio_lock); + + return 0; +} + +u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data) +{ + mutex_lock(ð->mii_bus->mdio_lock); + + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, devad); + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, reg); + _mtk_mdio_write(eth, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad); + _mtk_mdio_write(eth, port, MII_MMD_ADDR_DATA_REG, data); + + mutex_unlock(ð->mii_bus->mdio_lock); + + return 0; +} + +static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface) +{ + u32 val; + + /* Check DDR memory type. + * Currently TRGMII mode with DDR2 memory is not supported. 
+ */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); + if (interface == PHY_INTERFACE_MODE_TRGMII && + val & SYSCFG_DRAM_TYPE_DDR2) { + dev_err(eth->dev, + "TRGMII mode with DDR2 memory is not supported!\n"); + return -EOPNOTSUPP; + } + + val = (interface == PHY_INTERFACE_MODE_TRGMII) ? + ETHSYS_TRGMII_MT7621_DDR_PLL : 0; + + regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, + ETHSYS_TRGMII_MT7621_MASK, val); + + return 0; +} + +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) +{ + u32 val; + int ret; + + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + + val = (speed == SPEED_1000) ? + INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; + mtk_w32(eth, val, INTF_MODE); + + regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, + ETHSYS_TRGMII_CLK_SEL362_5, + ETHSYS_TRGMII_CLK_SEL362_5); + + val = (speed == SPEED_1000) ? 250000000 : 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + + val = (speed == SPEED_1000) ? + RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100; + mtk_w32(eth, val, TRGMII_RCK_CTRL); + + val = (speed == SPEED_1000) ? + TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100; + mtk_w32(eth, val, TRGMII_TCK_CTRL); +} + +static void mtk_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new, sid, i; + int val, ge_mode, err=0; + + /* MT76x8 has no hardware settings between for the MAC */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + mac->interface != state->interface) { + /* Setup soc pin functions */ + switch (state->interface) { + case PHY_INTERFACE_MODE_TRGMII: + if (mac->id) + goto err_phy; + if (!MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_GMAC1_TRGMII)) + goto err_phy; + /* fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_RMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { + err = mtk_gmac_rgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + err = mtk_gmac_sgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_GMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { + err = mtk_gmac_gephy_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + default: + goto err_phy; + } + + /* Setup clock for 1st gmac */ + if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_TRGMII_MT7621_CLK)) { + if (mt7621_gmac0_rgmii_adjust(mac->hw, + state->interface)) + goto err_phy; + } else { + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); + + /* mt7623_pad_clk_setup */ + for (i = 0 ; i < NUM_TRGMII_CTRL; i++) + mtk_w32(mac->hw, + TD_DM_DRVP(8) | TD_DM_DRVN(8), + TRGMII_TD_ODT(i)); + + /* 
Assert/release MT7623 RXC reset */ + mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, + TRGMII_RCK_CTRL); + mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); + } + } + + ge_mode = 0; + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_REVMII: + ge_mode = 2; + break; + case PHY_INTERFACE_MODE_RMII: + if (mac->id) + goto err_phy; + ge_mode = 3; + break; + default: + break; + } + + /* put the gmac into the right mode */ + spin_lock(ð->syscfg0_lock); + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + spin_unlock(ð->syscfg0_lock); + + mac->interface = state->interface; + } + + /* SGMII */ + if (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) { + /* The path GMAC to SGMII will be enabled once the SGMIISYS is + * being setup done. + */ + spin_lock(ð->syscfg0_lock); + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, + ~(u32)SYSCFG0_SGMII_MASK); + + /* Decide how GMAC and SGMIISYS be mapped */ + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac->id; + + /* Setup SGMIISYS with the determined property */ + if (state->interface != PHY_INTERFACE_MODE_SGMII) + err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, + state); + else if (phylink_autoneg_inband(mode)) + err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); + + if (err) { + spin_unlock(ð->syscfg0_lock); + goto init_err; + } + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + spin_unlock(ð->syscfg0_lock); + } else if (phylink_autoneg_inband(mode)) { + dev_err(eth->dev, + "In-band mode not supported in non SGMII mode!\n"); + return; + } + + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | + MAC_MCR_FORCE_RX_FC); + mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + + switch (state->speed) { + case SPEED_2500: + case SPEED_1000: + mcr_new |= MAC_MCR_SPEED_1000; + break; + case SPEED_100: + mcr_new |= MAC_MCR_SPEED_100; + break; + } + if (state->duplex == DUPLEX_FULL) { + mcr_new |= MAC_MCR_FORCE_DPX; + if (state->pause & MLO_PAUSE_TX) + mcr_new |= MAC_MCR_FORCE_TX_FC; + if (state->pause & MLO_PAUSE_RX) + mcr_new |= MAC_MCR_FORCE_RX_FC; + } + + /* Only update control register when needed! 
*/ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); + + return; + +err_phy: + dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, + mac->id, phy_modes(state->interface)); + return; + +init_err: + dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, + mac->id, phy_modes(state->interface), err); +} + +static int mtk_mac_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); + + state->link = (pmsr & MAC_MSR_LINK); + state->duplex = (pmsr & MAC_MSR_DPX) >> 1; + + switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) { + case 0: + state->speed = SPEED_10; + break; + case MAC_MSR_SPEED_100: + state->speed = SPEED_100; + break; + case MAC_MSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & MAC_MSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & MAC_MSR_TX_FC) + state->pause |= MLO_PAUSE_TX; + + return 1; +} + +static void mtk_mac_an_restart(struct phylink_config *config) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + + mtk_sgmii_restart_an(mac->hw, mac->id); +} + +static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); +} + +static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, + struct phy_device *phy) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); +} + +static void mtk_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) && + phy_interface_mode_is_rgmii(state->interface)) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && + !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) && + (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)))) { + linkmode_zero(supported); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + + switch (state->interface) { + case PHY_INTERFACE_MODE_TRGMII: + phylink_set(mask, 1000baseT_Full); + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + break; + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + phylink_set(mask, 1000baseT_Half); + /* fall through */ + case PHY_INTERFACE_MODE_SGMII: + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + /* 
fall through */ + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_NA: + default: + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + break; + } + + if (state->interface == PHY_INTERFACE_MODE_NA) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + } + + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); + + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. + */ + phylink_helper_basex_speed(state); +} + +static const struct phylink_mac_ops mtk_phylink_ops = { + .validate = mtk_validate, + .mac_link_state = mtk_mac_link_state, + .mac_an_restart = mtk_mac_an_restart, + .mac_config = mtk_mac_config, + .mac_link_down = mtk_mac_link_down, + .mac_link_up = mtk_mac_link_up, +}; + +static int mtk_mdio_init(struct mtk_eth *eth) +{ + struct device_node *mii_np; + int ret; + + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); + if (!mii_np) { + dev_err(eth->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + ret = -ENODEV; + goto err_put_node; + } + + eth->mii_bus = devm_mdiobus_alloc(eth->dev); + if (!eth->mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } + + eth->mii_bus->name = "mdio"; + eth->mii_bus->read = mtk_mdio_read; + eth->mii_bus->write = mtk_mdio_write; + eth->mii_bus->priv = eth; + eth->mii_bus->parent = eth->dev; + + if(snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np) < 0) { + ret = -ENOMEM; + goto err_put_node; + } + ret = of_mdiobus_register(eth->mii_bus, mii_np); + +err_put_node: + of_node_put(mii_np); + return ret; +} + +static void mtk_mdio_cleanup(struct mtk_eth *eth) +{ + if (!eth->mii_bus) + return; + + mdiobus_unregister(eth->mii_bus); +} + +static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->tx_irq_lock, flags); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); + spin_unlock_irqrestore(ð->tx_irq_lock, flags); +} + +static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->tx_irq_lock, flags); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val | mask, eth->tx_int_mask_reg); + spin_unlock_irqrestore(ð->tx_irq_lock, flags); +} + +static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->rx_irq_lock, flags); + val = mtk_r32(eth, MTK_PDMA_INT_MASK); + mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); + spin_unlock_irqrestore(ð->rx_irq_lock, flags); +} + +static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->rx_irq_lock, flags); + val = mtk_r32(eth, MTK_PDMA_INT_MASK); + mtk_w32(eth, val | mask, 
MTK_PDMA_INT_MASK); + spin_unlock_irqrestore(ð->rx_irq_lock, flags); +} + +static int mtk_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + const char *macaddr = dev->dev_addr; + + if (ret) + return ret; + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + spin_lock_bh(&mac->hw->page_lock); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MT7628_SDM_MAC_ADRH); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MT7628_SDM_MAC_ADRL); + } else { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + } + spin_unlock_bh(&mac->hw->page_lock); + + return 0; +} + +void mtk_stats_update_mac(struct mtk_mac *mac) +{ + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int base = MTK_GDM1_TX_GBCNT; + u64 stats; + + base += hw_stats->reg_offset; + + u64_stats_update_begin(&hw_stats->syncp); + + hw_stats->rx_bytes += mtk_r32(mac->hw, base); + stats = mtk_r32(mac->hw, base + 0x04); + if (stats) + hw_stats->rx_bytes += (stats << 32); + hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); + hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); + hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); + hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); + hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); + hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); + hw_stats->rx_flow_control_packets += + mtk_r32(mac->hw, base + 0x24); + hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); + hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); + hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); + stats = mtk_r32(mac->hw, base + 0x34); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); + u64_stats_update_end(&hw_stats->syncp); +} + +static void mtk_stats_update(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i] || !eth->mac[i]->hw_stats) + continue; + if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { + mtk_stats_update_mac(eth->mac[i]); + spin_unlock(ð->mac[i]->hw_stats->stats_lock); + } + } +} + +static void mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int start; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock_bh(&hw_stats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock_bh(&hw_stats->stats_lock); + } + } + + do { + start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + storage->rx_packets = hw_stats->rx_packets; + storage->tx_packets = hw_stats->tx_packets; + storage->rx_bytes = hw_stats->rx_bytes; + storage->tx_bytes = hw_stats->tx_bytes; + storage->collisions = hw_stats->tx_collisions; + storage->rx_length_errors = hw_stats->rx_short_errors + + hw_stats->rx_long_errors; + storage->rx_over_errors = hw_stats->rx_overflow; + storage->rx_crc_errors = hw_stats->rx_fcs_errors; + storage->rx_errors = hw_stats->rx_checksum_errors; + storage->tx_aborted_errors = hw_stats->tx_skip; + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + + storage->tx_errors = dev->stats.tx_errors; + 
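+	/* rx/tx dropped counts are only tracked in the software netdev stats,
+	 * not in the GDM hardware counters
+	 */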
storage->rx_dropped = dev->stats.rx_dropped; + storage->tx_dropped = dev->stats.tx_dropped; +} + +static inline int mtk_max_frag_size(int mtu) +{ + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) + mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static inline int mtk_max_buf_size(int frag_size) +{ + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + WARN_ON(buf_size < MTK_MAX_RX_LENGTH); + + return buf_size; +} + +static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, + struct mtk_rx_dma *dma_rxd) +{ + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); + if (!(rxd->rxd2 & RX_DMA_DONE)) + return false; + + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); + rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); +#endif + return true; +} + +/* the qdma core needs scratch memory to be setup */ +static int mtk_init_fq_dma(struct mtk_eth *eth) +{ + dma_addr_t phy_ring_tail; + int cnt = MTK_DMA_SIZE; + dma_addr_t dma_addr; + int i; + + if (!eth->soc->has_sram) { + eth->scratch_ring = dma_alloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + ð->phy_scratch_ring, + GFP_ATOMIC); + } else { + eth->scratch_ring = eth->base + MTK_ETH_SRAM_OFFSET; + } + + if (unlikely(!eth->scratch_ring)) + return -ENOMEM; + + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, + GFP_KERNEL); + if (unlikely(!eth->scratch_head)) + return -ENOMEM; + + dma_addr = dma_map_single(eth->dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + + phy_ring_tail = eth->phy_scratch_ring + + (sizeof(struct mtk_tx_dma) * (cnt - 1)); + + for (i = 0; i < cnt; i++) { + eth->scratch_ring[i].txd1 = + (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); + if (i < cnt - 1) + eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + + ((i + 1) * sizeof(struct mtk_tx_dma))); + eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); + + eth->scratch_ring[i].txd4 = 0; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (eth->soc->has_sram && ((sizeof(struct mtk_tx_dma)) > 16)) { + eth->scratch_ring[i].txd5 = 0; + eth->scratch_ring[i].txd6 = 0; + eth->scratch_ring[i].txd7 = 0; + eth->scratch_ring[i].txd8 = 0; + } +#endif + } + + mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); + mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); + mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); + + return 0; +} + +static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) +{ + void *ret = ring->dma; + + return ret + (desc - ring->phys); +} + +static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, + struct mtk_tx_dma *txd) +{ + int idx = txd - ring->dma; + + return &ring->buf[idx]; +} + +static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, + struct mtk_tx_dma *dma) +{ + return ring->dma_pdma - ring->dma + dma; +} + +static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) +{ + return ((void *)dma - (void *)ring->dma) / sizeof(*dma); +} + +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + bool napi) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + 
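+			/* SINGLE0 marks the skb head, which was mapped with dma_map_single() */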
dma_unmap_single(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); + } + } + + tx_buf->flags = 0; + if (tx_buf->skb && + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) { + if (napi) + napi_consume_skb(tx_buf->skb, napi); + else + dev_kfree_skb_any(tx_buf->skb); + } + tx_buf->skb = NULL; +} + +static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + struct mtk_tx_dma *txd, dma_addr_t mapped_addr, + size_t size, int idx) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } else { + if (idx & 1) { + txd->txd3 = mapped_addr; + txd->txd2 |= TX_DMA_PLEN1(size); + dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len1, size); + } else { + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + txd->txd1 = mapped_addr; + txd->txd2 = TX_DMA_PLEN0(size); + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } + } +} + +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, + int tx_num, struct mtk_tx_ring *ring, bool gso) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_dma *itxd_pdma, *txd_pdma; + struct mtk_tx_buf *itx_buf, *tx_buf; + dma_addr_t mapped_addr; + unsigned int nr_frags; + int i, n_desc = 1; + u32 txd4 = 0, txd5 = 0, txd6 = 0; + u32 fport; + u32 qid = 0; + int k = 0; + + itxd = ring->next_free; + itxd_pdma = qdma_to_pdma(ring, itxd); + if (itxd == ring->last_free) + return -ENOMEM; + + itx_buf = mtk_desc_to_tx_buf(ring, itxd); + memset(itx_buf, 0, sizeof(*itx_buf)); + + mapped_addr = dma_map_single(eth->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + return -ENOMEM; + + WRITE_ONCE(itxd->txd1, mapped_addr); + itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; + itx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : + MTK_TX_FLAGS_FPORT1; + setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), + k++); + + nr_frags = skb_shinfo(skb)->nr_frags; + + qid = skb->mark & (MTK_QDMA_TX_MASK); + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if(!qid && mac->id) + qid = MTK_QDMA_GMAC2_QID; +#endif + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* set the forward port */ + fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; + txd4 |= fport; + + if (gso) + txd5 |= TX_DMA_TSO_V2; + + /* TX Checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + txd5 |= TX_DMA_CHKSUM_V2; + + /* VLAN header offload */ + if (skb_vlan_tag_present(skb)) + txd6 |= TX_DMA_INS_VLAN_V2 | skb_vlan_tag_get(skb); + + txd4 = txd4 | TX_DMA_SWC_V2; + } else { + /* set the forward port */ + fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; + txd4 |= fport; + + if (gso) + txd4 |= TX_DMA_TSO; + + /* TX Checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + txd4 |= TX_DMA_CHKSUM; + + /* VLAN header offload */ + if (skb_vlan_tag_present(skb)) + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); + } + /* TX SG offload */ + txd = itxd; + txd_pdma = qdma_to_pdma(ring, txd); + +#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) + if (HNAT_SKB_CB2(skb)->magic == 0x78681415) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + txd4 &= ~(0xf << TX_DMA_FPORT_SHIFT_V2); + txd4 |= 0x4 << TX_DMA_FPORT_SHIFT_V2; + } else { + txd4 &= ~(0x7 << TX_DMA_FPORT_SHIFT); + txd4 |= 0x4 << TX_DMA_FPORT_SHIFT; + } + } + + trace_printk("[%s] nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n", + __func__, nr_frags, HNAT_SKB_CB2(skb)->magic, txd4); +#endif + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + unsigned int offset = 0; + int frag_size = skb_frag_size(frag); + + while (frag_size) { + bool last_frag = false; + unsigned int frag_map_size; + bool new_desc = true; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || + (i & 0x1)) { + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + txd_pdma = qdma_to_pdma(ring, txd); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + } else { + new_desc = false; + } + + + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); + mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + goto err_dma; + + if (i == nr_frags - 1 && + (frag_size - frag_map_size) == 0) + last_frag = true; + + WRITE_ONCE(txd->txd1, mapped_addr); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + WRITE_ONCE(txd->txd3, (TX_DMA_PLEN0(frag_map_size) | + last_frag * TX_DMA_LS0)); + WRITE_ONCE(txd->txd4, fport | TX_DMA_SWC_V2 | + QID_BITS_V2(qid)); + } else { + WRITE_ONCE(txd->txd3, + (TX_DMA_SWC | QID_LOW_BITS(qid) | + TX_DMA_PLEN0(frag_map_size) | + last_frag * TX_DMA_LS0)); + WRITE_ONCE(txd->txd4, + fport | QID_HIGH_BITS(qid)); + } + + tx_buf = mtk_desc_to_tx_buf(ring, txd); + if (new_desc) + memset(tx_buf, 0, sizeof(*tx_buf)); + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + tx_buf->flags |= MTK_TX_FLAGS_PAGE0; + tx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : + MTK_TX_FLAGS_FPORT1; + + setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, + frag_map_size, k++); + + frag_size -= frag_map_size; + offset += frag_map_size; + } + } + + /* store skb to cleanup */ + itx_buf->skb = skb; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + WRITE_ONCE(itxd->txd5, txd5); + WRITE_ONCE(itxd->txd6, txd6); + WRITE_ONCE(itxd->txd7, 0); + WRITE_ONCE(itxd->txd8, 0); +#endif + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + WRITE_ONCE(itxd->txd4, txd4 | QID_BITS_V2(qid)); + WRITE_ONCE(itxd->txd3, (TX_DMA_PLEN0(skb_headlen(skb)) | + (!nr_frags * TX_DMA_LS0))); + } else { + WRITE_ONCE(itxd->txd4, txd4 | QID_HIGH_BITS(qid)); + WRITE_ONCE(itxd->txd3, + TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | + (!nr_frags * TX_DMA_LS0) | QID_LOW_BITS(qid)); + } + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (k & 0x1) + txd_pdma->txd2 |= TX_DMA_LS0; + else + txd_pdma->txd2 |= TX_DMA_LS1; + } + + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); + + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); + atomic_sub(n_desc, &ring->free_count); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || + !netdev_xmit_more()) + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + } else { + int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd), + ring->dma_size); + mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); + } + + return 0; + +err_dma: + do { + tx_buf = mtk_desc_to_tx_buf(ring, itxd); + + /* unmap dma */ + mtk_tx_unmap(eth, tx_buf, false); + + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + itxd_pdma->txd2 = TX_DMA_DESP2_DEF; + + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + itxd_pdma = qdma_to_pdma(ring, itxd); + } while (itxd != txd); + + return -ENOMEM; +} + +static inline int mtk_cal_txd_req(struct sk_buff *skb) +{ + int i, nfrags; + skb_frag_t *frag; + + nfrags = 1; + if (skb_is_gso(skb)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nfrags += DIV_ROUND_UP(skb_frag_size(frag), + MTK_TX_DMA_BUF_LEN); + } + } else { + nfrags += skb_shinfo(skb)->nr_frags; + } + + return nfrags; +} + +static int mtk_queue_stopped(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + if (netif_queue_stopped(eth->netdev[i])) + return 1; + } + + return 0; +} + +static void mtk_wake_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_wake_queue(eth->netdev[i]); + } +} + +static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_ring *ring = ð->tx_ring; + struct net_device_stats *stats = &dev->stats; + bool gso = false; + int tx_num; + + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running on the same ring so we need to lock + * the ring access + */ + spin_lock(ð->page_lock); + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto drop; + + tx_num = mtk_cal_txd_req(skb); + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { + netif_stop_queue(dev); + netif_err(eth, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + spin_unlock(ð->page_lock); + return NETDEV_TX_BUSY; + } + + /* TSO: fill MSS info in tcp checksum field */ + if 
(skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) { + netif_warn(eth, tx_err, dev, + "GSO expand head fail.\n"); + goto drop; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + gso = true; + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); + } + } + + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) + goto drop; + + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) + netif_stop_queue(dev); + + spin_unlock(ð->page_lock); + + return NETDEV_TX_OK; + +drop: + spin_unlock(ð->page_lock); + stats->tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) +{ + int i; + struct mtk_rx_ring *ring; + int idx; + + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + if (!IS_NORMAL_RING(i) && !IS_HW_LRO_RING(i)) + continue; + + ring = ð->rx_ring[i]; + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); + if (ring->dma[idx].rxd2 & RX_DMA_DONE) { + ring->calc_idx_update = true; + return ring; + } + } + + return NULL; +} + +static void mtk_update_rx_cpu_idx(struct mtk_eth *eth, struct mtk_rx_ring *ring) +{ + int i; + + if (!eth->hwlro) + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + else { + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + ring = ð->rx_ring[i]; + if (ring->calc_idx_update) { + ring->calc_idx_update = false; + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + } + } + } +} + +static int mtk_poll_rx(struct napi_struct *napi, int budget, + struct mtk_eth *eth) +{ + struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi); + struct mtk_rx_ring *ring = rx_napi->rx_ring; + int idx; + struct sk_buff *skb; + u8 *data, *new_data; + struct mtk_rx_dma *rxd, trxd; + int done = 0; + + if (unlikely(!ring)) + goto rx_done; + + while (done < budget) { + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; + int mac; + + if (eth->hwlro) + ring = mtk_get_rx_ring(eth); + + if (unlikely(!ring)) + goto rx_done; + + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); + rxd = &ring->dma[idx]; + data = ring->data[idx]; + + if (!mtk_rx_get_desc(&trxd, rxd)) + break; + + /* find out which mac the packet come from. values start at 1 */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mac = 0; + } else { +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mac = RX_DMA_GET_SPORT(trxd.rxd5) - 1; + else +#endif + mac = (trxd.rxd4 & RX_DMA_SPECIAL_TAG) ? 
+ 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1; + } + + if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || + !eth->netdev[mac])) + goto release_desc; + + netdev = eth->netdev[mac]; + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto release_desc; + + /* alloc new buffer */ + new_data = napi_alloc_frag(ring->frag_size); + if (unlikely(!new_data)) { + netdev->stats.rx_dropped++; + goto release_desc; + } + dma_addr = dma_map_single(eth->dev, + new_data + NET_SKB_PAD + + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { + skb_free_frag(new_data); + netdev->stats.rx_dropped++; + goto release_desc; + } + + dma_unmap_single(eth->dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + + /* receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { + skb_free_frag(data); + netdev->stats.rx_dropped++; + goto skip_rx; + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + skb->dev = netdev; + skb_put(skb, pktlen); + + if ((!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && + (trxd.rxd4 & eth->rx_dma_l4_valid)) || + (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && + (trxd.rxd3 & eth->rx_dma_l4_valid))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (trxd.rxd3 & RX_DMA_VTAG_V2) + __vlan_hwaccel_put_tag(skb, + htons(RX_DMA_VPID_V2(trxd.rxd4)), + RX_DMA_VID_V2(trxd.rxd4)); + } else { + if (trxd.rxd2 & RX_DMA_VTAG) + __vlan_hwaccel_put_tag(skb, + htons(RX_DMA_VPID(trxd.rxd3)), + RX_DMA_VID(trxd.rxd3)); + } + + /* If netdev is attached to dsa switch, the special + * tag inserted in VLAN field by switch hardware can + * be offload by RX HW VLAN offload. Clears the VLAN + * information from @skb to avoid unexpected 8021d + * handler before packet enter dsa framework. 
+ */ + if (netdev_uses_dsa(netdev)) + __vlan_hwaccel_clear_tag(skb); + } + +#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + *(u32 *)(skb->head) = trxd.rxd5; + else +#endif + *(u32 *)(skb->head) = trxd.rxd4; + + skb_hnat_alg(skb) = 0; + skb_hnat_filled(skb) = 0; + skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG; + + if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) { + trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n", + __func__, skb_hnat_reason(skb)); + skb->pkt_type = PACKET_HOST; + } + + trace_printk("[%s] rxd:(entry=%x,sport=%x,reason=%x,alg=%x\n", + __func__, skb_hnat_entry(skb), skb_hnat_sport(skb), + skb_hnat_reason(skb), skb_hnat_alg(skb)); +#endif + if (mtk_hwlro_stats_ebl && + IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) { + hw_lro_stats_update(ring->ring_no, &trxd); + hw_lro_flush_stats_update(ring->ring_no, &trxd); + } + + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); + +skip_rx: + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + +release_desc: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + + ring->calc_idx = idx; + + done++; + } + +rx_done: + if (done) { + /* make sure that all changes to the dma ring are flushed before + * we continue + */ + wmb(); + mtk_update_rx_cpu_idx(eth, ring); + } + + return done; +} + +static void mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + u32 cpu, dma; + + cpu = ring->last_free_ptr; + dma = mtk_r32(eth, MTK_QTX_DRX_PTR); + + desc = mtk_qdma_phys_to_virt(ring, cpu); + + while ((cpu != dma) && budget) { + u32 next_cpu = desc->txd2; + int mac = 0; + + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) + break; + + desc = mtk_qdma_phys_to_virt(ring, desc->txd2); + + tx_buf = mtk_desc_to_tx_buf(ring, desc); + if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) + mac = 1; + + skb = tx_buf->skb; + if (!skb) + break; + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[mac] += skb->len; + done[mac]++; + budget--; + } + mtk_tx_unmap(eth, tx_buf, true); + + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = next_cpu; + } + + ring->last_free_ptr = cpu; + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); +} + +static void mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + u32 cpu, dma; + + cpu = ring->cpu_idx; + dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); + + while ((cpu != dma) && budget) { + tx_buf = &ring->buf[cpu]; + skb = tx_buf->skb; + if (!skb) + break; + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[0] += skb->len; + done[0]++; + budget--; + } + + mtk_tx_unmap(eth, tx_buf, true); + + desc = &ring->dma[cpu]; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = NEXT_DESP_IDX(cpu, ring->dma_size); + } + + ring->cpu_idx = cpu; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + unsigned int done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + int total = 0, i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + 
mtk_poll_tx_qdma(eth, budget, done, bytes); + else + mtk_poll_tx_pdma(eth, budget, done, bytes); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || !done[i]) + continue; + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); + total += done[i]; + } + + if (mtk_queue_stopped(eth) && + (atomic_read(&ring->free_count) > ring->thresh)) + mtk_wake_queue(eth); + + return total; +} + +static void mtk_handle_status_irq(struct mtk_eth *eth) +{ + u32 status2 = mtk_r32(eth, MTK_FE_INT_STATUS); + + if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { + mtk_stats_update(eth); + mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), + MTK_FE_INT_STATUS); + } +} + +static int mtk_napi_tx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); + u32 status, mask; + int tx_done = 0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg); + tx_done = mtk_poll_tx(eth, budget); + + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, eth->tx_int_status_reg); + mask = mtk_r32(eth, eth->tx_int_mask_reg); + dev_info(eth->dev, + "done tx %d, intr 0x%08x/0x%x\n", + tx_done, status, mask); + } + + if (tx_done == budget) + return budget; + + status = mtk_r32(eth, eth->tx_int_status_reg); + if (status & MTK_TX_DONE_INT) + return budget; + + if (napi_complete(napi)) + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; +} + +static int mtk_napi_rx(struct napi_struct *napi, int budget) +{ + struct mtk_napi *rx_napi = container_of(napi, struct mtk_napi, napi); + struct mtk_eth *eth = rx_napi->eth; + struct mtk_rx_ring *ring = rx_napi->rx_ring; + u32 status, mask; + int rx_done = 0; + int remain_budget = budget; + + mtk_handle_status_irq(eth); + +poll_again: + mtk_w32(eth, MTK_RX_DONE_INT(ring->ring_no), MTK_PDMA_INT_STATUS); + rx_done = mtk_poll_rx(napi, remain_budget, eth); + + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); + mask = mtk_r32(eth, MTK_PDMA_INT_MASK); + dev_info(eth->dev, + "done rx %d, intr 0x%08x/0x%x\n", + rx_done, status, mask); + } + if (rx_done == remain_budget) + return budget; + + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); + if (status & MTK_RX_DONE_INT(ring->ring_no)) { + remain_budget -= rx_done; + goto poll_again; + } + + if (napi_complete(napi)) + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(ring->ring_no)); + + return rx_done + budget - remain_budget; +} + +static int mtk_tx_alloc(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i, sz = sizeof(*ring->dma); + + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), + GFP_KERNEL); + if (!ring->buf) + goto no_tx_mem; + + if (!eth->soc->has_sram) + ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); + else { + ring->dma = eth->scratch_ring + MTK_DMA_SIZE; + ring->phys = eth->phy_scratch_ring + MTK_DMA_SIZE * sz; + } + + if (!ring->dma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + int next = (i + 1) % MTK_DMA_SIZE; + u32 next_ptr = ring->phys + next * sz; + + ring->dma[i].txd2 = next_ptr; + ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + ring->dma[i].txd4 = 0; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (eth->soc->has_sram && ( sz > 16)) { + ring->dma[i].txd5 = 0; + ring->dma[i].txd6 = 0; + ring->dma[i].txd7 = 0; + ring->dma[i].txd8 = 0; + } +#endif + } + + /* On MT7688 (PDMA only) this driver uses the ring->dma structs + * only as the framework. 
The real HW descriptors are the PDMA + * descriptors in ring->dma_pdma. + */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, + GFP_ATOMIC); + if (!ring->dma_pdma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; + ring->dma_pdma[i].txd4 = 0; + } + } + + ring->dma_size = MTK_DMA_SIZE; + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->next_free = &ring->dma[0]; + ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; + ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); + ring->thresh = MAX_SKB_FRAGS; + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); + mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, + MTK_QTX_CFG(0)); + } else { + mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); + mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); + mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); + } + + return 0; + +no_tx_mem: + return -ENOMEM; +} + +static void mtk_tx_clean(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i; + + if (ring->buf) { + for (i = 0; i < MTK_DMA_SIZE; i++) + mtk_tx_unmap(eth, &ring->buf[i], false); + kfree(ring->buf); + ring->buf = NULL; + } + + if (!eth->soc->has_sram && ring->dma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } + + if (ring->dma_pdma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma_pdma), + ring->dma_pdma, + ring->phys_pdma); + ring->dma_pdma = NULL; + } +} + +static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) +{ + struct mtk_rx_ring *ring; + int rx_data_len, rx_dma_size; + int i; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) + return -EINVAL; + ring = ð->rx_ring_qdma; + } else { + ring = ð->rx_ring[ring_no]; + } + + if (rx_flag == MTK_RX_FLAGS_HWLRO) { + rx_data_len = MTK_MAX_LRO_RX_LENGTH; + rx_dma_size = MTK_HW_LRO_DMA_SIZE; + } else { + rx_data_len = ETH_DATA_LEN; + rx_dma_size = MTK_DMA_SIZE; + } + + ring->frag_size = mtk_max_frag_size(rx_data_len); + ring->buf_size = mtk_max_buf_size(ring->frag_size); + ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), + GFP_KERNEL); + if (!ring->data) + return -ENOMEM; + + for (i = 0; i < rx_dma_size; i++) { + ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (!ring->data[i]) + return -ENOMEM; + } + + if ((!eth->soc->has_sram) || (eth->soc->has_sram + && (rx_flag != MTK_RX_FLAGS_NORMAL))) + ring->dma = dma_alloc_coherent(eth->dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); + else { + struct mtk_tx_ring *tx_ring = ð->tx_ring; + ring->dma = (struct mtk_rx_dma *)(tx_ring->dma + + MTK_DMA_SIZE * (ring_no + 1)); + ring->phys = tx_ring->phys + MTK_DMA_SIZE * + sizeof(*tx_ring->dma) * (ring_no + 1); + } + + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < rx_dma_size; i++) { + dma_addr_t dma_addr = dma_map_single(eth->dev, + ring->data[i] + NET_SKB_PAD + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + ring->dma[i].rxd1 = (unsigned int)dma_addr; + + if 
(MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + ring->dma[i].rxd2 = RX_DMA_LSO; + else + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + + ring->dma[i].rxd3 = 0; + ring->dma[i].rxd4 = 0; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (eth->soc->has_sram && ((sizeof(struct mtk_rx_dma)) > 16)) { + ring->dma[i].rxd5 = 0; + ring->dma[i].rxd6 = 0; + ring->dma[i].rxd7 = 0; + ring->dma[i].rxd8 = 0; + } +#endif + } + ring->dma_size = rx_dma_size; + ring->calc_idx_update = false; + ring->calc_idx = rx_dma_size - 1; + ring->crx_idx_reg = (rx_flag == MTK_RX_FLAGS_QDMA) ? + MTK_QRX_CRX_IDX_CFG(ring_no) : + MTK_PRX_CRX_IDX_CFG(ring_no); + ring->ring_no = ring_no; + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(ring_no)); + mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(ring_no)); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_QDMA_RST_IDX); + } else { + mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); + mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX); + } + + return 0; +} + +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_sram) +{ + int i; + + if (ring->data && ring->dma) { + for (i = 0; i < ring->dma_size; i++) { + if (!ring->data[i]) + continue; + if (!ring->dma[i].rxd1) + continue; + dma_unmap_single(eth->dev, + ring->dma[i].rxd1, + ring->buf_size, + DMA_FROM_DEVICE); + skb_free_frag(ring->data[i]); + } + kfree(ring->data); + ring->data = NULL; + } + + if(in_sram) + return; + + if (ring->dma) { + dma_free_coherent(eth->dev, + ring->dma_size * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } +} + +static int mtk_hwlro_rx_init(struct mtk_eth *eth) +{ + int i; + u32 val; + u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0; + u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0; + + /* set LRO rings to auto-learn modes */ + ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE; + + /* validate LRO ring */ + ring_ctrl_dw2 |= MTK_RING_VLD; + + /* set AGE timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H; + ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L; + + /* set max AGG timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME; + + /* set max LRO AGG count */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L; + ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H; + + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) { + mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); + mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); + mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); + } + + /* IPv4 checksum update enable */ + lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN; + + /* switch priority comparison to packet count mode */ + lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE; + + /* bandwidth threshold setting */ + mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); + + /* auto-learn score delta setting */ + mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_LRO_ALT_SCORE_DELTA); + + /* set refresh timer for altering flows to 1 sec. 
(unit: 20us) */ + mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, + MTK_PDMA_LRO_ALT_REFRESH_TIMER); + + /* the minimal remaining room of SDL0 in RXD for lro aggregation */ + lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + val = mtk_r32(eth, MTK_PDMA_RX_CFG); + mtk_w32(eth, val | (MTK_PDMA_LRO_SDL << MTK_RX_CFG_SDL_OFFSET), + MTK_PDMA_RX_CFG); + + lro_ctrl_dw0 |= MTK_PDMA_LRO_SDL << MTK_CTRL_DW0_SDL_OFFSET; + } else { + /* set HW LRO mode & the max aggregation count for rx packets */ + lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff); + } + + /* enable HW LRO */ + lro_ctrl_dw0 |= MTK_LRO_EN; + + /* enable cpu reason black list */ + lro_ctrl_dw0 |= MTK_LRO_CRSN_BNW; + + mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); + mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); + + /* no use PPE cpu reason */ + mtk_w32(eth, 0xffffffff, MTK_PDMA_LRO_CTRL_DW1); + + return 0; +} + +static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) +{ + int i; + u32 val; + + /* relinquish lro rings, flush aggregated packets */ + mtk_w32(eth, MTK_LRO_RING_RELINGUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); + + /* wait for relinquishments done */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); + if (val & MTK_LRO_RING_RELINGUISH_DONE) { + mdelay(20); + continue; + } + break; + } + + /* invalidate lro rings */ + for (i = 1; i <= MTK_HW_LRO_RING_NUM; i++) + mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); + + /* disable HW LRO */ + mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); +} + +static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) +{ + u32 reg_val; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + idx += 1; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); + + /* validate the IP setting */ + mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); +} + +static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) +{ + u32 reg_val; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + idx += 1; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); +} + +static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) +{ + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (mac->hwlro_ip[i]) + cnt++; + } + + return cnt; +} + +static int mtk_hwlro_add_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if ((fsp->flow_type != TCP_V4_FLOW) || + (!fsp->h_u.tcp_ip4_spec.ip4dst) || + (fsp->location > 1)) + return -EINVAL; + + mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); + + return 0; +} + +static int mtk_hwlro_del_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if (fsp->location > 1) + return 
-EINVAL; + + mac->hwlro_ip[fsp->location] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + + return 0; +} + +static void mtk_hwlro_netdev_disable(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int i, hwlro_idx; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + mac->hwlro_ip[i] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + } + + mac->hwlro_ip_cnt = 0; +} + +static int mtk_hwlro_get_fdir_entry(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + + /* only tcp dst ipv4 is meaningful, others are meaningless */ + fsp->flow_type = TCP_V4_FLOW; + fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); + fsp->m_u.tcp_ip4_spec.ip4dst = 0; + + fsp->h_u.tcp_ip4_spec.ip4src = 0; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->h_u.tcp_ip4_spec.psrc = 0; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->h_u.tcp_ip4_spec.pdst = 0; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->h_u.tcp_ip4_spec.tos = 0; + fsp->m_u.tcp_ip4_spec.tos = 0xff; + + return 0; +} + +static int mtk_hwlro_get_fdir_all(struct net_device *dev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct mtk_mac *mac = netdev_priv(dev); + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (mac->hwlro_ip[i]) { + rule_locs[cnt] = i; + cnt++; + } + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int mtk_rss_init(struct mtk_eth *eth) +{ + u32 val; + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* Set RSS rings to PSE modes */ + val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(1)); + val |= MTK_RING_PSE_MODE; + mtk_w32(eth, val, MTK_LRO_CTRL_DW2_CFG(1)); + + /* Enable non-lro multiple rx */ + val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); + val |= MTK_NON_LRO_MULTI_EN; + mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0); + + /* Enable RSS dly int support */ + val |= MTK_LRO_DLY_INT_EN; + mtk_w32(eth, val, MTK_PDMA_LRO_CTRL_DW0); + + /* Set RSS delay config int ring1 */ + mtk_w32(eth, MTK_MAX_DELAY_INT, MTK_LRO_RX1_DLY_INT); + } + + /* Hash Type */ + val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG); + val |= MTK_RSS_IPV4_STATIC_HASH; + val |= MTK_RSS_IPV6_STATIC_HASH; + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Select the size of indirection table */ + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW0); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW1); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW2); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW3); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW4); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW5); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW6); + mtk_w32(eth, MTK_RSS_INDR_TABLE_SIZE4, MTK_RSS_INDR_TABLE_DW7); + + /* Pause */ + val |= MTK_RSS_CFG_REQ; + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Enable RSS */ + val |= MTK_RSS_EN; + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Release pause */ + val &= ~(MTK_RSS_CFG_REQ); + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Set per-RSS GRP INT */ + mtk_w32(eth, MTK_RX_DONE_INT(MTK_RSS_RING1), MTK_PDMA_INT_GRP3); + + /* Set GRP INT */ + mtk_w32(eth, 0x21021030, MTK_FE_INT_GRP); + + return 0; +} + +static void mtk_rss_uninit(struct mtk_eth *eth) +{ + u32 val; +
+ /* Pause */ + val = mtk_r32(eth, MTK_PDMA_RSS_GLO_CFG); + val |= MTK_RSS_CFG_REQ; + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Disable RSS*/ + val &= ~(MTK_RSS_EN); + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); + + /* Release pause */ + val &= ~(MTK_RSS_CFG_REQ); + mtk_w32(eth, val, MTK_PDMA_RSS_GLO_CFG); +} + +static netdev_features_t mtk_fix_features(struct net_device *dev, + netdev_features_t features) +{ + if (!(features & NETIF_F_LRO)) { + struct mtk_mac *mac = netdev_priv(dev); + int ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + if (ip_cnt) { + netdev_info(dev, "RX flow is programmed, LRO should keep on\n"); + + features |= NETIF_F_LRO; + } + } + + if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) { + netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n"); + + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + } + + return features; +} + +static int mtk_set_features(struct net_device *dev, netdev_features_t features) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int err = 0; + + if (!((dev->features ^ features) & MTK_SET_FEATURES)) + return 0; + + if (!(features & NETIF_F_LRO)) + mtk_hwlro_netdev_disable(dev); + + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); + else + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + + return err; +} + +/* wait for DMA to finish whatever it is doing before we start using it again */ +static int mtk_dma_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } else { + if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } + + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) + break; + } + + dev_err(eth->dev, "DMA init timeout\n"); + return -1; +} + +static int mtk_dma_init(struct mtk_eth *eth) +{ + int err; + u32 i; + + if (mtk_dma_busy_wait(eth)) + return -EBUSY; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + } + + err = mtk_tx_alloc(eth); + if (err) + return err; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + } + + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); + if (err) + return err; + + if (eth->hwlro) { + i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 
4 : 1; + for (; i < MTK_MAX_RX_RING_NUM; i++) { + err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); + if (err) + return err; + } + err = mtk_hwlro_rx_init(eth); + if (err) + return err; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) { + err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_NORMAL); + if (err) + return err; + } + err = mtk_rss_init(eth); + if (err) + return err; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* Enable random early drop and set drop threshold + * automatically + */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | + FC_THRES_MIN, MTK_QDMA_FC_THRES); + mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + } + + return 0; +} + +static void mtk_dma_free(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) + if (eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + if ( !eth->soc->has_sram && eth->scratch_ring) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), + eth->scratch_ring, + eth->phy_scratch_ring); + eth->scratch_ring = NULL; + eth->phy_scratch_ring = 0; + } + mtk_tx_clean(eth); + mtk_rx_clean(eth, ð->rx_ring[0],1); + mtk_rx_clean(eth, ð->rx_ring_qdma,0); + + if (eth->hwlro) { + mtk_hwlro_rx_uninit(eth); + + i = (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ? 4 : 1; + for (; i < MTK_MAX_RX_RING_NUM; i++) + mtk_rx_clean(eth, ð->rx_ring[i], 0); + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + mtk_rss_uninit(eth); + + for (i = 1; i < MTK_RX_NAPI_NUM; i++) + mtk_rx_clean(eth, ð->rx_ring[i], 1); + } + + if (eth->scratch_head) { + kfree(eth->scratch_head); + eth->scratch_head = NULL; + } +} + +static void mtk_tx_timeout(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + eth->netdev[mac->id]->stats.tx_errors++; + netif_err(eth, tx_err, dev, + "transmit timed out\n"); + + if (atomic_read(&reset_lock) == 0) + schedule_work(ð->pending_work); +} + +static irqreturn_t mtk_handle_irq_rx(int irq, void *priv) +{ + struct mtk_napi *rx_napi = priv; + struct mtk_eth *eth = rx_napi->eth; + struct mtk_rx_ring *ring = rx_napi->rx_ring; + + if (likely(napi_schedule_prep(&rx_napi->napi))) { + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(ring->ring_no)); + __napi_schedule(&rx_napi->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + if (likely(napi_schedule_prep(ð->tx_napi))) { + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(ð->tx_napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT(0)) { + if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT(0)) + mtk_handle_irq_rx(irq, ð->rx_napi[0]); + } + if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) + mtk_handle_irq_tx(irq, _eth); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mtk_poll_controller(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0)); + mtk_handle_irq_rx(eth->irq[2], ð->rx_napi[0]); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0)); +} +#endif + +static int mtk_start_dma(struct mtk_eth *eth) +{ + u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? 
MTK_RX_2B_OFFSET : 0; + int val, err; + + err = mtk_dma_init(eth); + if (err) { + mtk_dma_free(eth); + return err; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + val = mtk_r32(eth, MTK_QDMA_GLO_CFG); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + val &= ~MTK_RESV_BUF_MASK; + mtk_w32(eth, + val | MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE | + MTK_NDP_CO_PRO | MTK_MUTLI_CNT | + MTK_RESV_BUF | MTK_WCOMP_EN | + MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN | + MTK_RX_2B_OFFSET, MTK_QDMA_GLO_CFG); + } + else + mtk_w32(eth, + val | MTK_TX_DMA_EN | + MTK_DMA_SIZE_32DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); + + val = mtk_r32(eth, MTK_PDMA_GLO_CFG); + mtk_w32(eth, + val | MTK_RX_DMA_EN | rx_2b_offset | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + } else { + mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, + MTK_PDMA_GLO_CFG); + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && eth->hwlro) { + val = mtk_r32(eth, MTK_PDMA_GLO_CFG); + mtk_w32(eth, val | MTK_RX_DMA_LRO_EN, MTK_PDMA_GLO_CFG); + } + + return 0; +} + +void mtk_gdm_config(struct mtk_eth *eth, u32 config) +{ + int i; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + + /* default setup the forward port to send frame to PDMA */ + val &= ~0xffff; + + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + + val |= config; + + if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i])) + val |= MTK_GDMA_SPECIAL_TAG; + + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } +} + +static int mtk_open(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int err, i; + struct device_node *phy_node; + + err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); + if (err) { + netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, + err); + return err; + } + + /* we run 2 netdevs on the same dma ring so we only bring it up once */ + if (!refcount_read(ð->dma_refcnt)) { + int err = mtk_start_dma(eth); + + if (err) + return err; + + mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); + + /* Indicates CDM to parse the MTK special tag from CPU */ + if (netdev_uses_dsa(dev)) { + u32 val; + val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); + mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); + val = mtk_r32(eth, MTK_CDMP_IG_CTRL); + mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); + } + + napi_enable(ð->tx_napi); + napi_enable(ð->rx_napi[0].napi); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(0)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) { + napi_enable(ð->rx_napi[i].napi); + mtk_rx_irq_enable(eth, MTK_RX_DONE_INT(i)); + } + } + + refcount_set(ð->dma_refcnt, 1); + } + else + refcount_inc(ð->dma_refcnt); + + phylink_start(mac->phylink); + netif_start_queue(dev); + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0); + if (!phy_node) { + regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0); + } +#endif + + return 0; +} + +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) +{ + u32 val; + int i; + + /* stop the dma engine */ + spin_lock_bh(ð->page_lock); + val = mtk_r32(eth, glo_cfg); + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), + 
glo_cfg); + spin_unlock_bh(ð->page_lock); + + /* wait for dma stop */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, glo_cfg); + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { + mdelay(20); + continue; + } + break; + } +} + +static int mtk_stop(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int i; + u32 val = 0; + struct device_node *phy_node; + + netif_tx_disable(dev); + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0); + if (phy_node) { + val = _mtk_mdio_read(eth, 0, 0); + val |= BMCR_PDOWN; + _mtk_mdio_write(eth, 0, 0, val); + }else { + regmap_read(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val |= SGMII_PHYA_PWD; + regmap_write(eth->sgmii->regmap[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, val); + } +#endif + + //GMAC RX disable + val = mtk_r32(eth, MTK_MAC_MCR(mac->id)); + mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(mac->id)); + + phylink_stop(mac->phylink); + + phylink_disconnect_phy(mac->phylink); + + /* only shutdown DMA if this is the last user */ + if (!refcount_dec_and_test(ð->dma_refcnt)) + return 0; + + mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(0)); + napi_disable(ð->tx_napi); + napi_disable(ð->rx_napi[0].napi); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) { + mtk_rx_irq_disable(eth, MTK_RX_DONE_INT(i)); + napi_disable(ð->rx_napi[i].napi); + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); + + mtk_dma_free(eth); + + return 0; +} + +void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) +{ + u32 val = 0, i = 0; + + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, reset_bits); + + while (i++ < 5000) { + mdelay(1); + regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); + + if ((val & reset_bits) == reset_bits) { + mtk_reset_event_update(eth, MTK_EVENT_COLD_CNT); + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, ~reset_bits); + break; + } + } + + mdelay(10); +} + +static void mtk_clk_disable(struct mtk_eth *eth) +{ + int clk; + + for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) + clk_disable_unprepare(eth->clks[clk]); +} + +static int mtk_clk_enable(struct mtk_eth *eth) +{ + int clk, ret; + + for (clk = 0; clk < MTK_CLK_MAX ; clk++) { + ret = clk_prepare_enable(eth->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(eth->clks[clk]); + + return ret; +} + +static int mtk_napi_init(struct mtk_eth *eth) +{ + struct mtk_napi *rx_napi = ð->rx_napi[0]; + int i; + + rx_napi->eth = eth; + rx_napi->rx_ring = ð->rx_ring[0]; + rx_napi->irq_grp_no = 2; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) { + rx_napi = ð->rx_napi[i]; + rx_napi->eth = eth; + rx_napi->rx_ring = ð->rx_ring[i]; + rx_napi->irq_grp_no = 2 + i; + } + } + + return 0; +} + +static int mtk_hw_init(struct mtk_eth *eth, u32 type) +{ + int i, ret = 0; + + pr_info("[%s] reset_lock:%d, force:%d\n", __func__, + atomic_read(&reset_lock), atomic_read(&force)); + + if (atomic_read(&reset_lock) == 0) { + if (test_and_set_bit(MTK_HW_INIT, ð->state)) + return 0; + + pm_runtime_enable(eth->dev); + pm_runtime_get_sync(eth->dev); + + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = 
device_reset(eth->dev); + if (ret) { + dev_err(eth->dev, "MAC reset failed!\n"); + goto err_disable_pm; + } + + /* enable interrupt delay for RX */ + mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); + + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + return 0; + } + + pr_info("[%s] execute fe %s reset\n", __func__, + (type == MTK_TYPE_WARM_RESET) ? "warm" : "cold"); + + if (type == MTK_TYPE_WARM_RESET) + mtk_eth_warm_reset(eth); + else + mtk_eth_cold_reset(eth); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* Set FE to PDMAv2 if necessary */ + mtk_w32(eth, mtk_r32(eth, MTK_FE_GLO_MISC) | MTK_PDMA_V2, MTK_FE_GLO_MISC); + } + + if (eth->pctl) { + /* Set GE2 driving and slew rate */ + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); + + /* set GE2 TDSEL */ + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); + + /* set GE2 TUNE */ + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + } + + /* Set linkdown as the default for each GMAC. Its own MCR would be set + * up with the more appropriate value when mtk_mac_config call is being + * invoked. + */ + for (i = 0; i < MTK_MAC_COUNT; i++) + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); + + /* Enable RX VLan Offloading */ + if (eth->soc->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + else + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); + + /* enable interrupt delay for RX/TX */ + mtk_w32(eth, 0x8f0f8f0f, MTK_PDMA_DELAY_INT); + mtk_w32(eth, 0x8f0f8f0f, MTK_QDMA_DELAY_INT); + + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + /* FE int grouping */ + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_PDMA_INT_GRP2); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT(0), MTK_QDMA_INT_GRP2); + mtk_w32(eth, 0x21021003, MTK_FE_INT_GRP); + mtk_w32(eth, MTK_FE_INT_FQ_EMPTY | MTK_FE_INT_TSO_FAIL | + MTK_FE_INT_TSO_ILLEGAL | MTK_FE_INT_TSO_ALIGN | + MTK_FE_INT_RFIFO_OV | MTK_FE_INT_RFIFO_UF, MTK_FE_INT_ENABLE); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* PSE Free Queue Flow Control */ + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); + + /* PSE should not drop port8 and port9 packets */ + mtk_w32(eth, 0x00000300, PSE_DROP_CFG); + + /* PSE config input queue threshold */ + mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); + mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); + mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); + mtk_w32(eth, 0x002a000e, PSE_IQ_REV(8)); + + /* PSE config output queue threshold */ + mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); + mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); + mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); + mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); + mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); + mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); + + /* GDM and CDM Threshold */ + mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); + } + + return 0; + +err_disable_pm: + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return ret; +} + +static int mtk_hw_deinit(struct mtk_eth *eth) +{ + if 
(!test_and_clear_bit(MTK_HW_INIT, &eth->state)) + return 0; + + mtk_clk_disable(eth); + + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return 0; +} + +static int __init mtk_init(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + of_get_mac_address(mac->of_node, dev->dev_addr); + + /* If the mac address is invalid, use random mac address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + eth_hw_addr_random(dev); + dev_err(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); + } + + return 0; +} + +static void mtk_uninit(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + phylink_disconnect_phy(mac->phylink); + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); +} + +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phylink_mii_ioctl(mac->phylink, ifr, cmd); + default: + /* default invoke the mtk_eth_dbg handler */ + return mtk_do_priv_ioctl(dev, ifr, cmd); + break; + } + + return -EOPNOTSUPP; +} + +static void mtk_pending_work(struct work_struct *work) +{ + struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); + struct device_node *phy_node = NULL; + struct mtk_mac *mac = NULL; + int err, i = 0; + unsigned long restart = 0; + u32 val = 0; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + atomic_inc(&reset_lock); + val = mtk_r32(eth, MTK_FE_INT_STATUS); + if (!mtk_check_reset_event(eth, val)) { + atomic_dec(&reset_lock); + pr_info("[%s] No need to do FE reset !\n", __func__); + return; + } + + rtnl_lock(); + + /* Disable FE P3 and P4 */ + val = mtk_r32(eth, MTK_FE_GLO_CFG); + val |= MTK_FE_LINK_DOWN_P3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= MTK_FE_LINK_DOWN_P4; + mtk_w32(eth, val, MTK_FE_GLO_CFG); + + /* Adjust PPE configurations to prepare for reset */ + mtk_prepare_reset_ppe(eth, 0); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + mtk_prepare_reset_ppe(eth, 1); + + /* Adjust FE configurations to prepare for reset */ + mtk_prepare_reset_fe(eth); + + /* Trigger WiFi SER reset */ + call_netdevice_notifiers(MTK_FE_START_RESET, eth->netdev[0]); + rtnl_unlock(); + wait_for_completion_timeout(&wait_ser_done, 5000); +#endif + rtnl_lock(); + + while (test_and_set_bit_lock(MTK_RESETTING, &eth->state)) + cpu_relax(); + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + del_timer_sync(&eth->mtk_dma_monitor_timer); +#endif + pr_info("[%s] mtk_stop starts !\n", __func__); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + __set_bit(i, &restart); + } + pr_info("[%s] mtk_stop ends !\n", __func__); + mdelay(15); + + if (eth->dev->pins) + pinctrl_select_state(eth->dev->pins->p, + eth->dev->pins->default_state); + + pr_info("[%s] mtk_hw_init starts !\n", __func__); + mtk_hw_init(eth, MTK_TYPE_WARM_RESET); + pr_info("[%s] mtk_hw_init ends !\n", __func__); + + /* restart DMA and enable IRQs */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!test_bit(i, &restart)) + continue; + err = mtk_open(eth->netdev[i]); + if (err) { + netif_alert(eth, ifup, eth->netdev[i], + "Driver up/down cycle failed, closing device.\n"); + dev_close(eth->netdev[i]); + } + } + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + /* Set KA tick select */ + mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0,
MTK_PPE_TB_CFG(0)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, 0, MTK_PPE_TB_CFG(1)); + + /* Enable FE P3 and P4 */ + val = mtk_r32(eth, MTK_FE_GLO_CFG); + val &= ~MTK_FE_LINK_DOWN_P3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val &= ~MTK_FE_LINK_DOWN_P4; + mtk_w32(eth, val, MTK_FE_GLO_CFG); + + /* Power up sgmii */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + mac = netdev_priv(eth->netdev[i]); + phy_node = of_parse_phandle(mac->of_node, "phy-handle", 0); + if (!phy_node) { + mtk_gmac_sgmii_path_setup(eth, i); + regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, 0); + } + } + + call_netdevice_notifiers(MTK_FE_RESET_NAT_DONE, eth->netdev[0]); + pr_info("[%s] HNAT reset done !\n", __func__); + + call_netdevice_notifiers(MTK_FE_RESET_DONE, eth->netdev[0]); + pr_info("[%s] WiFi SER reset done !\n", __func__); + + atomic_dec(&reset_lock); + if (atomic_read(&force) > 0) + atomic_dec(&force); + + timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0); + eth->mtk_dma_monitor_timer.expires = jiffies; + add_timer(&eth->mtk_dma_monitor_timer); +#endif + clear_bit_unlock(MTK_RESETTING, &eth->state); + + rtnl_unlock(); +} + +static int mtk_free_dev(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + free_netdev(eth->netdev[i]); + } + + return 0; +} + +static int mtk_unreg_dev(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + unregister_netdev(eth->netdev[i]); + } + + return 0; +} + +static int mtk_cleanup(struct mtk_eth *eth) +{ + mtk_unreg_dev(eth); + mtk_free_dev(eth); + cancel_work_sync(&eth->pending_work); + + return 0; +} + +static int mtk_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct mtk_mac *mac = netdev_priv(ndev); + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + return phylink_ethtool_ksettings_get(mac->phylink, cmd); +} + +static int mtk_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct mtk_mac *mac = netdev_priv(ndev); + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + return phylink_ethtool_ksettings_set(mac->phylink, cmd); +} + +static void mtk_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + + strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); +} + +static u32 mtk_get_msglevel(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return mac->hw->msg_enable; +} + +static void mtk_set_msglevel(struct net_device *dev, u32 value) +{ + struct mtk_mac *mac = netdev_priv(dev); + + mac->hw->msg_enable = value; +} + +static int mtk_nway_reset(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + if (!mac->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(mac->phylink); +} + +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int mtk_get_sset_count(struct net_device
*dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(mtk_ethtool_stats); + default: + return -EOPNOTSUPP; + } +} + +static void mtk_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hwstats = mac->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + int i; + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock_bh(&hwstats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock_bh(&hwstats->stats_lock); + } + } + + data_src = (u64 *)hwstats; + + do { + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); +} + +static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (dev->hw_features & NETIF_F_LRO) { + cmd->data = MTK_MAX_RX_RING_NUM; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRLCNT: + if (dev->hw_features & NETIF_F_LRO) { + struct mtk_mac *mac = netdev_priv(dev); + + cmd->rule_cnt = mac->hwlro_ip_cnt; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRULE: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_entry(dev, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_all(dev, cmd, + rule_locs); + break; + default: + break; + } + + return ret; +} + +static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_add_ipaddr(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_del_ipaddr(dev, cmd); + break; + default: + break; + } + + return ret; +} + +static const struct ethtool_ops mtk_ethtool_ops = { + .get_link_ksettings = mtk_get_link_ksettings, + .set_link_ksettings = mtk_set_link_ksettings, + .get_drvinfo = mtk_get_drvinfo, + .get_msglevel = mtk_get_msglevel, + .set_msglevel = mtk_set_msglevel, + .nway_reset = mtk_nway_reset, + .get_link = ethtool_op_get_link, + .get_strings = mtk_get_strings, + .get_sset_count = mtk_get_sset_count, + .get_ethtool_stats = mtk_get_ethtool_stats, + .get_rxnfc = mtk_get_rxnfc, + .set_rxnfc = mtk_set_rxnfc, +}; + +static const struct net_device_ops mtk_netdev_ops = { + .ndo_init = mtk_init, + .ndo_uninit = mtk_uninit, + .ndo_open = mtk_open, + .ndo_stop = mtk_stop, + .ndo_start_xmit = mtk_start_xmit, + .ndo_set_mac_address = mtk_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mtk_do_ioctl, + .ndo_tx_timeout = mtk_tx_timeout, + .ndo_get_stats64 = mtk_get_stats64, + .ndo_fix_features = mtk_fix_features, + .ndo_set_features = mtk_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mtk_poll_controller, +#endif +}; + +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) +{ + const __be32 *_id = of_get_property(np, "reg", NULL); + struct phylink *phylink; + int phy_mode, id, err; + struct mtk_mac *mac; + + if (!_id) { + dev_err(eth->dev, "missing mac id\n"); + return -EINVAL; + } + + id = be32_to_cpup(_id); + if (id < 0 || id >= MTK_MAC_COUNT) { + dev_err(eth->dev, "%d is not a valid mac id\n", id); 
+ return -EINVAL; + } + + if (eth->netdev[id]) { + dev_err(eth->dev, "duplicate mac id found: %d\n", id); + return -EINVAL; + } + + eth->netdev[id] = alloc_etherdev(sizeof(*mac)); + if (!eth->netdev[id]) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + mac = netdev_priv(eth->netdev[id]); + eth->mac[id] = mac; + mac->id = id; + mac->hw = eth; + mac->of_node = np; + + memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); + mac->hwlro_ip_cnt = 0; + + mac->hw_stats = devm_kzalloc(eth->dev, + sizeof(*mac->hw_stats), + GFP_KERNEL); + if (!mac->hw_stats) { + dev_err(eth->dev, "failed to allocate counter memory\n"); + err = -ENOMEM; + goto free_netdev; + } + spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + + /* phylink create */ + phy_mode = of_get_phy_mode(np); + if (phy_mode < 0) { + dev_err(eth->dev, "incorrect phy-mode\n"); + err = -EINVAL; + goto free_netdev; + } + + /* mac config is not set */ + mac->interface = PHY_INTERFACE_MODE_NA; + mac->mode = MLO_AN_PHY; + mac->speed = SPEED_UNKNOWN; + + mac->phylink_config.dev = ð->netdev[id]->dev; + mac->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&mac->phylink_config, + of_fwnode_handle(mac->of_node), + phy_mode, &mtk_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto free_netdev; + } + + mac->phylink = phylink; + + SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->watchdog_timeo = 5 * HZ; + eth->netdev[id]->netdev_ops = &mtk_netdev_ops; + eth->netdev[id]->base_addr = (unsigned long)eth->base; + + eth->netdev[id]->hw_features = eth->soc->hw_features; + if (eth->hwlro) + eth->netdev[id]->hw_features |= NETIF_F_LRO; + + eth->netdev[id]->vlan_features = eth->soc->hw_features & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + eth->netdev[id]->features |= eth->soc->hw_features; + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; + + eth->netdev[id]->irq = eth->irq[0]; + eth->netdev[id]->dev.of_node = np; + + return 0; + +free_netdev: + free_netdev(eth->netdev[id]); + return err; +} + +static int mtk_probe(struct platform_device *pdev) +{ + struct device_node *mac_np; + struct mtk_eth *eth; + int err, i; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->soc = of_device_get_match_data(&pdev->dev); + + eth->dev = &pdev->dev; + eth->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eth->base)) + return PTR_ERR(eth->base); + + if(eth->soc->has_sram) { + struct resource *res; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!res)) + return -EINVAL; + eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; + eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; + } else { + eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; + eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; + eth->ip_align = NET_IP_ALIGN; + } else { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + eth->rx_dma_l4_valid = RX_DMA_L4_VALID_V2; + else + eth->rx_dma_l4_valid = RX_DMA_L4_VALID; + } + + spin_lock_init(ð->page_lock); + spin_lock_init(ð->tx_irq_lock); + spin_lock_init(ð->rx_irq_lock); + spin_lock_init(ð->syscfg0_lock); + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + 
"mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { + eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,infracfg"); + if (IS_ERR(eth->infra)) { + dev_err(&pdev->dev, "no infracfg regmap found\n"); + return PTR_ERR(eth->infra); + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), + GFP_KERNEL); + if (!eth->sgmii) + return -ENOMEM; + + err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node, + eth->soc->ana_rgc3); + + if (err) + return err; + } + + if (eth->soc->required_pctl) { + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,pctl"); + if (IS_ERR(eth->pctl)) { + dev_err(&pdev->dev, "no pctl regmap found\n"); + return PTR_ERR(eth->pctl); + } + } + + for (i = 0; i < MTK_MAX_IRQ_NUM; i++) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) + eth->irq[i] = eth->irq[0]; + else + eth->irq[i] = platform_get_irq(pdev, i); + if (eth->irq[i] < 0) { + dev_err(&pdev->dev, "no IRQ%d resource found\n", i); + return -ENXIO; + } + } + + for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { + eth->clks[i] = devm_clk_get(eth->dev, + mtk_clks_source_name[i]); + if (IS_ERR(eth->clks[i])) { + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (eth->soc->required_clks & BIT(i)) { + dev_err(&pdev->dev, "clock %s not found\n", + mtk_clks_source_name[i]); + return -EINVAL; + } + eth->clks[i] = NULL; + } + } + + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + INIT_WORK(ð->pending_work, mtk_pending_work); + + err = mtk_hw_init(eth, MTK_TYPE_COLD_RESET); + if (err) + return err; + + eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); + + for_each_child_of_node(pdev->dev.of_node, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + if (!of_device_is_available(mac_np)) + continue; + + err = mtk_add_mac(eth, mac_np); + if (err) { + of_node_put(mac_np); + goto err_deinit_hw; + } + } + + err = mtk_napi_init(eth); + if (err) + goto err_free_dev; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { + err = devm_request_irq(eth->dev, eth->irq[0], + mtk_handle_irq, 0, + dev_name(eth->dev), eth); + } else { + err = devm_request_irq(eth->dev, eth->irq[1], + mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; + + err = devm_request_irq(eth->dev, eth->irq[2], + mtk_handle_irq_rx, 0, + dev_name(eth->dev), ð->rx_napi[0]); + if (err) + goto err_free_dev; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) { + err = devm_request_irq(eth->dev, + eth->irq[2 + i], + mtk_handle_irq_rx, 0, + dev_name(eth->dev), + ð->rx_napi[i]); + if (err) + goto err_free_dev; + } + } else { + err = devm_request_irq(eth->dev, eth->irq[3], + mtk_handle_fe_irq, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; + } + } + + if (err) + goto err_free_dev; + + /* No MT7628/88 support yet */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + err = mtk_mdio_init(eth); + if (err) + goto err_free_dev; + } + + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + err = register_netdev(eth->netdev[i]); + if (err) { + dev_err(eth->dev, "error bringing up device\n"); + goto err_deinit_mdio; + } else + netif_info(eth, probe, eth->netdev[i], + "mediatek frame engine at 0x%08lx, irq %d\n", + eth->netdev[i]->base_addr, 
eth->irq[0]); + } + + /* we run 2 devices on the same DMA ring so we need a dummy device + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, + MTK_NAPI_WEIGHT); + netif_napi_add(ð->dummy_dev, ð->rx_napi[0].napi, mtk_napi_rx, + MTK_NAPI_WEIGHT); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) + netif_napi_add(ð->dummy_dev, ð->rx_napi[i].napi, + mtk_napi_rx, MTK_NAPI_WEIGHT); + } + + mtketh_debugfs_init(eth); + debug_proc_init(eth); + + platform_set_drvdata(pdev, eth); + + register_netdevice_notifier(&mtk_eth_netdevice_nb); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + timer_setup(ð->mtk_dma_monitor_timer, mtk_dma_monitor, 0); + eth->mtk_dma_monitor_timer.expires = jiffies; + add_timer(ð->mtk_dma_monitor_timer); +#endif + + return 0; + +err_deinit_mdio: + mtk_mdio_cleanup(eth); +err_free_dev: + mtk_free_dev(eth); +err_deinit_hw: + mtk_hw_deinit(eth); + + return err; +} + +static int mtk_remove(struct platform_device *pdev) +{ + struct mtk_eth *eth = platform_get_drvdata(pdev); + struct mtk_mac *mac; + int i; + + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + mac = netdev_priv(eth->netdev[i]); + phylink_disconnect_phy(mac->phylink); + } + + mtk_hw_deinit(eth); + + netif_napi_del(ð->tx_napi); + netif_napi_del(ð->rx_napi[0].napi); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSS)) { + for (i = 1; i < MTK_RX_NAPI_NUM; i++) + netif_napi_del(ð->rx_napi[i].napi); + } + + mtk_cleanup(eth); + mtk_mdio_cleanup(eth); + unregister_netdevice_notifier(&mtk_eth_netdevice_nb); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + del_timer_sync(ð->mtk_dma_monitor_timer); +#endif + + return 0; +} + +static const struct mtk_soc_data mt2701_data = { + .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7623_CLKS_BITMAP, + .required_pctl = true, + .has_sram = false, +}; + +static const struct mtk_soc_data mt7621_data = { + .caps = MT7621_CAPS, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7621_CLKS_BITMAP, + .required_pctl = false, + .has_sram = false, +}; + +static const struct mtk_soc_data mt7622_data = { + .ana_rgc3 = 0x2028, + .caps = MT7622_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7622_CLKS_BITMAP, + .required_pctl = false, + .has_sram = false, +}; + +static const struct mtk_soc_data mt7623_data = { + .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7623_CLKS_BITMAP, + .required_pctl = true, + .has_sram = false, +}; + +static const struct mtk_soc_data mt7629_data = { + .ana_rgc3 = 0x128, + .caps = MT7629_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7629_CLKS_BITMAP, + .required_pctl = false, + .has_sram = false, +}; + +static const struct mtk_soc_data rt5350_data = { + .caps = MT7628_CAPS, + .hw_features = MTK_HW_FEATURES_MT7628, + .required_clks = MT7628_CLKS_BITMAP, + .required_pctl = false, + .has_sram = false, +}; + +const struct of_device_id of_mtk_match[] = { + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, + { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, + {}, +}; 
+MODULE_DEVICE_TABLE(of, of_mtk_match); + +static struct platform_driver mtk_driver = { + .probe = mtk_probe, + .remove = mtk_remove, + .driver = { + .name = "mtk_soc_eth", + .of_match_table = of_mtk_match, + }, +}; + +module_platform_driver(mtk_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.h new file mode 100644 index 000000000..367f7f1e9 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_eth_soc.h @@ -0,0 +1,1318 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#ifndef MTK_ETH_H +#define MTK_ETH_H + +#include +#include +#include +#include +#include +#include + +#define MTK_QDMA_PAGE_SIZE 2048 +#define MTK_MAX_RX_LENGTH 1536 +#define MTK_DMA_SIZE 2048 +#define MTK_NAPI_WEIGHT 256 +#define MTK_MAC_COUNT 2 +#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) +#define MTK_DMA_DUMMY_DESC 0xffffffff +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_IPV6_CSUM) +#define MTK_SET_FEATURES (NETIF_F_LRO | \ + NETIF_F_HW_VLAN_CTAG_RX) +#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) +#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) + +#define MTK_HW_LRO_DMA_SIZE 8 + +#define MTK_MAX_LRO_RX_LENGTH (4096 * 3) +#define MTK_MAX_LRO_IP_CNT 2 +#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */ +#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. 
*/ +#define MTK_HW_LRO_AGG_TIME 10 /* 200us */ +#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */ +#define MTK_HW_LRO_MAX_AGG_CNT 64 +#define MTK_HW_LRO_BW_THRE 3000 +#define MTK_HW_LRO_REPLACE_DELTA 1000 +#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 + +/* Frame Engine Global Configuration */ +#define MTK_FE_GLO_CFG 0x00 +#define MTK_FE_LINK_DOWN_P3 BIT(11) +#define MTK_FE_LINK_DOWN_P4 BIT(12) + +/* Frame Engine Global Reset Register */ +#define MTK_RST_GL 0x04 +#define RST_GL_PSE BIT(0) + +/* Frame Engine Interrupt Status Register */ +#define MTK_FE_INT_STATUS 0x08 +#define MTK_FE_INT_STATUS2 0x28 +#define MTK_FE_INT_ENABLE 0x0C +#define MTK_FE_INT_FQ_EMPTY BIT(8) +#define MTK_FE_INT_TSO_FAIL BIT(12) +#define MTK_FE_INT_TSO_ILLEGAL BIT(13) +#define MTK_FE_INT_TSO_ALIGN BIT(14) +#define MTK_FE_INT_RFIFO_OV BIT(18) +#define MTK_FE_INT_RFIFO_UF BIT(19) +#define MTK_GDM1_AF BIT(28) +#define MTK_GDM2_AF BIT(29) + +/* PDMA HW LRO Alter Flow Timer Register */ +#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c + +/* Frame Engine Interrupt Grouping Register */ +#define MTK_FE_INT_GRP 0x20 + +/* Frame Engine LRO auto-learn table info */ +#define MTK_FE_ALT_CF8 0x300 +#define MTK_FE_ALT_SGL_CFC 0x304 +#define MTK_FE_ALT_SEQ_CFC 0x308 + +/* CDMP Ingress Control Register */ +#define MTK_CDMQ_IG_CTRL 0x1400 +#define MTK_CDMQ_STAG_EN BIT(0) + +/* CDMP Ingress Control Register */ +#define MTK_CDMP_IG_CTRL 0x400 +#define MTK_CDMP_STAG_EN BIT(0) + +/* CDMP Exgress Control Register */ +#define MTK_CDMP_EG_CTRL 0x404 + +/* GDM Exgress Control Register */ +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_SPECIAL_TAG BIT(24) +#define MTK_GDMA_ICS_EN BIT(22) +#define MTK_GDMA_TCS_EN BIT(21) +#define MTK_GDMA_UCS_EN BIT(20) +#define MTK_GDMA_TO_PDMA 0x0 +#define MTK_GDMA_DROP_ALL 0x7777 + +/* Unicast Filter MAC Address Register - Low */ +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) + +/* Unicast Filter MAC Address Register - High */ +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) + +/* Internal SRAM offset */ +#define MTK_ETH_SRAM_OFFSET 0x40000 + +/* FE global misc reg*/ +#define MTK_FE_GLO_MISC 0x124 + +/* PSE Free Queue Flow Control */ +#define PSE_FQFC_CFG1 0x100 +#define PSE_FQFC_CFG2 0x104 +#define PSE_DROP_CFG 0x108 + +/* PSE Input Queue Reservation Register*/ +#define PSE_IQ_REV(x) (0x140 + ((x - 1) * 0x4)) + +/* PSE Output Queue Threshold Register*/ +#define PSE_OQ_TH(x) (0x160 + ((x - 1) * 0x4)) + +/* GDM and CDM Threshold */ +#define MTK_GDM2_THRES 0x1530 +#define MTK_CDMW0_THRES 0x164c +#define MTK_CDMW1_THRES 0x1650 +#define MTK_CDME0_THRES 0x1654 +#define MTK_CDME1_THRES 0x1658 +#define MTK_CDMM_THRES 0x165c + +#define MTK_PDMA_V2 BIT(4) + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define PDMA_BASE 0x6000 +#define QDMA_BASE 0x4400 +#define WDMA_BASE(x) (0x4800 + ((x) * 0x400)) +#define PPE_BASE(x) (0x2200 + ((x) * 0x400)) +#else +#define PDMA_BASE 0x0800 +#define QDMA_BASE 0x1800 +#define WDMA_BASE(x) (0x2800 + ((x) * 0x400)) +#define PPE_BASE(x) (0xE00 + ((x) * 0x400)) +#endif +/* PDMA RX Base Pointer Register */ +#define MTK_PRX_BASE_PTR0 (PDMA_BASE + 0x100) +#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10)) + +/* PDMA RX Maximum Count Register */ +#define MTK_PRX_MAX_CNT0 (MTK_PRX_BASE_PTR0 + 0x04) +#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10)) + +/* PDMA RX CPU Pointer Register */ +#define MTK_PRX_CRX_IDX0 (MTK_PRX_BASE_PTR0 + 0x08) +#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10)) + +/* PDMA RX DMA Pointer Register */ +#define 
MTK_PRX_DRX_IDX0 (MTK_PRX_BASE_PTR0 + 0x0c) +#define MTK_PRX_DRX_IDX_CFG(x) (MTK_PRX_DRX_IDX0 + (x * 0x10)) + +/* PDMA HW LRO Control Registers */ +#define BITS(m, n) (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n))) +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_MAX_RX_RING_NUM (8) +#define MTK_HW_LRO_RING_NUM (4) +#define IS_HW_LRO_RING(ring_no) (((ring_no) > 3) && ((ring_no) < 8)) +#define MTK_PDMA_LRO_CTRL_DW0 (PDMA_BASE + 0x408) +#define MTK_LRO_ALT_SCORE_DELTA (PDMA_BASE + 0x41c) +#define MTK_LRO_RX_RING0_CTRL_DW1 (PDMA_BASE + 0x438) +#define MTK_LRO_RX_RING0_CTRL_DW2 (PDMA_BASE + 0x43c) +#define MTK_LRO_RX_RING0_CTRL_DW3 (PDMA_BASE + 0x440) +#define MTK_L3_CKS_UPD_EN BIT(19) +#define MTK_LRO_CRSN_BNW BIT(22) +#define MTK_LRO_RING_RELINGUISH_REQ (0xf << 24) +#define MTK_LRO_RING_RELINGUISH_DONE (0xf << 28) +#else +#define MTK_MAX_RX_RING_NUM (4) +#define MTK_HW_LRO_RING_NUM (3) +#define IS_HW_LRO_RING(ring_no) (((ring_no) > 0) && ((ring_no) < 4)) +#define MTK_PDMA_LRO_CTRL_DW0 (PDMA_BASE + 0x180) +#define MTK_LRO_ALT_SCORE_DELTA (PDMA_BASE + 0x24c) +#define MTK_LRO_RX_RING0_CTRL_DW1 (PDMA_BASE + 0x328) +#define MTK_LRO_RX_RING0_CTRL_DW2 (PDMA_BASE + 0x32c) +#define MTK_LRO_RX_RING0_CTRL_DW3 (PDMA_BASE + 0x330) +#define MTK_LRO_CRSN_BNW BIT(6) +#define MTK_L3_CKS_UPD_EN BIT(7) +#define MTK_LRO_RING_RELINGUISH_REQ (0x7 << 26) +#define MTK_LRO_RING_RELINGUISH_DONE (0x7 << 29) +#endif + +#define IS_NORMAL_RING(ring_no) ((ring_no) == 0) +#define MTK_LRO_EN BIT(0) +#define MTK_NON_LRO_MULTI_EN BIT(2) +#define MTK_LRO_DLY_INT_EN BIT(5) +#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21) +#define MTK_LRO_L4_CTRL_PSH_EN BIT(23) +#define MTK_CTRL_DW0_SDL_OFFSET (3) +#define MTK_CTRL_DW0_SDL_MASK BITS(3, 18) + +#define MTK_PDMA_LRO_CTRL_DW1 (MTK_PDMA_LRO_CTRL_DW0 + 0x04) +#define MTK_PDMA_LRO_CTRL_DW2 (MTK_PDMA_LRO_CTRL_DW0 + 0x08) +#define MTK_PDMA_LRO_CTRL_DW3 (MTK_PDMA_LRO_CTRL_DW0 + 0x0c) +#define MTK_ADMA_MODE BIT(15) +#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16) + +/* PDMA RSS Control Registers */ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_PDMA_RSS_GLO_CFG (PDMA_BASE + 0x800) +#define MTK_RX_NAPI_NUM (2) +#define MTK_MAX_IRQ_NUM (4) +#else +#define MTK_PDMA_RSS_GLO_CFG 0x3000 +#define MTK_RX_NAPI_NUM (1) +#define MTK_MAX_IRQ_NUM (3) +#endif +#define MTK_RSS_RING1 (1) +#define MTK_RSS_EN BIT(0) +#define MTK_RSS_CFG_REQ BIT(2) +#define MTK_RSS_IPV6_STATIC_HASH (0x7 << 8) +#define MTK_RSS_IPV4_STATIC_HASH (0x7 << 12) +#define MTK_RSS_INDR_TABLE_DW0 (MTK_PDMA_RSS_GLO_CFG + 0x50) +#define MTK_RSS_INDR_TABLE_DW1 (MTK_PDMA_RSS_GLO_CFG + 0x54) +#define MTK_RSS_INDR_TABLE_DW2 (MTK_PDMA_RSS_GLO_CFG + 0x58) +#define MTK_RSS_INDR_TABLE_DW3 (MTK_PDMA_RSS_GLO_CFG + 0x5C) +#define MTK_RSS_INDR_TABLE_DW4 (MTK_PDMA_RSS_GLO_CFG + 0x60) +#define MTK_RSS_INDR_TABLE_DW5 (MTK_PDMA_RSS_GLO_CFG + 0x64) +#define MTK_RSS_INDR_TABLE_DW6 (MTK_PDMA_RSS_GLO_CFG + 0x68) +#define MTK_RSS_INDR_TABLE_DW7 (MTK_PDMA_RSS_GLO_CFG + 0x6C) +#define MTK_RSS_INDR_TABLE_SIZE4 0x44444444 + +/* PDMA Global Configuration Register */ +#define MTK_PDMA_GLO_CFG (PDMA_BASE + 0x204) +#define MTK_RX_DMA_LRO_EN BIT(8) +#define MTK_MULTI_EN BIT(10) +#define MTK_PDMA_SIZE_8DWORDS (1 << 4) + +/* PDMA Global Configuration Register */ +#define MTK_PDMA_RX_CFG (PDMA_BASE + 0x210) +#define MTK_PDMA_LRO_SDL (0x3000) +#define MTK_RX_CFG_SDL_OFFSET (16) + +/* PDMA Reset Index Register */ +#define MTK_PDMA_RST_IDX (PDMA_BASE + 0x208) +#define MTK_PST_DRX_IDX0 BIT(16) +#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x)) + +/* PDMA 
Delay Interrupt Register */ +#define MTK_PDMA_DELAY_INT (PDMA_BASE + 0x20c) +#define MTK_PDMA_DELAY_RX_EN BIT(15) +#define MTK_PDMA_DELAY_RX_PINT 4 +#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 +#define MTK_PDMA_DELAY_RX_PTIME 4 +#define MTK_PDMA_DELAY_RX_DELAY \ + (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \ + (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT)) + +/* PDMA Interrupt Status Register */ +#define MTK_PDMA_INT_STATUS (PDMA_BASE + 0x220) + +/* PDMA Interrupt Mask Register */ +#define MTK_PDMA_INT_MASK (PDMA_BASE + 0x228) + +/* PDMA Interrupt grouping registers */ +#define MTK_PDMA_INT_GRP1 (PDMA_BASE + 0x250) +#define MTK_PDMA_INT_GRP2 (PDMA_BASE + 0x254) +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_PDMA_INT_GRP3 (PDMA_BASE + 0x258) +#else +#define MTK_PDMA_INT_GRP3 (PDMA_BASE + 0x22c) +#endif +#define MTK_LRO_RX1_DLY_INT 0xa70 +#define MTK_MAX_DELAY_INT 0x8f0f8f0f + +/* PDMA HW LRO IP Setting Registers */ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x414) +#else +#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x304) +#endif +#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40)) +#define MTK_RING_MYIP_VLD BIT(9) + +/* PDMA HW LRO ALT Debug Registers */ +#define MTK_LRO_ALT_DBG (PDMA_BASE + 0x440) +#define MTK_LRO_ALT_INDEX_OFFSET (8) + +/* PDMA HW LRO ALT Data Registers */ +#define MTK_LRO_ALT_DBG_DATA (PDMA_BASE + 0x444) + +/* PDMA HW LRO Ring Control Registers */ +#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40)) +#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40)) +#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40)) +#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22) +#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f) +#define MTK_RING_PSE_MODE (1 << 6) +#define MTK_RING_AUTO_LERAN_MODE (3 << 6) +#define MTK_RING_VLD BIT(8) +#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10) +#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26) +#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) + +/* LRO_RX_RING_CTRL_DW masks */ +#define MTK_LRO_RING_AGG_TIME_MASK BITS(10, 25) +#define MTK_LRO_RING_AGG_CNT_L_MASK BITS(26, 31) +#define MTK_LRO_RING_AGG_CNT_H_MASK BITS(0, 1) +#define MTK_LRO_RING_AGE_TIME_L_MASK BITS(22, 31) +#define MTK_LRO_RING_AGE_TIME_H_MASK BITS(0, 5) + +/* LRO_RX_RING_CTRL_DW0 offsets */ +#define MTK_RX_IPV6_FORCE_OFFSET (0) +#define MTK_RX_IPV4_FORCE_OFFSET (1) + +/* LRO_RX_RING_CTRL_DW1 offsets */ +#define MTK_LRO_RING_AGE_TIME_L_OFFSET (22) + +/* LRO_RX_RING_CTRL_DW2 offsets */ +#define MTK_LRO_RING_AGE_TIME_H_OFFSET (0) +#define MTK_RX_MODE_OFFSET (6) +#define MTK_RX_PORT_VALID_OFFSET (8) +#define MTK_RX_MYIP_VALID_OFFSET (9) +#define MTK_LRO_RING_AGG_TIME_OFFSET (10) +#define MTK_LRO_RING_AGG_CNT_L_OFFSET (26) + +/* LRO_RX_RING_CTRL_DW3 offsets */ +#define MTK_LRO_RING_AGG_CNT_H_OFFSET (0) + +/* LRO_RX_RING_STP_DTP_DW offsets */ +#define MTK_RX_TCP_DEST_PORT_OFFSET (0) +#define MTK_RX_TCP_SRC_PORT_OFFSET (16) + +/* QDMA TX Queue Configuration Registers */ +#define MTK_QTX_CFG(x) (QDMA_BASE + (x * 0x10)) +#define QDMA_RES_THRES 4 + +/* QDMA TX Queue Scheduler Registers */ +#define MTK_QTX_SCH(x) (QDMA_BASE + 4 + (x * 0x10)) + +/* QDMA RX Base Pointer Register */ +#define MTK_QRX_BASE_PTR0 (QDMA_BASE + 0x100) +#define MTK_QRX_BASE_PTR_CFG(x) (MTK_QRX_BASE_PTR0 + ((x) * 0x10)) + +/* QDMA RX Maximum Count Register */ +#define MTK_QRX_MAX_CNT0 
(QDMA_BASE + 0x104) +#define MTK_QRX_MAX_CNT_CFG(x) (MTK_QRX_MAX_CNT0 + ((x) * 0x10)) + +/* QDMA RX CPU Pointer Register */ +#define MTK_QRX_CRX_IDX0 (QDMA_BASE + 0x108) +#define MTK_QRX_CRX_IDX_CFG(x) (MTK_QRX_CRX_IDX0 + ((x) * 0x10)) + +/* QDMA RX DMA Pointer Register */ +#define MTK_QRX_DRX_IDX0 (QDMA_BASE + 0x10c) + +/* QDMA Global Configuration Register */ +#define MTK_QDMA_GLO_CFG (QDMA_BASE + 0x204) +#define MTK_RX_2B_OFFSET BIT(31) +#define MTK_RX_BT_32DWORDS (3 << 11) +#define MTK_NDP_CO_PRO BIT(10) +#define MTK_TX_WB_DDONE BIT(6) +#define MTK_DMA_SIZE_16DWORDS (2 << 4) +#define MTK_DMA_SIZE_32DWORDS (3 << 4) +#define MTK_RX_DMA_BUSY BIT(3) +#define MTK_TX_DMA_BUSY BIT(1) +#define MTK_RX_DMA_EN BIT(2) +#define MTK_TX_DMA_EN BIT(0) +#define MTK_DMA_BUSY_TIMEOUT HZ + +/* QDMA V2 Global Configuration Register */ +#define MTK_CHK_DDONE_EN BIT(28) +#define MTK_DMAD_WR_WDONE BIT(26) +#define MTK_WCOMP_EN BIT(24) +#define MTK_RESV_BUF (0x80 << 16) +#define MTK_MUTLI_CNT (0x4 << 12) +#define MTK_RESV_BUF_MASK (0xff << 16) + +/* QDMA Reset Index Register */ +#define MTK_QDMA_RST_IDX (QDMA_BASE + 0x208) + +/* QDMA Delay Interrupt Register */ +#define MTK_QDMA_DELAY_INT (QDMA_BASE + 0x20c) + +/* QDMA Flow Control Register */ +#define MTK_QDMA_FC_THRES (QDMA_BASE + 0x210) +#define FC_THRES_DROP_MODE BIT(20) +#define FC_THRES_DROP_EN (7 << 16) +#define FC_THRES_MIN 0x4444 + +/* QDMA Interrupt Status Register */ +#define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218) +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_RX_DONE_INT(ring_no) \ + ((ring_no)? BIT(16 + (ring_no)) : BIT(14)) +#else +#define MTK_RX_DONE_INT(ring_no) \ + ((ring_no)? BIT(24 + (ring_no)) : BIT(30)) +#endif +#define MTK_RX_DONE_INT3 BIT(19) +#define MTK_RX_DONE_INT2 BIT(18) +#define MTK_RX_DONE_INT1 BIT(17) +#define MTK_RX_DONE_INT0 BIT(16) +#define MTK_TX_DONE_INT3 BIT(3) +#define MTK_TX_DONE_INT2 BIT(2) +#define MTK_TX_DONE_INT1 BIT(1) +#define MTK_TX_DONE_INT0 BIT(0) +#define MTK_TX_DONE_DLY BIT(28) +#define MTK_TX_DONE_INT MTK_TX_DONE_DLY + +/* QDMA Interrupt grouping registers */ +#define MTK_QDMA_INT_GRP1 (QDMA_BASE + 0x220) +#define MTK_QDMA_INT_GRP2 (QDMA_BASE + 0x224) +#define MTK_RLS_DONE_INT BIT(0) + +/* QDMA Interrupt Status Register */ +#define MTK_QDMA_INT_MASK (QDMA_BASE + 0x21c) + +/* QDMA DMA FSM */ +#define MTK_QDMA_FSM (QDMA_BASE + 0x234) + +/* QDMA Interrupt Mask Register */ +#define MTK_QDMA_HRED2 (QDMA_BASE + 0x244) + +/* QDMA TX Forward CPU Pointer Register */ +#define MTK_QTX_CTX_PTR (QDMA_BASE +0x300) + +/* QDMA TX Forward DMA Pointer Register */ +#define MTK_QTX_DTX_PTR (QDMA_BASE +0x304) + +/* QDMA TX Forward DMA Counter */ +#define MTK_QDMA_FWD_CNT (QDMA_BASE + 0x308) + +/* QDMA TX Release CPU Pointer Register */ +#define MTK_QTX_CRX_PTR (QDMA_BASE +0x310) + +/* QDMA TX Release DMA Pointer Register */ +#define MTK_QTX_DRX_PTR (QDMA_BASE +0x314) + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_HEAD (QDMA_BASE +0x320) + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_TAIL (QDMA_BASE +0x324) + +/* QDMA FQ Free Page Counter Register */ +#define MTK_QDMA_FQ_CNT (QDMA_BASE +0x328) + +/* QDMA FQ Free Page Buffer Length Register */ +#define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c) + +/* WDMA Registers */ +#define MTK_WDMA_DTX_PTR(x) (WDMA_BASE(x) + 0xC) +#define MTK_WDMA_GLO_CFG(x) (WDMA_BASE(x) + 0x204) +#define MTK_WDMA_TX_DBG_MON0(x) (WDMA_BASE(x) + 0x230) +#define MTK_CDM_TXFIFO_RDY BIT(7) + +/* GMA1 Received Good Byte Count Register */ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define 
MTK_GDM1_TX_GBCNT 0x1C00 +#else +#define MTK_GDM1_TX_GBCNT 0x2400 +#endif +#define MTK_STAT_OFFSET 0x40 + +/* QDMA TX NUM */ +#define MTK_QDMA_TX_NUM 16 +#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM) - 1) +#define QID_LOW_BITS(x) ((x) & 0xf) +#define QID_HIGH_BITS(x) ((((x) >> 4) & 0x3) << 20) +#define QID_BITS_V2(x) (((x) & 0x3f) << 16) + +#define MTK_QDMA_GMAC2_QID 8 + +/* QDMA V2 descriptor txd6 */ +#define TX_DMA_INS_VLAN_V2 BIT(16) + +/* QDMA V2 descriptor txd5 */ +#define TX_DMA_CHKSUM_V2 (0x7 << 28) +#define TX_DMA_TSO_V2 BIT(31) + +/* QDMA V2 descriptor txd4 */ +#define TX_DMA_FPORT_SHIFT_V2 8 +#define TX_DMA_FPORT_MASK_V2 0xf +#define TX_DMA_SWC_V2 BIT(30) + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_TX_DMA_BUF_LEN 0xffff +#define MTK_TX_DMA_BUF_SHIFT 8 +#else +#define MTK_TX_DMA_BUF_LEN 0x3fff +#define MTK_TX_DMA_BUF_SHIFT 16 +#endif + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MTK_RX_DMA_BUF_LEN 0xffff +#define MTK_RX_DMA_BUF_SHIFT 8 +#define RX_DMA_SPORT_SHIFT 26 +#define RX_DMA_SPORT_MASK 0xf +#else +#define MTK_RX_DMA_BUF_LEN 0x3fff +#define MTK_RX_DMA_BUF_SHIFT 16 +#define RX_DMA_SPORT_SHIFT 19 +#define RX_DMA_SPORT_MASK 0x7 +#endif + +/* QDMA descriptor txd4 */ +#define TX_DMA_CHKSUM (0x7 << 29) +#define TX_DMA_TSO BIT(28) +#define TX_DMA_FPORT_SHIFT 25 +#define TX_DMA_FPORT_MASK 0x7 +#define TX_DMA_INS_VLAN BIT(16) + +/* QDMA descriptor txd3 */ +#define TX_DMA_OWNER_CPU BIT(31) +#define TX_DMA_LS0 BIT(30) +#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << MTK_TX_DMA_BUF_SHIFT) +#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN) +#define TX_DMA_SWC BIT(14) +#define TX_DMA_SDL(_x) (TX_DMA_PLEN0(_x)) + +/* PDMA on MT7628 */ +#define TX_DMA_DONE BIT(31) +#define TX_DMA_LS1 BIT(14) +#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) + +/* QDMA descriptor rxd2 */ +#define RX_DMA_DONE BIT(31) +#define RX_DMA_LSO BIT(30) +#define RX_DMA_PLEN0(_x) (((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT) +#define RX_DMA_GET_PLEN0(_x) (((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN) +#define RX_DMA_GET_AGG_CNT(_x) (((_x) >> 2) & 0xff) +#define RX_DMA_GET_REV(_x) (((_x) >> 10) & 0x1f) +#define RX_DMA_VTAG BIT(15) + +/* QDMA descriptor rxd3 */ +#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK) +#define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) +#define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff) + +/* QDMA descriptor rxd4 */ +#define RX_DMA_L4_VALID BIT(24) +#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ +#define RX_DMA_SPECIAL_TAG BIT(22) /* switch header in packet */ + +#define RX_DMA_GET_SPORT(_x) (((_x) >> RX_DMA_SPORT_SHIFT) & RX_DMA_SPORT_MASK) + +/* PDMA V2 descriptor rxd3 */ +#define RX_DMA_VTAG_V2 BIT(0) +#define RX_DMA_L4_VALID_V2 BIT(2) + +/* PDMA V2 descriptor rxd4 */ +#define RX_DMA_VID_V2(_x) RX_DMA_VID(_x) +#define RX_DMA_TCI_V2(_x) RX_DMA_TCI(_x) +#define RX_DMA_VPID_V2(_x) RX_DMA_VPID(_x) + +/* PDMA V2 descriptor rxd6 */ +#define RX_DMA_GET_FLUSH_RSN_V2(_x) ((_x) & 0x7) +#define RX_DMA_GET_AGG_CNT_V2(_x) (((_x) >> 16) & 0xff) + +/* PHY Indirect Access Control registers */ +#define MTK_PHY_IAC 0x10004 +#define PHY_IAC_ACCESS BIT(31) +#define PHY_IAC_READ BIT(19) +#define PHY_IAC_WRITE BIT(18) +#define PHY_IAC_START BIT(16) +#define PHY_IAC_ADDR_SHIFT 20 +#define PHY_IAC_REG_SHIFT 25 +#define PHY_IAC_TIMEOUT HZ + +#define MTK_MAC_MISC 0x1000c +#define MTK_MUX_TO_ESW BIT(0) + +/* Mac control registers */ +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) +#define MAC_MCR_MAX_RX_1536 BIT(24) +#define MAC_MCR_IPG_CFG (BIT(18) | 
BIT(16)) +#define MAC_MCR_FORCE_MODE BIT(15) +#define MAC_MCR_TX_EN BIT(14) +#define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_BACKOFF_EN BIT(9) +#define MAC_MCR_BACKPR_EN BIT(8) +#define MAC_MCR_FORCE_RX_FC BIT(5) +#define MAC_MCR_FORCE_TX_FC BIT(4) +#define MAC_MCR_SPEED_1000 BIT(3) +#define MAC_MCR_SPEED_100 BIT(2) +#define MAC_MCR_FORCE_DPX BIT(1) +#define MAC_MCR_FORCE_LINK BIT(0) +#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE) + +/* Mac status registers */ +#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100)) +#define MAC_MSR_EEE1G BIT(7) +#define MAC_MSR_EEE100M BIT(6) +#define MAC_MSR_RX_FC BIT(5) +#define MAC_MSR_TX_FC BIT(4) +#define MAC_MSR_SPEED_1000 BIT(3) +#define MAC_MSR_SPEED_100 BIT(2) +#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100) +#define MAC_MSR_DPX BIT(1) +#define MAC_MSR_LINK BIT(0) + +/* TRGMII RXC control register */ +#define TRGMII_RCK_CTRL 0x10300 +#define DQSI0(x) ((x << 0) & GENMASK(6, 0)) +#define DQSI1(x) ((x << 8) & GENMASK(14, 8)) +#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) +#define RXC_RST BIT(31) +#define RXC_DQSISEL BIT(30) +#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16)) +#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2) + +#define NUM_TRGMII_CTRL 5 + +/* TRGMII RXC control register */ +#define TRGMII_TCK_CTRL 0x10340 +#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) +#define TXC_INV BIT(30) +#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2) +#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2)) + +/* TRGMII TX Drive Strength */ +#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i)) +#define TD_DM_DRVP(x) ((x) & 0xf) +#define TD_DM_DRVN(x) (((x) & 0xf) << 4) + +/* TRGMII Interface mode register */ +#define INTF_MODE 0x10390 +#define TRGMII_INTF_DIS BIT(0) +#define TRGMII_MODE BIT(1) +#define TRGMII_CENTRAL_ALIGNED BIT(2) +#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED) +#define INTF_MODE_RGMII_10_100 0 + +/* GPIO port control registers for GMAC 2*/ +#define GPIO_OD33_CTRL8 0x4c0 +#define GPIO_BIAS_CTRL 0xed0 +#define GPIO_DRV_SEL10 0xf00 + +/* ethernet subsystem chip id register */ +#define ETHSYS_CHIPID0_3 0x0 +#define ETHSYS_CHIPID4_7 0x4 +#define MT7623_ETH 7623 +#define MT7622_ETH 7622 +#define MT7621_ETH 7621 + +/* ethernet system control register */ +#define ETHSYS_SYSCFG 0x10 +#define SYSCFG_DRAM_TYPE_DDR2 BIT(4) + +/* ethernet subsystem config register */ +#define ETHSYS_SYSCFG0 0x14 +#define SYSCFG0_GE_MASK 0x3 +#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) +#define SYSCFG0_SGMII_MASK GENMASK(9, 8) +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK) +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK) +#define SYSCFG0_SGMII_GMAC1_V2 BIT(9) +#define SYSCFG0_SGMII_GMAC2_V2 BIT(8) + + +/* ethernet subsystem clock register */ +#define ETHSYS_CLKCFG0 0x2c +#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11) +#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6)) +#define ETHSYS_TRGMII_MT7621_APLL BIT(6) +#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5) + +/* ethernet reset control register */ +#define ETHSYS_RSTCTRL 0x34 +#define RSTCTRL_FE BIT(6) +#define RSTCTRL_ETH BIT(23) +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define RSTCTRL_PPE0 BIT(30) +#define RSTCTRL_PPE1 BIT(31) +#else +#define RSTCTRL_PPE0 BIT(31) +#define RSTCTRL_PPE1 0 +#endif + +/* ethernet reset check idle register */ +#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 + + +/* SGMII subsystem config registers */ +/* Register to auto-negotiation restart */ +#define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_AN_RESTART BIT(9) 
+#define SGMII_ISOLATE BIT(10) +#define SGMII_AN_ENABLE BIT(12) +#define SGMII_LINK_STATYS BIT(18) +#define SGMII_AN_ABILITY BIT(19) +#define SGMII_AN_COMPLETE BIT(21) +#define SGMII_PCS_FAULT BIT(23) +#define SGMII_AN_EXPANSION_CLR BIT(30) + +/* Register to programmable link timer, the unit in 2 * 8ns */ +#define SGMSYS_PCS_LINK_TIMER 0x18 +#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) + +/* Register to control remote fault */ +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_IF_MODE_BIT0 BIT(0) +#define SGMII_SPEED_DUPLEX_AN BIT(1) +#define SGMII_SPEED_10 0x0 +#define SGMII_SPEED_100 BIT(2) +#define SGMII_SPEED_1000 BIT(3) +#define SGMII_DUPLEX_FULL BIT(4) +#define SGMII_IF_MODE_BIT5 BIT(5) +#define SGMII_REMOTE_FAULT_DIS BIT(8) +#define SGMII_CODE_SYNC_SET_VAL BIT(9) +#define SGMII_CODE_SYNC_SET_EN BIT(10) +#define SGMII_SEND_AN_ERROR_EN BIT(11) +#define SGMII_IF_MODE_MASK GENMASK(5, 1) + +/* Register to set SGMII speed, ANA RG_ Control Signals III*/ +#define SGMSYS_ANA_RG_CS3 0x2028 +#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3)) +#define RG_PHY_SPEED_1_25G 0x0 +#define RG_PHY_SPEED_3_125G BIT(2) + +/* Register to power up QPHY */ +#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 +#define SGMII_PHYA_PWD BIT(4) + +/* Register to QPHY wrapper control */ +#define SGMSYS_QPHY_WRAP_CTRL 0xec +#define SGMII_PN_SWAP_MASK GENMASK(1, 0) +#define SGMII_PN_SWAP_TX_RX (BIT(0) | BIT(1)) + +/* Infrasys subsystem config registers */ +#define INFRA_MISC2 0x70c +#define CO_QPHY_SEL BIT(0) +#define GEPHY_MAC_SEL BIT(1) + +/* Top misc registers */ +#define USB_PHY_SWITCH_REG 0x218 +#define QPHY_SEL_MASK GENMASK(1, 0) +#define SGMII_QPHY_SEL 0x2 + +/*MDIO control*/ +#define MII_MMD_ACC_CTL_REG 0x0d +#define MII_MMD_ADDR_DATA_REG 0x0e +#define MMD_OP_MODE_DATA BIT(14) + +/* MT7628/88 specific stuff */ +#define MT7628_PDMA_OFFSET 0x0800 +#define MT7628_SDM_OFFSET 0x0c00 + +#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00) +#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04) +#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08) +#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c) +#define MT7628_PST_DTX_IDX0 BIT(0) + +#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c) +#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10) + +struct mtk_rx_dma { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + unsigned int rxd5; + unsigned int rxd6; + unsigned int rxd7; + unsigned int rxd8; +#endif +} __packed __aligned(4); + +struct mtk_tx_dma { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + unsigned int txd5; + unsigned int txd6; + unsigned int txd7; + unsigned int txd8; +#endif +} __packed __aligned(4); + +struct mtk_eth; +struct mtk_mac; + +/* struct mtk_hw_stats - the structure that holds the traffic statistics. + * @stats_lock: make sure that stats operations are atomic + * @reg_offset: the status register offset of the SoC + * @syncp: the refcount + * + * All of the supported SoCs have hardware counters for traffic statistics. + * Whenever the status IRQ triggers we can read the latest stats from these + * counters and store them in this struct. 
+ */ +struct mtk_hw_stats { + u64 tx_bytes; + u64 tx_packets; + u64 tx_skip; + u64 tx_collisions; + u64 rx_bytes; + u64 rx_packets; + u64 rx_overflow; + u64 rx_fcs_errors; + u64 rx_short_errors; + u64 rx_long_errors; + u64 rx_checksum_errors; + u64 rx_flow_control_packets; + + spinlock_t stats_lock; + u32 reg_offset; + struct u64_stats_sync syncp; +}; + +enum mtk_tx_flags { + /* PDMA descriptor can point at 1-2 segments. This enum allows us to + * track how memory was allocated so that it can be freed properly. + */ + MTK_TX_FLAGS_SINGLE0 = 0x01, + MTK_TX_FLAGS_PAGE0 = 0x02, + + /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted + * SKB out instead of looking up through hardware TX descriptor. + */ + MTK_TX_FLAGS_FPORT0 = 0x04, + MTK_TX_FLAGS_FPORT1 = 0x08, +}; + +/* This enum allows us to identify how the clock is defined on the array of the + * clock in the order + */ +enum mtk_clks_map { + MTK_CLK_ETHIF, + MTK_CLK_SGMIITOP, + MTK_CLK_ESW, + MTK_CLK_GP0, + MTK_CLK_GP1, + MTK_CLK_GP2, + MTK_CLK_FE, + MTK_CLK_TRGPLL, + MTK_CLK_SGMII_TX_250M, + MTK_CLK_SGMII_RX_250M, + MTK_CLK_SGMII_CDR_REF, + MTK_CLK_SGMII_CDR_FB, + MTK_CLK_SGMII2_TX_250M, + MTK_CLK_SGMII2_RX_250M, + MTK_CLK_SGMII2_CDR_REF, + MTK_CLK_SGMII2_CDR_FB, + MTK_CLK_SGMII_CK, + MTK_CLK_ETH2PLL, + MTK_CLK_WOCPU0, + MTK_CLK_WOCPU1, + MTK_CLK_MAX +}; + +#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL)) +#define MT7621_CLKS_BITMAP (0) +#define MT7628_CLKS_BITMAP (0) +#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP)) + +#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB)) + + +#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_WOCPU0) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB)) +enum mtk_dev_state { + MTK_HW_INIT, + MTK_RESETTING +}; + +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at + * by the TX descriptor s + * @skb: The SKB pointer of the packet being sent + * @dma_addr0: The base addr of the first segment + * @dma_len0: The length of the first segment + * @dma_addr1: The base addr of the second segment + * @dma_len1: The length of the second segment 
+ */ +struct mtk_tx_buf { + struct sk_buff *skb; + u32 flags; + DEFINE_DMA_UNMAP_ADDR(dma_addr0); + DEFINE_DMA_UNMAP_LEN(dma_len0); + DEFINE_DMA_UNMAP_ADDR(dma_addr1); + DEFINE_DMA_UNMAP_LEN(dma_len1); +}; + +/* struct mtk_tx_ring - This struct holds info describing a TX ring + * @dma: The descriptor ring + * @buf: The memory pointed at by the ring + * @phys: The physical addr of tx_buf + * @next_free: Pointer to the next free descriptor + * @last_free: Pointer to the last free descriptor + * @last_free_ptr: Hardware pointer value of the last free descriptor + * @thresh: The threshold of minimum amount of free descriptors + * @free_count: QDMA uses a linked list. Track how many free descriptors + * are present + */ +struct mtk_tx_ring { + struct mtk_tx_dma *dma; + struct mtk_tx_buf *buf; + dma_addr_t phys; + struct mtk_tx_dma *next_free; + struct mtk_tx_dma *last_free; + u32 last_free_ptr; + u16 thresh; + atomic_t free_count; + int dma_size; + struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */ + dma_addr_t phys_pdma; + int cpu_idx; +}; + +/* PDMA rx ring mode */ +enum mtk_rx_flags { + MTK_RX_FLAGS_NORMAL = 0, + MTK_RX_FLAGS_HWLRO, + MTK_RX_FLAGS_QDMA, +}; + +/* struct mtk_rx_ring - This struct holds info describing a RX ring + * @dma: The descriptor ring + * @data: The memory pointed at by the ring + * @phys: The physical addr of rx_buf + * @frag_size: How big can each fragment be + * @buf_size: The size of each packet buffer + * @calc_idx: The current head of ring + * @ring_no: The index of ring + */ +struct mtk_rx_ring { + struct mtk_rx_dma *dma; + u8 **data; + dma_addr_t phys; + u16 frag_size; + u16 buf_size; + u16 dma_size; + bool calc_idx_update; + u16 calc_idx; + u32 crx_idx_reg; + u32 ring_no; +}; + +/* struct mtk_napi - This is the structure holding NAPI-related information, + * and a mtk_napi struct is binding to one interrupt group + * @napi: The NAPI struct + * @rx_ring: Pointer to the memory holding info about the RX ring + * @irq_grp_idx: The index indicates which interrupt group that this + * mtk_napi is binding to + */ +struct mtk_napi { + struct napi_struct napi; + struct mtk_eth *eth; + struct mtk_rx_ring *rx_ring; + u32 irq_grp_no; +}; + +enum mkt_eth_capabilities { + MTK_RGMII_BIT = 0, + MTK_TRGMII_BIT, + MTK_SGMII_BIT, + MTK_ESW_BIT, + MTK_GEPHY_BIT, + MTK_MUX_BIT, + MTK_INFRA_BIT, + MTK_SHARED_SGMII_BIT, + MTK_HWLRO_BIT, + MTK_RSS_BIT, + MTK_SHARED_INT_BIT, + MTK_TRGMII_MT7621_CLK_BIT, + MTK_QDMA_BIT, + MTK_NETSYS_V2_BIT, + MTK_SOC_MT7628_BIT, + MTK_RSTCTRL_PPE1_BIT, + MTK_U3_COPHY_V2_BIT, + + /* MUX BITS*/ + MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, + MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT, + MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT, + MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT, + MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT, + + /* PATH BITS */ + MTK_ETH_PATH_GMAC1_RGMII_BIT, + MTK_ETH_PATH_GMAC1_TRGMII_BIT, + MTK_ETH_PATH_GMAC1_SGMII_BIT, + MTK_ETH_PATH_GMAC2_RGMII_BIT, + MTK_ETH_PATH_GMAC2_SGMII_BIT, + MTK_ETH_PATH_GMAC2_GEPHY_BIT, + MTK_ETH_PATH_GDM1_ESW_BIT, +}; + +/* Supported hardware group on SoCs */ +#define MTK_RGMII BIT(MTK_RGMII_BIT) +#define MTK_TRGMII BIT(MTK_TRGMII_BIT) +#define MTK_SGMII BIT(MTK_SGMII_BIT) +#define MTK_ESW BIT(MTK_ESW_BIT) +#define MTK_GEPHY BIT(MTK_GEPHY_BIT) +#define MTK_MUX BIT(MTK_MUX_BIT) +#define MTK_INFRA BIT(MTK_INFRA_BIT) +#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT) +#define MTK_HWLRO BIT(MTK_HWLRO_BIT) +#define MTK_RSS BIT(MTK_RSS_BIT) +#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) +#define MTK_TRGMII_MT7621_CLK 
BIT(MTK_TRGMII_MT7621_CLK_BIT) +#define MTK_QDMA BIT(MTK_QDMA_BIT) +#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT) +#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT) +#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT) +#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT) + +#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ + BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) +#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \ + BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) +#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \ + BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) +#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ + BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT) +#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \ + BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT) + +/* Supported path present on SoCs */ +#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT) +#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT) +#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT) +#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT) +#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT) + +#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII) +#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII) +#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII) +#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII) +#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII) +#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY) +#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW) + +/* MUXes present on SoCs */ +/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */ +#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX) + +/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */ +#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \ + (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA) + +/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */ +#define MTK_MUX_U3_GMAC2_TO_QPHY \ + (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA) + +/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */ +#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ + (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \ + MTK_SHARED_SGMII) + +/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */ +#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \ + (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX) + +#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) + +#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \ + MTK_GMAC2_RGMII | MTK_SHARED_INT | \ + MTK_TRGMII_MT7621_CLK | MTK_QDMA) + +#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \ + MTK_GMAC2_SGMII | MTK_GDM1_ESW | \ + MTK_MUX_GDM1_TO_GMAC1_ESW | \ + MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA) + +#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \ + MTK_QDMA) + +#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628) + +#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ + MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \ + MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \ + MTK_MUX_U3_GMAC2_TO_QPHY | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) + +#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ + MTK_NETSYS_V2) + +#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ + MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \ + MTK_NETSYS_V2) + +/* struct mtk_eth_data - This is the 
structure holding all differences
+ * among various platforms
+ * @ana_rgc3: The offset for register ANA_RGC3 related to
+ * sgmiisys syscon
+ * @caps Flags showing the extra capabilities of the SoC
+ * @hw_features Flags showing the supported HW features
+ * @required_clks Bitmap of the clocks required on
+ * the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
+ */
+struct mtk_soc_data {
+ u32 ana_rgc3;
+ u32 caps;
+ u32 required_clks;
+ bool required_pctl;
+ netdev_features_t hw_features;
+ bool has_sram;
+};
+
+/* currently no SoC has more than 2 macs */
+#define MTK_MAX_DEVS 2
+
+#define MTK_SGMII_PHYSPEED_AN BIT(31)
+#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
+#define MTK_SGMII_PHYSPEED_1000 BIT(0)
+#define MTK_SGMII_PHYSPEED_2500 BIT(1)
+#define MTK_SGMII_PN_SWAP BIT(16)
+#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
+
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @regmap: The register map pointing at the range used to setup
+ * SGMII modes
+ * @flags: The enum refers to which mode the sgmii wants to run on
+ * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ */
+
+struct mtk_sgmii {
+ struct regmap *regmap[MTK_MAX_DEVS];
+ u32 flags[MTK_MAX_DEVS];
+ u32 ana_rgc3;
+};
+
+
+/* struct mtk_reset_event - This is the structure holding statistics counters
+ * for reset events
+ * @count: The counter is used to record the number of events
+ */
+struct mtk_reset_event {
+ u32 count[32];
+};
+
+/* struct mtk_eth - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @base: The mapped register i/o base
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
+ * dummy for NAPI to work
+ * @netdev: The netdev instances
+ * @mac: Each netdev is linked to a physical MAC
+ * @irq: The IRQ that we are using
+ * @msg_enable: Ethtool msg level
+ * @ethsys: The register map pointing at the range used to setup
+ * MII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
+ * @pctl: The register map pointing at the range used to setup
+ * GMAC port drive/slew values
+ * @dma_refcnt: track how many netdevs are using the DMA engine
+ * @tx_ring: Pointer to the memory holding info about the TX ring
+ * @rx_ring: Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+ * @clks: clock array for all clocks required
+ * @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device
+ * @soc: Holding the specific data for the various SoCs
+ */
+
+struct mtk_eth {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t page_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+ int irq[MTK_MAX_IRQ_NUM];
+ u32 msg_enable;
+ unsigned long sysclk;
+ struct regmap *ethsys;
+ struct regmap *infra;
+ struct mtk_sgmii *sgmii;
+ struct regmap *pctl;
+ bool hwlro;
+ refcount_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
+ struct mtk_rx_ring rx_ring_qdma;
+ struct napi_struct tx_napi;
+ struct mtk_napi rx_napi[MTK_RX_NAPI_NUM];
+ struct mtk_tx_dma *scratch_ring;
+ struct mtk_reset_event reset_event;
+ dma_addr_t phy_scratch_ring;
+ void *scratch_head;
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
+ struct work_struct pending_work;
+ unsigned long state;
+
+ const struct mtk_soc_data *soc;
+
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
+ spinlock_t syscfg0_lock;
+ struct timer_list mtk_dma_monitor_timer;
+};
+
+/* struct mtk_mac - the structure that holds the info about the MACs of the
+ * SoC
+ * @id: The number of the MAC
+ * @interface: Interface mode kept for detecting change in hw settings
+ * @of_node: Our devicetree node
+ * @hw: Backpointer to our main data structure
+ * @hw_stats: Packet statistics counter
+ */
+struct mtk_mac {
+ unsigned int id;
+ phy_interface_t interface;
+ unsigned int mode;
+ int speed;
+ struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct mtk_eth *hw;
+ struct mtk_hw_stats *hw_stats;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+};
+
+/* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ +extern const struct of_device_id of_mtk_match[]; +extern u32 mtk_hwlro_stats_ebl; + +/* read the hardware status register */ +void mtk_stats_update_mac(struct mtk_mac *mac); + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); +u32 mtk_r32(struct mtk_eth *eth, unsigned reg); +u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg); + +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, + u32 ana_rgc3); +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, unsigned int id); +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, unsigned int id, + const struct phylink_link_state *state); +void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); + +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); +void mtk_gdm_config(struct mtk_eth *eth, u32 config); +void ethsys_reset(struct mtk_eth *eth, u32 reset_bits); + +#endif /* MTK_ETH_H */ diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/Makefile b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/Makefile new file mode 100644 index 000000000..bf1bbcbc2 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/Makefile @@ -0,0 +1,5 @@ +ccflags-y=-Werror + +obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtkhnat.o +mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o hnat_mcast.o +mtkhnat-$(CONFIG_NET_DSA_MT7530) += hnat_stag.o diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.c new file mode 100644 index 000000000..ad4184aa7 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.c @@ -0,0 +1,912 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014-2016 Sean Wang + * Copyright (C) 2016-2017 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nf_hnat_mtk.h" +#include "hnat.h" + +struct mtk_hnat *hnat_priv; +static struct socket *_hnat_roam_sock; +static struct work_struct _hnat_roam_work; + +int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL; +EXPORT_SYMBOL(ra_sw_nat_hook_rx); +int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL; +EXPORT_SYMBOL(ra_sw_nat_hook_tx); + +int (*ppe_del_entry_by_mac)(unsigned char *mac) = NULL; +EXPORT_SYMBOL(ppe_del_entry_by_mac); + +void (*ppe_dev_register_hook)(struct net_device *dev) = NULL; +EXPORT_SYMBOL(ppe_dev_register_hook); +void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL; +EXPORT_SYMBOL(ppe_dev_unregister_hook); + +static void hnat_sma_build_entry(struct timer_list *t) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) + cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG, + SMA, SMA_FWD_CPU_BUILD_ENTRY); +} + +void hnat_cache_ebl(int enable) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) { + cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 1); + cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 0); + cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable); + } +} + +static void hnat_reset_timestamp(struct timer_list *t) +{ + struct foe_entry *entry; + int hash_index; + + hnat_cache_ebl(0); + cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 0); + cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 0); + writel(0, hnat_priv->fe_base + 0x0010); + + for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) { + entry = hnat_priv->foe_table_cpu[0] + hash_index; + if (entry->bfib1.state == BIND) + entry->bfib1.time_stamp = + readl(hnat_priv->fe_base + 0x0010) & (0xFFFF); + } + + cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 1); + cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 1); + hnat_cache_ebl(1); + + mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ); +} + +static void cr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void cr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +void cr_set_field(void __iomem *reg, u32 field, u32 val) +{ + unsigned int tv = readl(reg); + + tv &= ~field; + tv |= ((val) << (ffs((unsigned int)field) - 1)); + writel(tv, reg); +} + +/*boundary entry can't be used to accelerate data flow*/ +static void exclude_boundary_entry(struct foe_entry *foe_table_cpu) +{ + int entry_base = 0; + int bad_entry, i, j; + struct foe_entry *foe_entry; + /*these entries are boundary every 128 entries*/ + int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115}; + + if (!foe_table_cpu) + return; + + for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) { + /* set boundary entries as static*/ + for (j = 0; j < 8; j++) { + bad_entry = entry_base + boundary_entry_offset[j]; + foe_entry = &foe_table_cpu[bad_entry]; + foe_entry->udib1.sta = 1; + } + entry_base = (i + 1) * 128; + } +} + +void set_gmac_ppe_fwd(int id, int enable) +{ + void __iomem *reg; + u32 val; + + reg = hnat_priv->fe_base + (id ? 
GDMA2_FWD_CFG : GDMA1_FWD_CFG); + + if (enable) { + cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE); + + return; + } + + /*disabled */ + val = readl(reg); + if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE) + cr_set_field(reg, GDM_ALL_FRC_MASK, + BITS_GDM_ALL_FRC_P_CPU_PDMA); +} + +static int entry_mac_cmp(struct foe_entry *entry, u8 *mac) +{ + int ret = 0; + + if(IS_IPV4_GRP(entry)) { + if(((swab32(entry->ipv4_hnapt.dmac_hi) == *(u32 *)mac) && + (swab16(entry->ipv4_hnapt.dmac_lo) == *(u16 *)&mac[4])) || + ((swab32(entry->ipv4_hnapt.smac_hi) == *(u32 *)mac) && + (swab16(entry->ipv4_hnapt.smac_lo) == *(u16 *)&mac[4]))) + ret = 1; + } else { + if(((swab32(entry->ipv6_5t_route.dmac_hi) == *(u32 *)mac) && + (swab16(entry->ipv6_5t_route.dmac_lo) == *(u16 *)&mac[4])) || + ((swab32(entry->ipv6_5t_route.smac_hi) == *(u32 *)mac) && + (swab16(entry->ipv6_5t_route.smac_lo) == *(u16 *)&mac[4]))) + ret = 1; + } + + if (ret && debug_level >= 2) + pr_info("mac=%pM\n", mac); + + return ret; +} + +int entry_delete_by_mac(u8 *mac) +{ + struct foe_entry *entry = NULL; + int index, i, ret = 0; + + for (i = 0; i < CFG_PPE_NUM; i++) { + entry = hnat_priv->foe_table_cpu[i]; + for (index = 0; index < DEF_ETRY_NUM; entry++, index++) { + if(entry->bfib1.state == BIND && entry_mac_cmp(entry, mac)) { + memset(entry, 0, sizeof(*entry)); + hnat_cache_ebl(1); + if (debug_level >= 2) + pr_info("delete entry idx = %d\n", index); + ret++; + } + } + } + + if(!ret && debug_level >= 2) + pr_info("entry not found\n"); + + return ret; +} + +static void hnat_roam_handler(struct work_struct *work) +{ + struct kvec iov; + struct msghdr msg; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + struct nlattr *nla; + u8 rcv_buf[512]; + int len; + + if (!_hnat_roam_sock) + return; + + iov.iov_base = rcv_buf; + iov.iov_len = sizeof(rcv_buf); + memset(&msg, 0, sizeof(msg)); + msg.msg_namelen = sizeof(struct sockaddr_nl); + + len = kernel_recvmsg(_hnat_roam_sock, &msg, &iov, 1, iov.iov_len, 0); + if (len <= 0) + goto out; + + nlh = (struct nlmsghdr*)rcv_buf; + if (!NLMSG_OK(nlh, len) || nlh->nlmsg_type != RTM_NEWNEIGH) + goto out; + + len = nlh->nlmsg_len - NLMSG_HDRLEN; + ndm = (struct ndmsg *)NLMSG_DATA(nlh); + if (ndm->ndm_family != PF_BRIDGE) + goto out; + + nla = (struct nlattr *)((u8 *)ndm + sizeof(struct ndmsg)); + len -= NLMSG_LENGTH(sizeof(struct ndmsg)); + while (nla_ok(nla, len)) { + if (nla_type(nla) == NDA_LLADDR) { + entry_delete_by_mac(nla_data(nla)); + } + nla = nla_next(nla, &len); + } + +out: + schedule_work(&_hnat_roam_work); +} + +static int hnat_roaming_enable(void) +{ + struct socket *sock = NULL; + struct sockaddr_nl addr; + int ret; + + INIT_WORK(&_hnat_roam_work, hnat_roam_handler); + + ret = sock_create_kern(&init_net, AF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock); + if (ret < 0) + goto out; + + _hnat_roam_sock = sock; + + addr.nl_family = AF_NETLINK; + addr.nl_pad = 0; + addr.nl_pid = 65534; + addr.nl_groups = 1 << (RTNLGRP_NEIGH - 1); + ret = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr)); + if (ret < 0) + goto out; + + schedule_work(&_hnat_roam_work); + pr_info("hnat roaming work enable\n"); + + return 0; +out: + if (sock) + sock_release(sock); + + return ret; +} + +static void hnat_roaming_disable(void) +{ + if (_hnat_roam_sock) + sock_release(_hnat_roam_sock); + _hnat_roam_sock = NULL; + pr_info("hnat roaming work disable\n"); +} + +static int hnat_hw_init(u32 ppe_id) +{ + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + /* setup hashing */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, 
TB_ETRY_NUM, hnat_priv->etry_num_cfg); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, HASH_MODE, HASH_MODE_1); + writel(HASH_SEED_KEY, hnat_priv->ppe_base[ppe_id] + PPE_HASH_SEED); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, XMODE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY); + + /* set ip proto */ + writel(0xFFFFFFFF, hnat_priv->ppe_base[ppe_id] + PPE_IP_PROT_CHK); + + /* setup caching */ + hnat_cache_ebl(1); + + /* enable FOE */ + cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG, + BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN | + BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK | + BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN | + BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN); + + if (hnat_priv->data->version == MTK_HNAT_V4) + cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG, + BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN); + + /* setup FOE aging */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_MNP, 1000); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_DLTA, 3); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, UDP_DLTA, 12); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, NTU_DLTA, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, FIN_DLTA, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, TCP_DLTA, 7); + + /* setup FOE ka */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 0); + mdelay(10); + + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 2); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 3); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TICK_SEL, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 1); + + /* setup FOE rate limit */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, QURT_LMT, 16383); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, HALF_LMT, 16383); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, FULL_LMT, 16383); + /* setup binding threshold as 30 packets per second */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BNDR, BIND_RATE, 0x1E); + + /* setup FOE cf gen */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 1); + writel(0, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); /* pdma */ + /* writel(0x55555555, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); */ /* qdma */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1); + + if (hnat_priv->data->version == MTK_HNAT_V4) { + writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1); + 
writel(0x7f, hnat_priv->ppe_base[ppe_id] + PPE_SBW_CTRL); + } + + /*enable ppe mib counter*/ + if (hnat_priv->data->per_flow_accounting) { + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_EN, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_READ_CLEAR, 1); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1); + } + + hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd); + hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan); + + dev_info(hnat_priv->dev, "PPE%d hwnat start\n", ppe_id); + + return 0; +} + +static int hnat_start(u32 ppe_id) +{ + u32 foe_table_sz; + u32 foe_mib_tb_sz; + u32 etry_num_cfg; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + /* mapp the FOE table */ + for (etry_num_cfg = DEF_ETRY_NUM_CFG ; etry_num_cfg >= 0 ; + etry_num_cfg--, hnat_priv->foe_etry_num /= 2) { + foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry); + hnat_priv->foe_table_cpu[ppe_id] = dma_alloc_coherent( + hnat_priv->dev, foe_table_sz, + &hnat_priv->foe_table_dev[ppe_id], GFP_KERNEL); + + if (hnat_priv->foe_table_cpu[ppe_id]) + break; + } + + if (!hnat_priv->foe_table_cpu[ppe_id]) + return -1; + dev_info(hnat_priv->dev, "PPE%d entry number = %d\n", + ppe_id, hnat_priv->foe_etry_num); + + writel(hnat_priv->foe_table_dev[ppe_id], hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE); + memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz); + + if (hnat_priv->data->version == MTK_HNAT_V1) + exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]); + + if (hnat_priv->data->per_flow_accounting) { + foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry); + hnat_priv->foe_mib_cpu[ppe_id] = + dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz, + &hnat_priv->foe_mib_dev[ppe_id], GFP_KERNEL); + if (!hnat_priv->foe_mib_cpu[ppe_id]) + return -1; + writel(hnat_priv->foe_mib_dev[ppe_id], + hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE); + memset(hnat_priv->foe_mib_cpu[ppe_id], 0, foe_mib_tb_sz); + + hnat_priv->acct[ppe_id] = + kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting), + GFP_KERNEL); + if (!hnat_priv->acct[ppe_id]) + return -1; + } + + hnat_priv->etry_num_cfg = etry_num_cfg; + hnat_hw_init(ppe_id); + + return 0; +} + +static int ppe_busy_wait(u32 ppe_id) +{ + unsigned long t_start = jiffies; + u32 r = 0; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + while (1) { + r = readl((hnat_priv->ppe_base[ppe_id] + 0x0)); + if (!(r & BIT(31))) + return 0; + if (time_after(jiffies, t_start + HZ)) + break; + mdelay(10); + } + + dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__); + + return -1; +} + +static void hnat_stop(u32 ppe_id) +{ + u32 foe_table_sz; + u32 foe_mib_tb_sz; + struct foe_entry *entry, *end; + + if (ppe_id >= CFG_PPE_NUM) + return; + + /* send all traffic back to the DMA engine */ + set_gmac_ppe_fwd(0, 0); + set_gmac_ppe_fwd(1, 0); + + dev_info(hnat_priv->dev, "hwnat stop\n"); + + if (hnat_priv->foe_table_cpu[ppe_id]) { + entry = hnat_priv->foe_table_cpu[ppe_id]; + end = hnat_priv->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num; + while (entry < end) { + entry->bfib1.state = INVALID; + entry++; + } + } + /* disable caching */ + hnat_cache_ebl(0); + + /* flush cache has to be ahead of hnat disable --*/ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 0); + + /* disable scan mode and keep-alive */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0); + + ppe_busy_wait(ppe_id); + 
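+	/* Descriptive note (not in the vendor code): ppe_busy_wait() above polls the
+	 * busy bit (BIT(31)) at PPE offset 0x0 (PPE_GLO_CFG) for up to one second, so
+	 * by this point the PPE is idle (or a timeout was reported) and the teardown
+	 * below can clear the flow configuration and release the FOE/MIB tables.
+	 */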
+ /* disable FOE */ + cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG, + BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN | + BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN | + BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN | + BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE); + + if (hnat_priv->data->version == MTK_HNAT_V4) + cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG, + BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN); + + /* disable FOE aging */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 0); + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 0); + + /* free the FOE table */ + foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry); + if (hnat_priv->foe_table_cpu[ppe_id]) + dma_free_coherent(hnat_priv->dev, foe_table_sz, + hnat_priv->foe_table_cpu[ppe_id], + hnat_priv->foe_table_dev[ppe_id]); + hnat_priv->foe_table_cpu[ppe_id] = NULL; + writel(0, hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE); + + if (hnat_priv->data->per_flow_accounting) { + foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry); + if (hnat_priv->foe_mib_cpu[ppe_id]) + dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz, + hnat_priv->foe_mib_cpu[ppe_id], + hnat_priv->foe_mib_dev[ppe_id]); + writel(0, hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE); + kfree(hnat_priv->acct[ppe_id]); + } +} + +static void hnat_release_netdev(void) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (ext_entry->dev) + dev_put(ext_entry->dev); + ext_if_del(ext_entry); + kfree(ext_entry); + } + + if (hnat_priv->g_ppdev) + dev_put(hnat_priv->g_ppdev); + + if (hnat_priv->g_wandev) + dev_put(hnat_priv->g_wandev); +} + +static struct notifier_block nf_hnat_netdevice_nb __read_mostly = { + .notifier_call = nf_hnat_netdevice_event, +}; + +static struct notifier_block nf_hnat_netevent_nb __read_mostly = { + .notifier_call = nf_hnat_netevent_handler, +}; + +int hnat_enable_hook(void) +{ + /* register hook functions used by WHNAT module. + */ + if (hnat_priv->data->whnat) { + ra_sw_nat_hook_rx = + (hnat_priv->data->version == MTK_HNAT_V4) ? 
+ mtk_sw_nat_hook_rx : NULL; + ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx; + ppe_dev_register_hook = mtk_ppe_dev_register_hook; + ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook; + } + + if (hnat_register_nf_hooks()) + return -1; + + ppe_del_entry_by_mac = entry_delete_by_mac; + hook_toggle = 1; + + return 0; +} + +int hnat_disable_hook(void) +{ + int i, hash_index; + struct foe_entry *entry; + + ra_sw_nat_hook_tx = NULL; + ra_sw_nat_hook_rx = NULL; + hnat_unregister_nf_hooks(); + + for (i = 0; i < CFG_PPE_NUM; i++) { + cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG, + SMA, SMA_ONLY_FWD_CPU); + + for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) { + entry = hnat_priv->foe_table_cpu[i] + hash_index; + if (entry->bfib1.state == BIND) { + entry->ipv4_hnapt.udib1.state = INVALID; + entry->ipv4_hnapt.udib1.time_stamp = + readl((hnat_priv->fe_base + 0x0010)) & 0xFF; + } + } + } + + /* clear HWNAT cache */ + hnat_cache_ebl(1); + + mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ); + ppe_del_entry_by_mac = NULL; + hook_toggle = 0; + + return 0; +} + +int hnat_warm_init(void) +{ + u32 foe_table_sz, foe_mib_tb_sz, ppe_id = 0; + + unregister_netevent_notifier(&nf_hnat_netevent_nb); + + for (ppe_id = 0; ppe_id < CFG_PPE_NUM; ppe_id++) { + foe_table_sz = + hnat_priv->foe_etry_num * sizeof(struct foe_entry); + writel(hnat_priv->foe_table_dev[ppe_id], + hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE); + memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz); + + if (hnat_priv->data->version == MTK_HNAT_V1) + exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]); + + if (hnat_priv->data->per_flow_accounting) { + foe_mib_tb_sz = + hnat_priv->foe_etry_num * sizeof(struct mib_entry); + writel(hnat_priv->foe_mib_dev[ppe_id], + hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE); + memset(hnat_priv->foe_mib_cpu[ppe_id], 0, + foe_mib_tb_sz); + } + + hnat_hw_init(ppe_id); + } + + set_gmac_ppe_fwd(0, 1); + set_gmac_ppe_fwd(1, 1); + register_netevent_notifier(&nf_hnat_netevent_nb); + + return 0; +} + +static struct packet_type mtk_pack_type __read_mostly = { + .type = HQOS_MAGIC_TAG, + .func = mtk_hqos_ptype_cb, +}; + +static int hnat_probe(struct platform_device *pdev) +{ + int i; + int err = 0; + int index = 0; + struct resource *res; + const char *name; + struct device_node *np; + unsigned int val; + struct property *prop; + struct extdev_entry *ext_entry; + const struct of_device_id *match; + + hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL); + if (!hnat_priv) + return -ENOMEM; + + hnat_priv->foe_etry_num = DEF_ETRY_NUM; + + match = of_match_device(of_hnat_match, &pdev->dev); + if (unlikely(!match)) + return -EINVAL; + + hnat_priv->data = (struct mtk_hnat_data *)match->data; + + hnat_priv->dev = &pdev->dev; + np = hnat_priv->dev->of_node; + + err = of_property_read_string(np, "mtketh-wan", &name); + if (err < 0) + return -EINVAL; + + strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ - 1); + dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan); + + err = of_property_read_string(np, "mtketh-lan", &name); + if (err < 0) + strncpy(hnat_priv->lan, "eth0", IFNAMSIZ); + else + strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ - 1); + dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan); + + err = of_property_read_string(np, "mtketh-ppd", &name); + if (err < 0) + strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ); + else + strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ - 1); + dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd); + + /*get total gmac num in hnat*/ + err = 
of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val); + + if (err < 0) + return -EINVAL; + + hnat_priv->gmac_num = val; + + dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num); + + err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val); + + if (err < 0) { + hnat_priv->wan_dsa_port = NONE_DSA_PORT; + } else { + hnat_priv->wan_dsa_port = val; + dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port); + } + + err = of_property_read_u32_index(np, "mtketh-ppe-num", 0, &val); + + if (err < 0) + hnat_priv->ppe_num = 1; + else + hnat_priv->ppe_num = val; + + dev_info(&pdev->dev, "ppe num = %d\n", hnat_priv->ppe_num); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + + hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start, + res->end - res->start + 1); + if (!hnat_priv->fe_base) + return -EADDRNOTAVAIL; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0x2200; + + if (CFG_PPE_NUM > 1) + hnat_priv->ppe_base[1] = hnat_priv->fe_base + 0x2600; +#else + hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0xe00; +#endif + + err = hnat_init_debugfs(hnat_priv); + if (err) + return err; + + prop = of_find_property(np, "ext-devices", NULL); + for (name = of_prop_next_string(prop, NULL); name; + name = of_prop_next_string(prop, name), index++) { + ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL); + if (!ext_entry) { + err = -ENOMEM; + goto err_out1; + } + strncpy(ext_entry->name, (char *)name, IFNAMSIZ - 1); + ext_if_add(ext_entry); + } + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name); + } + + hnat_priv->lvid = 1; + hnat_priv->wvid = 2; + + for (i = 0; i < CFG_PPE_NUM; i++) { + err = hnat_start(i); + if (err) + goto err_out; + } + + if (hnat_priv->data->whnat) { + err = whnat_adjust_nf_hooks(); + if (err) + goto err_out; + } + + err = hnat_enable_hook(); + if (err) + goto err_out; + + register_netdevice_notifier(&nf_hnat_netdevice_nb); + register_netevent_notifier(&nf_hnat_netevent_nb); + + if (hnat_priv->data->mcast) { + for (i = 0; i < CFG_PPE_NUM; i++) + hnat_mcast_enable(i); + } + + timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0); + if (hnat_priv->data->version == MTK_HNAT_V3) { + timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0); + hnat_priv->hnat_reset_timestamp_timer.expires = jiffies; + add_timer(&hnat_priv->hnat_reset_timestamp_timer); + } + + if (IS_HQOS_MODE && IS_GMAC1_MODE) + dev_add_pack(&mtk_pack_type); + + err = hnat_roaming_enable(); + if (err) + pr_info("hnat roaming work fail\n"); + + return 0; + +err_out: + for (i = 0; i < CFG_PPE_NUM; i++) + hnat_stop(i); +err_out1: + hnat_deinit_debugfs(hnat_priv); + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + ext_if_del(ext_entry); + kfree(ext_entry); + } + return err; +} + +static int hnat_remove(struct platform_device *pdev) +{ + int i; + + hnat_roaming_disable(); + unregister_netdevice_notifier(&nf_hnat_netdevice_nb); + unregister_netevent_notifier(&nf_hnat_netevent_nb); + hnat_disable_hook(); + + if (hnat_priv->data->mcast) + hnat_mcast_disable(); + + for (i = 0; i < CFG_PPE_NUM; i++) + hnat_stop(i); + + hnat_deinit_debugfs(hnat_priv); + hnat_release_netdev(); + del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer); + if (hnat_priv->data->version == MTK_HNAT_V3) + 
del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer); + + if (IS_HQOS_MODE && IS_GMAC1_MODE) + dev_remove_pack(&mtk_pack_type); + + return 0; +} + +static const struct mtk_hnat_data hnat_data_v1 = { + .num_of_sch = 2, + .whnat = false, + .per_flow_accounting = false, + .mcast = false, + .version = MTK_HNAT_V1, +}; + +static const struct mtk_hnat_data hnat_data_v2 = { + .num_of_sch = 2, + .whnat = true, + .per_flow_accounting = true, + .mcast = false, + .version = MTK_HNAT_V2, +}; + +static const struct mtk_hnat_data hnat_data_v3 = { + .num_of_sch = 4, + .whnat = false, + .per_flow_accounting = false, + .mcast = false, + .version = MTK_HNAT_V3, +}; + +static const struct mtk_hnat_data hnat_data_v4 = { + .num_of_sch = 4, + .whnat = true, + .per_flow_accounting = true, + .mcast = false, + .version = MTK_HNAT_V4, +}; + +const struct of_device_id of_hnat_match[] = { + { .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 }, + { .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 }, + { .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 }, + { .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 }, + { .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_hnat_match); + +static struct platform_driver hnat_driver = { + .probe = hnat_probe, + .remove = hnat_remove, + .driver = { + .name = "mediatek_soc_hnat", + .of_match_table = of_hnat_match, + }, +}; + +module_platform_driver(hnat_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Mediatek Hardware NAT"); diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.h new file mode 100644 index 000000000..94b0f5437 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat.h @@ -0,0 +1,975 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014-2016 Sean Wang + * Copyright (C) 2016-2017 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include "hnat_mcast.h" + +/*--------------------------------------------------------------------------*/ +/* Register Offset*/ +/*--------------------------------------------------------------------------*/ +#define PPE_GLO_CFG 0x00 +#define PPE_FLOW_CFG 0x04 +#define PPE_IP_PROT_CHK 0x08 +#define PPE_IP_PROT_0 0x0C +#define PPE_IP_PROT_1 0x10 +#define PPE_IP_PROT_2 0x14 +#define PPE_IP_PROT_3 0x18 +#define PPE_TB_CFG 0x1C +#define PPE_TB_BASE 0x20 +#define PPE_TB_USED 0x24 +#define PPE_BNDR 0x28 +#define PPE_BIND_LMT_0 0x2C +#define PPE_BIND_LMT_1 0x30 +#define PPE_KA 0x34 +#define PPE_UNB_AGE 0x38 +#define PPE_BND_AGE_0 0x3C +#define PPE_BND_AGE_1 0x40 +#define PPE_HASH_SEED 0x44 +#define PPE_DFT_CPORT 0x48 +#define PPE_DFT_CPORT1 0x4C +#define PPE_MCAST_PPSE 0x84 +#define PPE_MCAST_L_0 0x88 +#define PPE_MCAST_H_0 0x8C +#define PPE_MCAST_L_1 0x90 +#define PPE_MCAST_H_1 0x94 +#define PPE_MCAST_L_2 0x98 +#define PPE_MCAST_H_2 0x9C +#define PPE_MCAST_L_3 0xA0 +#define PPE_MCAST_H_3 0xA4 +#define PPE_MCAST_L_4 0xA8 +#define PPE_MCAST_H_4 0xAC +#define PPE_MCAST_L_5 0xB0 +#define PPE_MCAST_H_5 0xB4 +#define PPE_MCAST_L_6 0xBC +#define PPE_MCAST_H_6 0xC0 +#define PPE_MCAST_L_7 0xC4 +#define PPE_MCAST_H_7 0xC8 +#define PPE_MCAST_L_8 0xCC +#define PPE_MCAST_H_8 0xD0 +#define PPE_MCAST_L_9 0xD4 +#define PPE_MCAST_H_9 0xD8 +#define PPE_MCAST_L_A 0xDC +#define PPE_MCAST_H_A 0xE0 +#define PPE_MCAST_L_B 0xE4 +#define PPE_MCAST_H_B 0xE8 +#define PPE_MCAST_L_C 0xEC +#define PPE_MCAST_H_C 0xF0 +#define PPE_MCAST_L_D 0xF4 +#define PPE_MCAST_H_D 0xF8 +#define PPE_MCAST_L_E 0xFC +#define PPE_MCAST_H_E 0xE0 +#define PPE_MCAST_L_F 0x100 +#define PPE_MCAST_H_F 0x104 +#define PPE_MCAST_L_10 0xC00 +#define PPE_MCAST_H_10 0xC04 +#define PPE_MTU_DRP 0x108 +#define PPE_MTU_VLYR_0 0x10C +#define PPE_MTU_VLYR_1 0x110 +#define PPE_MTU_VLYR_2 0x114 +#define PPE_VPM_TPID 0x118 +#define PPE_CAH_CTRL 0x120 +#define PPE_CAH_TAG_SRH 0x124 +#define PPE_CAH_LINE_RW 0x128 +#define PPE_CAH_WDATA 0x12C +#define PPE_CAH_RDATA 0x130 + +#define PPE_MIB_CFG 0X134 +#define PPE_MIB_TB_BASE 0X138 +#define PPE_MIB_SER_CR 0X13C +#define PPE_MIB_SER_R0 0X140 +#define PPE_MIB_SER_R1 0X144 +#define PPE_MIB_SER_R2 0X148 +#define PPE_MIB_CAH_CTRL 0X150 +#define PPE_MIB_CAH_TAG_SRH 0X154 +#define PPE_MIB_CAH_LINE_RW 0X158 +#define PPE_MIB_CAH_WDATA 0X15C +#define PPE_MIB_CAH_RDATA 0X160 +#define PPE_SBW_CTRL 0x174 + +#define GDMA1_FWD_CFG 0x500 +#define GDMA2_FWD_CFG 0x1500 + +/* QDMA Tx queue configuration */ +#define QTX_CFG(x) (QDMA_BASE + ((x) * 0x10)) +#define QTX_CFG_HW_RESV_CNT_OFFSET (8) +#define QTX_CFG_SW_RESV_CNT_OFFSET (0) + +#define QTX_SCH(x) (QDMA_BASE + 0x4 + ((x) * 0x10)) +#define QTX_SCH_MIN_RATE_EN BIT(27) +#define QTX_SCH_MAX_RATE_EN BIT(11) +#define QTX_SCH_MIN_RATE_MAN_OFFSET (20) +#define QTX_SCH_MIN_RATE_EXP_OFFSET (16) +#define QTX_SCH_MAX_RATE_WGHT_OFFSET (12) +#define QTX_SCH_MAX_RATE_MAN_OFFSET (4) +#define QTX_SCH_MAX_RATE_EXP_OFFSET (0) + +/* QDMA Tx scheduler configuration */ +#define QDMA_PAGE (QDMA_BASE + 0x1f0) +#define QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214) +#define QTX_MIB_IF (QDMA_BASE + 0x2bc) +#define QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4)) +#define QDMA_TX_SCH_WFQ_EN BIT(15) + +/*--------------------------------------------------------------------------*/ +/* Register Mask*/ 
+/*--------------------------------------------------------------------------*/ +/* PPE_TB_CFG mask */ +#define TB_ETRY_NUM (0x7 << 0) /* RW */ +#define TB_ENTRY_SIZE (0x1 << 3) /* RW */ +#define SMA (0x3 << 4) /* RW */ +#define NTU_AGE (0x1 << 7) /* RW */ +#define UNBD_AGE (0x1 << 8) /* RW */ +#define TCP_AGE (0x1 << 9) /* RW */ +#define UDP_AGE (0x1 << 10) /* RW */ +#define FIN_AGE (0x1 << 11) /* RW */ +#define KA_CFG (0x3 << 12) +#define HASH_MODE (0x3 << 14) /* RW */ +#define SCAN_MODE (0x3 << 16) /* RW */ +#define XMODE (0x3 << 18) /* RW */ +#define TICK_SEL (0x1 << 24) /* RW */ + + +/*PPE_CAH_CTRL mask*/ +#define CAH_EN (0x1 << 0) /* RW */ +#define CAH_X_MODE (0x1 << 9) /* RW */ + +/*PPE_UNB_AGE mask*/ +#define UNB_DLTA (0xff << 0) /* RW */ +#define UNB_MNP (0xffff << 16) /* RW */ + +/*PPE_BND_AGE_0 mask*/ +#define UDP_DLTA (0xffff << 0) /* RW */ +#define NTU_DLTA (0xffff << 16) /* RW */ + +/*PPE_BND_AGE_1 mask*/ +#define TCP_DLTA (0xffff << 0) /* RW */ +#define FIN_DLTA (0xffff << 16) /* RW */ + +/*PPE_KA mask*/ +#define KA_T (0xffff << 0) /* RW */ +#define TCP_KA (0xff << 16) /* RW */ +#define UDP_KA (0xff << 24) /* RW */ + +/*PPE_BIND_LMT_0 mask*/ +#define QURT_LMT (0x3ff << 0) /* RW */ +#define HALF_LMT (0x3ff << 16) /* RW */ + +/*PPE_BIND_LMT_1 mask*/ +#define FULL_LMT (0x3fff << 0) /* RW */ +#define NTU_KA (0xff << 16) /* RW */ + +/*PPE_BNDR mask*/ +#define BIND_RATE (0xffff << 0) /* RW */ +#define PBND_RD_PRD (0xffff << 16) /* RW */ + +/*PPE_GLO_CFG mask*/ +#define PPE_EN (0x1 << 0) /* RW */ +#define TTL0_DRP (0x1 << 4) /* RW */ +#define MCAST_TB_EN (0x1 << 7) /* RW */ +#define MCAST_HASH (0x3 << 12) /* RW */ + +#define MC_P3_PPSE (0xf << 12) /* RW */ +#define MC_P2_PPSE (0xf << 8) /* RW */ +#define MC_P1_PPSE (0xf << 4) /* RW */ +#define MC_P0_PPSE (0xf << 0) /* RW */ + +#define MIB_EN (0x1 << 0) /* RW */ +#define MIB_READ_CLEAR (0X1 << 1) /* RW */ +#define MIB_CAH_EN (0X1 << 0) /* RW */ + +/*GDMA_FWD_CFG mask */ +#define GDM_UFRC_MASK (0x7 << 12) /* RW */ +#define GDM_BFRC_MASK (0x7 << 8) /*RW*/ +#define GDM_MFRC_MASK (0x7 << 4) /*RW*/ +#define GDM_OFRC_MASK (0x7 << 0) /*RW*/ +#define GDM_ALL_FRC_MASK \ + (GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK) + +/*QDMA_PAGE mask*/ +#define QTX_CFG_PAGE (0xf << 0) /* RW */ + +/*QTX_MIB_IF mask*/ +#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */ +#define VQTX_MIB_EN (0x1 << 28) /* RW */ + +/*--------------------------------------------------------------------------*/ +/* Descriptor Structure */ +/*--------------------------------------------------------------------------*/ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +struct hnat_unbind_info_blk { + u32 time_stamp : 8; + u32 sp : 4; + u32 pcnt : 8; + u32 ilgf : 1; + u32 mc : 1; + u32 preb : 1; + u32 pkt_type : 5; + u32 state : 2; + u32 udp : 1; + u32 sta : 1; /* static entry */ +} __packed; + +struct hnat_bind_info_blk { + u32 time_stamp : 8; + u32 sp : 4; + u32 mc : 1; + u32 ka : 1; /* keep alive */ + u32 vlan_layer : 3; + u32 psn : 1; /* egress packet has PPPoE session */ + u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */ + u32 ps : 1; /* packet sampling */ + u32 cah : 1; /* cacheable flag */ + u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */ + u32 ttl : 1; + u32 pkt_type : 5; + u32 state : 2; + u32 udp : 1; + u32 sta : 1; /* static entry */ +} __packed; + +struct hnat_info_blk2 { + u32 qid : 7; /* QID in Qos Port */ + u32 port_mg : 1; + u32 fqos : 1; /* force to PSE QoS port */ + u32 dp : 4; /* force to PSE port x */ + u32 mcast : 1; /* multicast this 
packet to CPU */ + u32 pcpl : 1; /* OSBN */ + u32 mibf : 1; + u32 alen : 1; + u32 rxid : 2; + u32 winfoi : 1; + u32 port_ag : 4; + u32 dscp : 8; /* DSCP value */ +} __packed; + +struct hnat_winfo { + u32 bssid : 6; /* WiFi Bssidx */ + u32 wcid : 10; /* WiFi wtable Idx */ +} __packed; + +#else +struct hnat_unbind_info_blk { + u32 time_stamp : 8; + u32 pcnt : 16; /* packet count */ + u32 preb : 1; + u32 pkt_type : 3; + u32 state : 2; + u32 udp : 1; + u32 sta : 1; /* static entry */ +} __packed; + +struct hnat_bind_info_blk { + u32 time_stamp : 15; + u32 ka : 1; /* keep alive */ + u32 vlan_layer : 3; + u32 psn : 1; /* egress packet has PPPoE session */ + u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */ + u32 ps : 1; /* packet sampling */ + u32 cah : 1; /* cacheable flag */ + u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */ + u32 ttl : 1; + u32 pkt_type : 3; + u32 state : 2; + u32 udp : 1; + u32 sta : 1; /* static entry */ +} __packed; + +struct hnat_info_blk2 { + u32 qid : 4; /* QID in Qos Port */ + u32 fqos : 1; /* force to PSE QoS port */ + u32 dp : 3; /* force to PSE port x + * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP + */ + u32 mcast : 1; /* multicast this packet to CPU */ + u32 pcpl : 1; /* OSBN */ + u32 mibf : 1; /* 0:off 1:on PPE MIB counter */ + u32 alen : 1; /* 0:post 1:pre packet length in accounting */ + u32 port_mg : 6; /* port meter group */ + u32 port_ag : 6; /* port account group */ + u32 dscp : 8; /* DSCP value */ +} __packed; + +struct hnat_winfo { + u32 bssid : 6; /* WiFi Bssidx */ + u32 wcid : 8; /* WiFi wtable Idx */ + u32 rxid : 2; /* WiFi Ring idx */ +} __packed; +#endif + +/* info blk2 for WHNAT */ +struct hnat_info_blk2_whnat { + u32 qid : 4; /* QID[3:0] in Qos Port */ + u32 fqos : 1; /* force to PSE QoS port */ + u32 dp : 3; /* force to PSE port x + * 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP + */ + u32 mcast : 1; /* multicast this packet to CPU */ + u32 pcpl : 1; /* OSBN */ + u32 mibf : 1; /* 0:off 1:on PPE MIB counter */ + u32 alen : 1; /* 0:post 1:pre packet length in accounting */ + u32 qid2 : 2; /* QID[5:4] in Qos Port */ + u32 resv : 2; + u32 wdmaid : 1; /* 0:to pcie0 dev 1:to pcie1 dev */ + u32 winfoi : 1; /* 0:off 1:on Wi-Fi hwnat support */ + u32 port_ag : 6; /* port account group */ + u32 dscp : 8; /* DSCP value */ +} __packed; + +struct hnat_ipv4_hnapt { + union { + struct hnat_bind_info_blk bfib1; + struct hnat_unbind_info_blk udib1; + u32 info_blk1; + }; + u32 sip; + u32 dip; + u16 dport; + u16 sport; + union { + struct hnat_info_blk2 iblk2; + struct hnat_info_blk2_whnat iblk2w; + u32 info_blk2; + }; + u32 new_sip; + u32 new_dip; + u16 new_dport; + u16 new_sport; + u16 m_timestamp; /* For mcast*/ + u16 resv1; + u32 resv2; + u32 resv3 : 26; + u32 act_dp : 6; /* UDF */ + u16 vlan1; + u16 etype; + u32 dmac_hi; + union { +#if !defined(CONFIG_MEDIATEK_NETSYS_V2) + struct hnat_winfo winfo; +#endif + u16 vlan2; + }; + u16 dmac_lo; + u32 smac_hi; + u16 pppoe_id; + u16 smac_lo; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + u16 minfo; + struct hnat_winfo winfo; +#endif +} __packed; + +struct hnat_ipv4_dslite { + union { + struct hnat_bind_info_blk bfib1; + struct hnat_unbind_info_blk udib1; + u32 info_blk1; + }; + u32 sip; + u32 dip; + u16 dport; + u16 sport; + + u32 tunnel_sipv6_0; + u32 tunnel_sipv6_1; + u32 tunnel_sipv6_2; + u32 tunnel_sipv6_3; + + u32 tunnel_dipv6_0; + u32 tunnel_dipv6_1; + u32 tunnel_dipv6_2; + u32 tunnel_dipv6_3; + + u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */ + u8 priority; /* in order to 
consist with Linux kernel (should be 8bits) */ + u32 hop_limit : 8; + u32 resv2 : 18; + u32 act_dp : 6; /* UDF */ + + union { + struct hnat_info_blk2 iblk2; + struct hnat_info_blk2_whnat iblk2w; + u32 info_blk2; + }; + + u16 vlan1; + u16 etype; + u32 dmac_hi; + union { +#if !defined(CONFIG_MEDIATEK_NETSYS_V2) + struct hnat_winfo winfo; +#endif + u16 vlan2; + }; + u16 dmac_lo; + u32 smac_hi; + u16 pppoe_id; + u16 smac_lo; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + u16 minfo; + struct hnat_winfo winfo; + u32 new_sip; + u32 new_dip; + u16 new_dport; + u16 new_sport; +#endif +} __packed; + +struct hnat_ipv6_3t_route { + union { + struct hnat_bind_info_blk bfib1; + struct hnat_unbind_info_blk udib1; + u32 info_blk1; + }; + u32 ipv6_sip0; + u32 ipv6_sip1; + u32 ipv6_sip2; + u32 ipv6_sip3; + u32 ipv6_dip0; + u32 ipv6_dip1; + u32 ipv6_dip2; + u32 ipv6_dip3; + u32 prot : 8; + u32 hph : 24; /* hash placeholder */ + + u32 resv1; + u32 resv2; + u32 resv3; + u32 resv4 : 26; + u32 act_dp : 6; /* UDF */ + + union { + struct hnat_info_blk2 iblk2; + struct hnat_info_blk2_whnat iblk2w; + u32 info_blk2; + }; + u16 vlan1; + u16 etype; + u32 dmac_hi; + union { +#if !defined(CONFIG_MEDIATEK_NETSYS_V2) + struct hnat_winfo winfo; +#endif + u16 vlan2; + }; + u16 dmac_lo; + u32 smac_hi; + u16 pppoe_id; + u16 smac_lo; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + u16 minfo; + struct hnat_winfo winfo; +#endif +} __packed; + +struct hnat_ipv6_5t_route { + union { + struct hnat_bind_info_blk bfib1; + struct hnat_unbind_info_blk udib1; + u32 info_blk1; + }; + u32 ipv6_sip0; + u32 ipv6_sip1; + u32 ipv6_sip2; + u32 ipv6_sip3; + u32 ipv6_dip0; + u32 ipv6_dip1; + u32 ipv6_dip2; + u32 ipv6_dip3; + u16 dport; + u16 sport; + + u32 resv1; + u32 resv2; + u32 resv3; + u32 resv4 : 26; + u32 act_dp : 6; /* UDF */ + + union { + struct hnat_info_blk2 iblk2; + struct hnat_info_blk2_whnat iblk2w; + u32 info_blk2; + }; + + u16 vlan1; + u16 etype; + u32 dmac_hi; + union { +#if !defined(CONFIG_MEDIATEK_NETSYS_V2) + struct hnat_winfo winfo; +#endif + u16 vlan2; + }; + u16 dmac_lo; + u32 smac_hi; + u16 pppoe_id; + u16 smac_lo; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + u16 minfo; + struct hnat_winfo winfo; +#endif +} __packed; + +struct hnat_ipv6_6rd { + union { + struct hnat_bind_info_blk bfib1; + struct hnat_unbind_info_blk udib1; + u32 info_blk1; + }; + u32 ipv6_sip0; + u32 ipv6_sip1; + u32 ipv6_sip2; + u32 ipv6_sip3; + u32 ipv6_dip0; + u32 ipv6_dip1; + u32 ipv6_dip2; + u32 ipv6_dip3; + u16 dport; + u16 sport; + + u32 tunnel_sipv4; + u32 tunnel_dipv4; + u32 hdr_chksum : 16; + u32 dscp : 8; + u32 ttl : 8; + u32 flag : 3; + u32 resv1 : 13; + u32 per_flow_6rd_id : 1; + u32 resv2 : 9; + u32 act_dp : 6; /* UDF */ + + union { + struct hnat_info_blk2 iblk2; + struct hnat_info_blk2_whnat iblk2w; + u32 info_blk2; + }; + + u16 vlan1; + u16 etype; + u32 dmac_hi; + union { +#if !defined(CONFIG_MEDIATEK_NETSYS_V2) + struct hnat_winfo winfo; +#endif + u16 vlan2; + }; + u16 dmac_lo; + u32 smac_hi; + u16 pppoe_id; + u16 smac_lo; +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + u16 minfo; + struct hnat_winfo winfo; + u32 resv3; + u32 resv4; + u16 new_dport; + u16 new_sport; +#endif +} __packed; + +struct foe_entry { + union { + struct hnat_unbind_info_blk udib1; + struct hnat_bind_info_blk bfib1; + struct hnat_ipv4_hnapt ipv4_hnapt; + struct hnat_ipv4_dslite ipv4_dslite; + struct hnat_ipv6_3t_route ipv6_3t_route; + struct hnat_ipv6_5t_route ipv6_5t_route; + struct hnat_ipv6_6rd ipv6_6rd; + }; +}; + +/* If user wants to change default FOE entry number, both 
DEF_ETRY_NUM and + * DEF_ETRY_NUM_CFG need to be modified. + */ +#define DEF_ETRY_NUM 8192 +/* feasible values : 32768, 16384, 8192, 4096, 2048, 1024 */ +#define DEF_ETRY_NUM_CFG TABLE_8K +/* corresponding values : TABLE_32K, TABLE_16K, TABLE_8K, TABLE_4K, TABLE_2K, + * TABLE_1K + */ +#define MAX_EXT_DEVS (0x3fU) +#define MAX_IF_NUM 64 + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define MAX_PPE_NUM 2 +#else +#define MAX_PPE_NUM 1 +#endif +#define CFG_PPE_NUM (hnat_priv->ppe_num) + +struct mib_entry { + u32 byt_cnt_l; + u16 byt_cnt_h; + u32 pkt_cnt_l; + u8 pkt_cnt_h; + u8 resv0; + u32 resv1; +} __packed; + +struct hnat_accounting { + u64 bytes; + u64 packets; +}; + +enum mtk_hnat_version { + MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */ + MTK_HNAT_V2, /* version 2: mt7622 */ + MTK_HNAT_V3, /* version 3: mt7629 */ + MTK_HNAT_V4, /* version 4: mt7986 */ +}; + +struct mtk_hnat_data { + u8 num_of_sch; + bool whnat; + bool per_flow_accounting; + bool mcast; + enum mtk_hnat_version version; +}; + +struct mtk_hnat { + struct device *dev; + void __iomem *fe_base; + void __iomem *ppe_base[MAX_PPE_NUM]; + struct foe_entry *foe_table_cpu[MAX_PPE_NUM]; + dma_addr_t foe_table_dev[MAX_PPE_NUM]; + u8 enable; + u8 enable1; + struct dentry *root; + struct debugfs_regset32 *regset[MAX_PPE_NUM]; + + struct mib_entry *foe_mib_cpu[MAX_PPE_NUM]; + dma_addr_t foe_mib_dev[MAX_PPE_NUM]; + struct hnat_accounting *acct[MAX_PPE_NUM]; + const struct mtk_hnat_data *data; + + /*devices we plays for*/ + char wan[IFNAMSIZ]; + char lan[IFNAMSIZ]; + char ppd[IFNAMSIZ]; + u16 lvid; + u16 wvid; + + struct reset_control *rstc; + + u8 ppe_num; + u8 gmac_num; + u8 wan_dsa_port; + struct ppe_mcast_table *pmcast; + + u32 foe_etry_num; + u32 etry_num_cfg; + struct net_device *g_ppdev; + struct net_device *g_wandev; + struct net_device *wifi_hook_if[MAX_IF_NUM]; + struct extdev_entry *ext_if[MAX_EXT_DEVS]; + struct timer_list hnat_sma_build_entry_timer; + struct timer_list hnat_reset_timestamp_timer; + struct timer_list hnat_mcast_check_timer; + bool nf_stat_en; +}; + +struct extdev_entry { + char name[IFNAMSIZ]; + struct net_device *dev; +}; + +struct tcpudphdr { + __be16 src; + __be16 dst; +}; + +enum FoeEntryState { INVALID = 0, UNBIND = 1, BIND = 2, FIN = 3 }; + +enum FoeIpAct { + IPV4_HNAPT = 0, + IPV4_HNAT = 1, + IPV4_DSLITE = 3, + IPV6_3T_ROUTE = 4, + IPV6_5T_ROUTE = 5, + IPV6_6RD = 7, +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + IPV4_MAP_T = 8, + IPV4_MAP_E = 9, +#else + IPV4_MAP_T = 6, + IPV4_MAP_E = 6, +#endif +}; + +/*--------------------------------------------------------------------------*/ +/* Common Definition*/ +/*--------------------------------------------------------------------------*/ + +#define HNAT_SW_VER "1.1.0" +#define HASH_SEED_KEY 0x12345678 + +/*PPE_TB_CFG value*/ +#define ENTRY_80B 1 +#define ENTRY_64B 0 +#define TABLE_1K 0 +#define TABLE_2K 1 +#define TABLE_4K 2 +#define TABLE_8K 3 +#define TABLE_16K 4 +#define TABLE_32K 5 +#define SMA_DROP 0 /* Drop the packet */ +#define SMA_DROP2 1 /* Drop the packet */ +#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */ +#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */ +#define HASH_MODE_0 0 +#define HASH_MODE_1 1 +#define HASH_MODE_2 2 +#define HASH_MODE_3 3 + +/*PPE_FLOW_CFG*/ +#define BIT_FUC_FOE BIT(2) +#define BIT_FMC_FOE BIT(1) +#define BIT_FBC_FOE BIT(0) +#define BIT_UDP_IP4F_NAT_EN BIT(7) /*Enable IPv4 fragment + UDP packet NAT*/ +#define BIT_IPV6_3T_ROUTE_EN BIT(8) +#define BIT_IPV6_5T_ROUTE_EN BIT(9) +#define BIT_IPV6_6RD_EN 
BIT(10) +#define BIT_IPV4_NAT_EN BIT(12) +#define BIT_IPV4_NAPT_EN BIT(13) +#define BIT_IPV4_DSL_EN BIT(14) +#define BIT_MIB_BUSY BIT(16) +#define BIT_IPV4_NAT_FRAG_EN BIT(17) +#define BIT_IPV4_HASH_GREK BIT(19) +#define BIT_IPV6_HASH_GREK BIT(20) +#define BIT_IPV4_MAPE_EN BIT(21) +#define BIT_IPV4_MAPT_EN BIT(22) + +/*GDMA_FWD_CFG value*/ +#define BITS_GDM_UFRC_P_PPE (NR_PPE0_PORT << 12) +#define BITS_GDM_BFRC_P_PPE (NR_PPE0_PORT << 8) +#define BITS_GDM_MFRC_P_PPE (NR_PPE0_PORT << 4) +#define BITS_GDM_OFRC_P_PPE (NR_PPE0_PORT << 0) +#define BITS_GDM_ALL_FRC_P_PPE \ + (BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE | \ + BITS_GDM_OFRC_P_PPE) + +#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12) +#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8) +#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4) +#define BITS_GDM_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0) +#define BITS_GDM_ALL_FRC_P_CPU_PDMA \ + (BITS_GDM_UFRC_P_CPU_PDMA | BITS_GDM_BFRC_P_CPU_PDMA | \ + BITS_GDM_MFRC_P_CPU_PDMA | BITS_GDM_OFRC_P_CPU_PDMA) + +#define BITS_GDM_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12) +#define BITS_GDM_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8) +#define BITS_GDM_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4) +#define BITS_GDM_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0) +#define BITS_GDM_ALL_FRC_P_CPU_QDMA \ + (BITS_GDM_UFRC_P_CPU_QDMA | BITS_GDM_BFRC_P_CPU_QDMA | \ + BITS_GDM_MFRC_P_CPU_QDMA | BITS_GDM_OFRC_P_CPU_QDMA) + +#define BITS_GDM_UFRC_P_DISCARD (NR_DISCARD << 12) +#define BITS_GDM_BFRC_P_DISCARD (NR_DISCARD << 8) +#define BITS_GDM_MFRC_P_DISCARD (NR_DISCARD << 4) +#define BITS_GDM_OFRC_P_DISCARD (NR_DISCARD << 0) +#define BITS_GDM_ALL_FRC_P_DISCARD \ + (BITS_GDM_UFRC_P_DISCARD | BITS_GDM_BFRC_P_DISCARD | \ + BITS_GDM_MFRC_P_DISCARD | BITS_GDM_OFRC_P_DISCARD) + +#define hnat_is_enabled(hnat_priv) (hnat_priv->enable) +#define hnat_enabled(hnat_priv) (hnat_priv->enable = 1) +#define hnat_disabled(hnat_priv) (hnat_priv->enable = 0) +#define hnat_is_enabled1(hnat_priv) (hnat_priv->enable1) +#define hnat_enabled1(hnat_priv) (hnat_priv->enable1 = 1) +#define hnat_disabled1(hnat_priv) (hnat_priv->enable1 = 0) + +#define entry_hnat_is_bound(e) (e->bfib1.state == BIND) +#define entry_hnat_state(e) (e->bfib1.state) + +#define skb_hnat_is_hashed(skb) \ + (skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num) +#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN) +#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN) +#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD) +#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL) +#define FROM_EXT(skb) (skb_hnat_iface(skb) == FOE_MAGIC_EXT) +#define FROM_WED(skb) ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) || \ + (skb_hnat_iface(skb) == FOE_MAGIC_WED1)) +#define FOE_MAGIC_GE_LAN 0x1 +#define FOE_MAGIC_GE_WAN 0x2 +#define FOE_MAGIC_EXT 0x3 +#define FOE_MAGIC_GE_VIRTUAL 0x4 +#define FOE_MAGIC_GE_PPD 0x5 +#define FOE_MAGIC_WED0 0x78 +#define FOE_MAGIC_WED1 0x79 +#define FOE_INVALID 0xf +#define index6b(i) (0x3fU - i) + +#define IPV4_HNAPT 0 +#define IPV4_HNAT 1 +#define IP_FORMAT(addr) \ + (((unsigned char *)&addr)[3], ((unsigned char *)&addr)[2], \ + ((unsigned char *)&addr)[1], ((unsigned char *)&addr)[0]) + +/*PSE Ports*/ +#define NR_PDMA_PORT 0 +#define NR_GMAC1_PORT 1 +#define NR_GMAC2_PORT 2 +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +#define NR_WHNAT_WDMA_PORT EINVAL +#define NR_PPE0_PORT 3 +#define NR_PPE1_PORT 4 +#else +#define NR_WHNAT_WDMA_PORT 3 +#define NR_PPE0_PORT 4 +#endif +#define 
NR_QDMA_PORT 5 +#define NR_DISCARD 7 +#define NR_WDMA0_PORT 8 +#define NR_WDMA1_PORT 9 +#define LAN_DEV_NAME hnat_priv->lan +#define IS_WAN(dev) \ + (!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan))) +#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME))) +#define IS_BR(dev) (!strncmp(dev->name, "br", 2)) +#define IS_WHNAT(dev) \ + ((hnat_priv->data->whnat && \ + (get_wifi_hook_if_index_from_dev(dev) != 0)) ? 1 : 0) +#define IS_EXT(dev) ((get_index_from_dev(dev) != 0) ? 1 : 0) +#define IS_PPD(dev) (!strcmp(dev->name, hnat_priv->ppd)) +#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0) +#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0) +#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x)) +#define IS_IPV4_DSLITE(x) (((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0) +#define IS_IPV4_MAPE(x) (((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0) +#define IS_IPV4_MAPT(x) (((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0) +#define IS_IPV6_3T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0) +#define IS_IPV6_5T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0) +#define IS_IPV6_6RD(x) (((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0) +#define IS_IPV6_GRP(x) \ + (IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) | \ + IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x) | IS_IPV4_MAPT(x)) +#define IS_BOND_MODE (!strncmp(LAN_DEV_NAME, "bond", 4)) +#define IS_GMAC1_MODE ((hnat_priv->gmac_num == 1) ? 1 : 0) +#define IS_HQOS_MODE (qos_toggle == 1) +#define IS_PPPQ_MODE (qos_toggle == 2) /* Per Port Per Queue */ +#define MAX_PPPQ_PORT_NUM 6 + +#define es(entry) (entry_state[entry->bfib1.state]) +#define ei(entry, end) (hnat_priv->foe_etry_num - (int)(end - entry)) +#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type]) +#define ipv4_smac(mac, e) \ + ({ \ + mac[0] = e->ipv4_hnapt.smac_hi[3]; \ + mac[1] = e->ipv4_hnapt.smac_hi[2]; \ + mac[2] = e->ipv4_hnapt.smac_hi[1]; \ + mac[3] = e->ipv4_hnapt.smac_hi[0]; \ + mac[4] = e->ipv4_hnapt.smac_lo[1]; \ + mac[5] = e->ipv4_hnapt.smac_lo[0]; \ + }) +#define ipv4_dmac(mac, e) \ + ({ \ + mac[0] = e->ipv4_hnapt.dmac_hi[3]; \ + mac[1] = e->ipv4_hnapt.dmac_hi[2]; \ + mac[2] = e->ipv4_hnapt.dmac_hi[1]; \ + mac[3] = e->ipv4_hnapt.dmac_hi[0]; \ + mac[4] = e->ipv4_hnapt.dmac_lo[1]; \ + mac[5] = e->ipv4_hnapt.dmac_lo[0]; \ + }) + +#define IS_DSA_LAN(dev) (!strncmp(dev->name, "lan", 3)) +#define IS_DSA_WAN(dev) (!strncmp(dev->name, "wan", 3)) +#define NONE_DSA_PORT 0xff +#define MAX_CRSN_NUM 32 +#define IPV6_HDR_LEN 40 + +/*QDMA_PAGE value*/ +#define NUM_OF_Q_PER_PAGE 16 + +/*IPv6 Header*/ +#ifndef NEXTHDR_IPIP +#define NEXTHDR_IPIP 4 +#endif + +extern const struct of_device_id of_hnat_match[]; +extern struct mtk_hnat *hnat_priv; + +#if defined(CONFIG_NET_DSA_MT7530) +u32 hnat_dsa_fill_stag(const struct net_device *netdev, + struct foe_entry *entry, + struct flow_offload_hw_path *hw_path, + u16 eth_proto, int mape); + +static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv) +{ + return (priv->wan_dsa_port != NONE_DSA_PORT); +} +#else +static inline u32 hnat_dsa_fill_stag(const struct net_device *netdev, + struct foe_entry *entry, + struct flow_offload_hw_path *hw_path, + u16 eth_proto, int mape) +{ + return 0; +} + +static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv) +{ + return false; +} +#endif + +void hnat_deinit_debugfs(struct mtk_hnat *h); +int hnat_init_debugfs(struct mtk_hnat *h); +int hnat_register_nf_hooks(void); +void hnat_unregister_nf_hooks(void); +int 
whnat_adjust_nf_hooks(void); +int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *unused); +extern int dbg_cpu_reason; +extern int debug_level; +extern int hook_toggle; +extern int mape_toggle; +extern int qos_toggle; + +int ext_if_add(struct extdev_entry *ext_entry); +int ext_if_del(struct extdev_entry *ext_entry); +void cr_set_field(void __iomem *reg, u32 field, u32 val); +int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no); +int mtk_sw_nat_hook_rx(struct sk_buff *skb); +void mtk_ppe_dev_register_hook(struct net_device *dev); +void mtk_ppe_dev_unregister_hook(struct net_device *dev); +int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event, + void *ptr); +int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event, + void *ptr); +uint32_t foe_dump_pkt(struct sk_buff *skb); +uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb); +int hnat_enable_hook(void); +int hnat_disable_hook(void); +void hnat_cache_ebl(int enable); +void hnat_qos_shaper_ebl(u32 id, u32 enable); +void set_gmac_ppe_fwd(int gmac_no, int enable); +int entry_detail(u32 ppe_id, int index); +int entry_delete_by_mac(u8 *mac); +int entry_delete(u32 ppe_id, int index); +int hnat_warm_init(void); + +struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id, + u32 index, struct hnat_accounting *diff); + +static inline u16 foe_timestamp(struct mtk_hnat *h) +{ + return (readl(hnat_priv->fe_base + 0x0010)) & 0xffff; +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_debugfs.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_debugfs.c new file mode 100644 index 000000000..d4b9b6368 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_debugfs.c @@ -0,0 +1,2351 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014-2016 Sean Wang + * Copyright (C) 2016-2017 John Crispin + */ + +#include +#include +#include +#include +#include + +#include "hnat.h" +#include "nf_hnat_mtk.h" +#include "../mtk_eth_soc.h" + +int dbg_entry_state = BIND; +typedef int (*debugfs_write_func)(int par1); +int debug_level; +int dbg_cpu_reason; +int hook_toggle; +int mape_toggle; +int qos_toggle; +unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM]; + +static const char * const entry_state[] = { "INVALID", "UNBIND", "BIND", "FIN" }; + +static const char * const packet_type[] = { + "IPV4_HNAPT", "IPV4_HNAT", "IPV6_1T_ROUTE", "IPV4_DSLITE", + "IPV6_3T_ROUTE", "IPV6_5T_ROUTE", "REV", "IPV6_6RD", + "IPV4_MAP_T", "IPV4_MAP_E", +}; + +static uint8_t *show_cpu_reason(struct sk_buff *skb) +{ + static u8 buf[32]; + + switch (skb_hnat_reason(skb)) { + case TTL_0: + return "IPv4(IPv6) TTL(hop limit)\n"; + case HAS_OPTION_HEADER: + return "Ipv4(IPv6) has option(extension) header\n"; + case NO_FLOW_IS_ASSIGNED: + return "No flow is assigned\n"; + case IPV4_WITH_FRAGMENT: + return "IPv4 HNAT doesn't support IPv4 /w fragment\n"; + case IPV4_HNAPT_DSLITE_WITH_FRAGMENT: + return "IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n"; + case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP: + return "IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n"; + case IPV6_5T_6RD_WITHOUT_TCP_UDP: + return "IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n"; + case TCP_FIN_SYN_RST: + return "Ingress packet is TCP fin/syn/rst\n"; + case UN_HIT: + return "FOE Un-hit\n"; + case HIT_UNBIND: + return "FOE Hit unbind\n"; + case HIT_UNBIND_RATE_REACH: + return "FOE Hit unbind & rate reach\n"; + case HIT_BIND_TCP_FIN: + return "Hit bind PPE TCP FIN entry\n"; + case HIT_BIND_TTL_1: + return "Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1\n"; + case HIT_BIND_WITH_VLAN_VIOLATION: + return "Hit bind and VLAN replacement violation\n"; + case HIT_BIND_KEEPALIVE_UC_OLD_HDR: + return "Hit bind and keep alive with unicast old-header packet\n"; + case HIT_BIND_KEEPALIVE_MC_NEW_HDR: + return "Hit bind and keep alive with multicast new-header packet\n"; + case HIT_BIND_KEEPALIVE_DUP_OLD_HDR: + return "Hit bind and keep alive with duplicate old-header packet\n"; + case HIT_BIND_FORCE_TO_CPU: + return "FOE Hit bind & force to CPU\n"; + case HIT_BIND_EXCEED_MTU: + return "Hit bind and exceed MTU\n"; + case HIT_BIND_MULTICAST_TO_CPU: + return "Hit bind multicast packet to CPU\n"; + case HIT_BIND_MULTICAST_TO_GMAC_CPU: + return "Hit bind multicast packet to GMAC & CPU\n"; + case HIT_PRE_BIND: + return "Pre bind\n"; + } + + sprintf(buf, "CPU Reason Error - %X\n", skb_hnat_entry(skb)); + return buf; +} + +uint32_t foe_dump_pkt(struct sk_buff *skb) +{ + struct foe_entry *entry; + + if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num || + skb_hnat_ppe(skb) >= CFG_PPE_NUM) + return 1; + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + pr_info("\nRx========\n", skb_hnat_entry(skb)); + pr_info("RcvIF=%s\n", skb->dev->name); + pr_info("PPE_ID=%d\n", skb_hnat_ppe(skb)); + pr_info("FOE_Entry=%d\n", skb_hnat_entry(skb)); + pr_info("CPU Reason=%s", show_cpu_reason(skb)); + pr_info("ALG=%d\n", skb_hnat_alg(skb)); + pr_info("SP=%d\n", skb_hnat_sport(skb)); + + /* some special alert occurred, so entry_num is useless (just skip it) */ + if (skb_hnat_entry(skb) == 0x3fff) + return 1; + + /* PPE: IPv4 packet=IPV4_HNAT IPv6 packet=IPV6_ROUTE */ + if (IS_IPV4_GRP(entry)) { + __be32 saddr = htonl(entry->ipv4_hnapt.sip); + __be32 daddr = 
htonl(entry->ipv4_hnapt.dip); + + pr_info("Information Block 1=%x\n", + entry->ipv4_hnapt.info_blk1); + pr_info("SIP=%pI4\n", &saddr); + pr_info("DIP=%pI4\n", &daddr); + pr_info("SPORT=%d\n", entry->ipv4_hnapt.sport); + pr_info("DPORT=%d\n", entry->ipv4_hnapt.dport); + pr_info("Information Block 2=%x\n", + entry->ipv4_hnapt.info_blk2); + pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ? + "Invalid" : entry->bfib1.state == 1 ? + "Unbind" : entry->bfib1.state == 2 ? + "BIND" : entry->bfib1.state == 3 ? + "FIN" : "Unknown", + entry->ipv4_hnapt.bfib1.udp == 0 ? + "TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ? + "UDP" : "Unknown"); + } else if (IS_IPV6_GRP(entry)) { + pr_info("Information Block 1=%x\n", + entry->ipv6_5t_route.info_blk1); + pr_info("IPv6_SIP=%08X:%08X:%08X:%08X\n", + entry->ipv6_5t_route.ipv6_sip0, + entry->ipv6_5t_route.ipv6_sip1, + entry->ipv6_5t_route.ipv6_sip2, + entry->ipv6_5t_route.ipv6_sip3); + pr_info("IPv6_DIP=%08X:%08X:%08X:%08X\n", + entry->ipv6_5t_route.ipv6_dip0, + entry->ipv6_5t_route.ipv6_dip1, + entry->ipv6_5t_route.ipv6_dip2, + entry->ipv6_5t_route.ipv6_dip3); + pr_info("SPORT=%d\n", entry->ipv6_5t_route.sport); + pr_info("DPORT=%d\n", entry->ipv6_5t_route.dport); + pr_info("Information Block 2=%x\n", + entry->ipv6_5t_route.info_blk2); + pr_info("State = %s, proto = %s\n", entry->bfib1.state == 0 ? + "Invalid" : entry->bfib1.state == 1 ? + "Unbind" : entry->bfib1.state == 2 ? + "BIND" : entry->bfib1.state == 3 ? + "FIN" : "Unknown", + entry->ipv6_5t_route.bfib1.udp == 0 ? + "TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ? + "UDP" : "Unknown"); + } else { + pr_info("unknown Pkt_type=%d\n", entry->bfib1.pkt_type); + } + + pr_info("==================================\n"); + return 1; +} + +uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb) +{ + switch (skb_hnat_reason(skb)) { + case TTL_0: + dbg_cpu_reason_cnt[0]++; + return 0; + case HAS_OPTION_HEADER: + dbg_cpu_reason_cnt[1]++; + return 0; + case NO_FLOW_IS_ASSIGNED: + dbg_cpu_reason_cnt[2]++; + return 0; + case IPV4_WITH_FRAGMENT: + dbg_cpu_reason_cnt[3]++; + return 0; + case IPV4_HNAPT_DSLITE_WITH_FRAGMENT: + dbg_cpu_reason_cnt[4]++; + return 0; + case IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP: + dbg_cpu_reason_cnt[5]++; + return 0; + case IPV6_5T_6RD_WITHOUT_TCP_UDP: + dbg_cpu_reason_cnt[6]++; + return 0; + case TCP_FIN_SYN_RST: + dbg_cpu_reason_cnt[7]++; + return 0; + case UN_HIT: + dbg_cpu_reason_cnt[8]++; + return 0; + case HIT_UNBIND: + dbg_cpu_reason_cnt[9]++; + return 0; + case HIT_UNBIND_RATE_REACH: + dbg_cpu_reason_cnt[10]++; + return 0; + case HIT_BIND_TCP_FIN: + dbg_cpu_reason_cnt[11]++; + return 0; + case HIT_BIND_TTL_1: + dbg_cpu_reason_cnt[12]++; + return 0; + case HIT_BIND_WITH_VLAN_VIOLATION: + dbg_cpu_reason_cnt[13]++; + return 0; + case HIT_BIND_KEEPALIVE_UC_OLD_HDR: + dbg_cpu_reason_cnt[14]++; + return 0; + case HIT_BIND_KEEPALIVE_MC_NEW_HDR: + dbg_cpu_reason_cnt[15]++; + return 0; + case HIT_BIND_KEEPALIVE_DUP_OLD_HDR: + dbg_cpu_reason_cnt[16]++; + return 0; + case HIT_BIND_FORCE_TO_CPU: + dbg_cpu_reason_cnt[17]++; + return 0; + case HIT_BIND_EXCEED_MTU: + dbg_cpu_reason_cnt[18]++; + return 0; + case HIT_BIND_MULTICAST_TO_CPU: + dbg_cpu_reason_cnt[19]++; + return 0; + case HIT_BIND_MULTICAST_TO_GMAC_CPU: + dbg_cpu_reason_cnt[20]++; + return 0; + case HIT_PRE_BIND: + dbg_cpu_reason_cnt[21]++; + return 0; + } + + return 0; +} + +int hnat_set_usage(int level) +{ + debug_level = level; + pr_info("Read cpu_reason count: cat /sys/kernel/debug/hnat/cpu_reason\n\n"); + 
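+	/* Descriptive note (not in the vendor code): the messages below document the
+	 * write interface of the same debugfs file, i.e.
+	 * echo "<type> <option>" > /sys/kernel/debug/hnat/cpu_reason
+	 * which selects either the debug level or the CPU reason to track.
+	 */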
pr_info("====================Advanced Settings====================\n"); + pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/cpu_reason\n\n"); + pr_info("Commands: [type] [option]\n"); + pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n", + debug_level); + pr_info(" 1 cpu_reason Track entries of the set cpu_reason\n"); + pr_info(" Set type=1 will change debug_level=7\n"); + pr_info("cpu_reason list:\n"); + pr_info(" 2 IPv4(IPv6) TTL(hop limit) = 0\n"); + pr_info(" 3 IPv4(IPv6) has option(extension) header\n"); + pr_info(" 7 No flow is assigned\n"); + pr_info(" 8 IPv4 HNAT doesn't support IPv4 /w fragment\n"); + pr_info(" 9 IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment\n"); + pr_info(" 10 IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport\n"); + pr_info(" 11 IPv6 5T-route/6RD can't find TCP/UDP sport/dport\n"); + pr_info(" 12 Ingress packet is TCP fin/syn/rst\n"); + pr_info(" 13 FOE Un-hit\n"); + pr_info(" 14 FOE Hit unbind\n"); + pr_info(" 15 FOE Hit unbind & rate reach\n"); + pr_info(" 16 Hit bind PPE TCP FIN entry\n"); + pr_info(" 17 Hit bind PPE entry and TTL(hop limit) = 1\n"); + pr_info(" 18 Hit bind and VLAN replacement violation\n"); + pr_info(" 19 Hit bind and keep alive with unicast old-header packet\n"); + pr_info(" 20 Hit bind and keep alive with multicast new-header packet\n"); + pr_info(" 21 Hit bind and keep alive with duplicate old-header packet\n"); + pr_info(" 22 FOE Hit bind & force to CPU\n"); + pr_info(" 23 HIT_BIND_WITH_OPTION_HEADER\n"); + pr_info(" 24 Switch clone multicast packet to CPU\n"); + pr_info(" 25 Switch clone multicast packet to GMAC1 & CPU\n"); + pr_info(" 26 HIT_PRE_BIND\n"); + pr_info(" 27 HIT_BIND_PACKET_SAMPLING\n"); + pr_info(" 28 Hit bind and exceed MTU\n"); + + return 0; +} + +int hnat_cpu_reason(int cpu_reason) +{ + dbg_cpu_reason = cpu_reason; + debug_level = 7; + pr_info("show cpu reason = %d\n", cpu_reason); + + return 0; +} + +int entry_set_usage(int level) +{ + debug_level = level; + pr_info("Show all entries(default state=bind): cat /sys/kernel/debug/hnat/hnat_entry\n\n"); + pr_info("====================Advanced Settings====================\n"); + pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_entry\n\n"); + pr_info("Commands: [type] [option]\n"); + pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n", + debug_level); + pr_info(" 1 0~3 Change tracking state\n"); + pr_info(" (0:invalid; 1:unbind; 2:bind; 3:fin)\n"); + pr_info(" 2 Show PPE0 specific foe entry info. of assigned \n"); + pr_info(" 3 Delete PPE0 specific foe entry of assigned \n"); + pr_info(" 4 Show PPE1 specific foe entry info. of assigned \n"); + pr_info(" 5 Delete PPE1 specific foe entry of assigned \n"); + pr_info(" When entry_idx is -1, clear all entries\n"); + + return 0; +} + +int entry_set_state(int state) +{ + dbg_entry_state = state; + pr_info("ENTRY STATE = %s\n", dbg_entry_state == 0 ? + "Invalid" : dbg_entry_state == 1 ? + "Unbind" : dbg_entry_state == 2 ? + "BIND" : dbg_entry_state == 3 ? 
+ "FIN" : "Unknown"); + return 0; +} + +int wrapped_ppe0_entry_detail(int index) { + entry_detail(0, index); + return 0; +} + +int wrapped_ppe1_entry_detail(int index) { + entry_detail(1, index); + return 0; +} + +int entry_detail(u32 ppe_id, int index) +{ + struct foe_entry *entry; + struct mtk_hnat *h = hnat_priv; + u32 *p; + u32 i = 0; + u32 print_cnt; + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be32 saddr, daddr, nsaddr, ndaddr; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + if (index < 0 || index >= h->foe_etry_num) { + pr_info("Invalid entry index\n"); + return -EINVAL; + } + + entry = h->foe_table_cpu[ppe_id] + index; + saddr = htonl(entry->ipv4_hnapt.sip); + daddr = htonl(entry->ipv4_hnapt.dip); + nsaddr = htonl(entry->ipv4_hnapt.new_sip); + ndaddr = htonl(entry->ipv4_hnapt.new_dip); + p = (uint32_t *)entry; + pr_info("=========================\n", + ppe_id, index, entry); + if (debug_level >= 2) { + print_cnt = 20; + for (i = 0; i < print_cnt; i++) + pr_info("%02d: %08X\n", i, *(p + i)); + } + pr_info("-----------------------------------\n"); + pr_info("Information Block 1: %08X\n", entry->ipv4_hnapt.info_blk1); + + if (IS_IPV4_HNAPT(entry)) { + pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)", + entry->ipv4_hnapt.info_blk2, + entry->ipv4_hnapt.iblk2.dp, + entry->ipv4_hnapt.iblk2.fqos, + entry->ipv4_hnapt.iblk2.qid); + pr_info("Create IPv4 HNAPT entry\n"); + pr_info("IPv4 Org IP/Port: %pI4:%d->%pI4:%d\n", &saddr, + entry->ipv4_hnapt.sport, &daddr, + entry->ipv4_hnapt.dport); + pr_info("IPv4 New IP/Port: %pI4:%d->%pI4:%d\n", &nsaddr, + entry->ipv4_hnapt.new_sport, &ndaddr, + entry->ipv4_hnapt.new_dport); + } else if (IS_IPV4_HNAT(entry)) { + pr_info("Information Block 2: %08X\n", + entry->ipv4_hnapt.info_blk2); + pr_info("Create IPv4 HNAT entry\n"); + pr_info("IPv4 Org IP: %pI4->%pI4\n", &saddr, &daddr); + pr_info("IPv4 New IP: %pI4->%pI4\n", &nsaddr, &ndaddr); + } else if (IS_IPV4_DSLITE(entry)) { + pr_info("Information Block 2: %08X\n", + entry->ipv4_dslite.info_blk2); + pr_info("Create IPv4 Ds-Lite entry\n"); + pr_info("IPv4 Ds-Lite: %pI4:%d->%pI4:%d\n", &saddr, + entry->ipv4_dslite.sport, &daddr, + entry->ipv4_dslite.dport); + pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n", + entry->ipv4_dslite.tunnel_sipv6_0, + entry->ipv4_dslite.tunnel_sipv6_1, + entry->ipv4_dslite.tunnel_sipv6_2, + entry->ipv4_dslite.tunnel_sipv6_3, + entry->ipv4_dslite.tunnel_dipv6_0, + entry->ipv4_dslite.tunnel_dipv6_1, + entry->ipv4_dslite.tunnel_dipv6_2, + entry->ipv4_dslite.tunnel_dipv6_3); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + } else if (IS_IPV4_MAPE(entry)) { + nsaddr = htonl(entry->ipv4_dslite.new_sip); + ndaddr = htonl(entry->ipv4_dslite.new_dip); + + pr_info("Information Block 2: %08X\n", + entry->ipv4_dslite.info_blk2); + pr_info("Create IPv4 MAP-E entry\n"); + pr_info("IPv4 MAP-E Org IP/Port: %pI4:%d->%pI4:%d\n", + &saddr, entry->ipv4_dslite.sport, + &daddr, entry->ipv4_dslite.dport); + pr_info("IPv4 MAP-E New IP/Port: %pI4:%d->%pI4:%d\n", + &nsaddr, entry->ipv4_dslite.new_sport, + &ndaddr, entry->ipv4_dslite.new_dport); + pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n", + entry->ipv4_dslite.tunnel_sipv6_0, + entry->ipv4_dslite.tunnel_sipv6_1, + entry->ipv4_dslite.tunnel_sipv6_2, + entry->ipv4_dslite.tunnel_sipv6_3, + entry->ipv4_dslite.tunnel_dipv6_0, + entry->ipv4_dslite.tunnel_dipv6_1, + entry->ipv4_dslite.tunnel_dipv6_2, + entry->ipv4_dslite.tunnel_dipv6_3); +#endif + } else if (IS_IPV6_3T_ROUTE(entry)) { + 
pr_info("Information Block 2: %08X\n", + entry->ipv6_3t_route.info_blk2); + pr_info("Create IPv6 3-Tuple entry\n"); + pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X-> %08X:%08X:%08X:%08X (Prot=%d)\n", + entry->ipv6_3t_route.ipv6_sip0, + entry->ipv6_3t_route.ipv6_sip1, + entry->ipv6_3t_route.ipv6_sip2, + entry->ipv6_3t_route.ipv6_sip3, + entry->ipv6_3t_route.ipv6_dip0, + entry->ipv6_3t_route.ipv6_dip1, + entry->ipv6_3t_route.ipv6_dip2, + entry->ipv6_3t_route.ipv6_dip3, + entry->ipv6_3t_route.prot); + } else if (IS_IPV6_5T_ROUTE(entry)) { + pr_info("Information Block 2: %08X\n", + entry->ipv6_5t_route.info_blk2); + pr_info("Create IPv6 5-Tuple entry\n"); + pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n", + entry->ipv6_5t_route.ipv6_sip0, + entry->ipv6_5t_route.ipv6_sip1, + entry->ipv6_5t_route.ipv6_sip2, + entry->ipv6_5t_route.ipv6_sip3, + entry->ipv6_5t_route.sport, + entry->ipv6_5t_route.ipv6_dip0, + entry->ipv6_5t_route.ipv6_dip1, + entry->ipv6_5t_route.ipv6_dip2, + entry->ipv6_5t_route.ipv6_dip3, + entry->ipv6_5t_route.dport); + } else if (IS_IPV6_6RD(entry)) { + pr_info("Information Block 2: %08X\n", + entry->ipv6_6rd.info_blk2); + pr_info("Create IPv6 6RD entry\n"); + pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n", + entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1, + entry->ipv6_6rd.ipv6_sip2, entry->ipv6_6rd.ipv6_sip3, + entry->ipv6_6rd.sport, entry->ipv6_6rd.ipv6_dip0, + entry->ipv6_6rd.ipv6_dip1, entry->ipv6_6rd.ipv6_dip2, + entry->ipv6_6rd.ipv6_dip3, entry->ipv6_6rd.dport); + } + if (IS_IPV4_HNAPT(entry) || IS_IPV4_HNAT(entry)) { + *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi); + *((u16 *)&h_source[4]) = swab16(entry->ipv4_hnapt.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi); + *((u16 *)&h_dest[4]) = swab16(entry->ipv4_hnapt.dmac_lo); + pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest); + pr_info("State = %s, ", entry->bfib1.state == 0 ? + "Invalid" : entry->bfib1.state == 1 ? + "Unbind" : entry->bfib1.state == 2 ? + "BIND" : entry->bfib1.state == 3 ? + "FIN" : "Unknown"); + pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer); + pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n", + entry->ipv4_hnapt.etype, entry->ipv4_hnapt.vlan1, + entry->ipv4_hnapt.vlan2); + pr_info("multicast = %d, pppoe = %d, proto = %s\n", + entry->ipv4_hnapt.iblk2.mcast, + entry->ipv4_hnapt.bfib1.psn, + entry->ipv4_hnapt.bfib1.udp == 0 ? + "TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ? + "UDP" : "Unknown"); + pr_info("=========================================\n\n"); + } else { + *((u32 *)h_source) = swab32(entry->ipv6_5t_route.smac_hi); + *((u16 *)&h_source[4]) = swab16(entry->ipv6_5t_route.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi); + *((u16 *)&h_dest[4]) = swab16(entry->ipv6_5t_route.dmac_lo); + pr_info("SMAC=%pM => DMAC=%pM\n", h_source, h_dest); + pr_info("State = %s, ", entry->bfib1.state == 0 ? + "Invalid" : entry->bfib1.state == 1 ? + "Unbind" : entry->bfib1.state == 2 ? + "BIND" : entry->bfib1.state == 3 ? + "FIN" : "Unknown"); + + pr_info("Vlan_Layer = %u, ", entry->bfib1.vlan_layer); + pr_info("Eth_type = 0x%x, Vid1 = 0x%x, Vid2 = 0x%x\n", + entry->ipv6_5t_route.etype, entry->ipv6_5t_route.vlan1, + entry->ipv6_5t_route.vlan2); + pr_info("multicast = %d, pppoe = %d, proto = %s\n", + entry->ipv6_5t_route.iblk2.mcast, + entry->ipv6_5t_route.bfib1.psn, + entry->ipv6_5t_route.bfib1.udp == 0 ? + "TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ? 
+ "UDP" : "Unknown"); + pr_info("=========================================\n\n"); + } + return 0; +} + +int wrapped_ppe0_entry_delete(int index) { + entry_delete(0, index); + return 0; +} + +int wrapped_ppe1_entry_delete(int index) { + entry_delete(1, index); + return 0; +} + +int entry_delete(u32 ppe_id, int index) +{ + struct foe_entry *entry; + struct mtk_hnat *h = hnat_priv; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + if (index < -1 || index >= (int)h->foe_etry_num) { + pr_info("Invalid entry index\n"); + return -EINVAL; + } + + if (index == -1) { + memset(h->foe_table_cpu[ppe_id], 0, h->foe_etry_num * sizeof(struct foe_entry)); + pr_info("clear all foe entry\n"); + } else { + + entry = h->foe_table_cpu[ppe_id] + index; + memset(entry, 0, sizeof(struct foe_entry)); + pr_info("delete ppe id = %d, entry idx = %d\n", ppe_id, index); + } + + /* clear HWNAT cache */ + hnat_cache_ebl(1); + + return 0; +} +EXPORT_SYMBOL(entry_delete); + +int cr_set_usage(int level) +{ + debug_level = level; + pr_info("Dump hnat CR: cat /sys/kernel/debug/hnat/hnat_setting\n\n"); + pr_info("====================Advanced Settings====================\n"); + pr_info("Usage: echo [type] [option] > /sys/kernel/debug/hnat/hnat_setting\n\n"); + pr_info("Commands: [type] [option]\n"); + pr_info(" 0 0~7 Set debug_level(0~7), current debug_level=%d\n", + debug_level); + pr_info(" 1 0~65535 Set binding threshold\n"); + pr_info(" 2 0~65535 Set TCP bind lifetime\n"); + pr_info(" 3 0~65535 Set FIN bind lifetime\n"); + pr_info(" 4 0~65535 Set UDP bind lifetime\n"); + pr_info(" 5 0~255 Set TCP keep alive interval\n"); + pr_info(" 6 0~255 Set UDP keep alive interval\n"); + pr_info(" 7 0~1 Set hnat counter update to nf_conntrack\n"); + + return 0; +} + +int binding_threshold(int threshold) +{ + int i; + + pr_info("Binding Threshold =%d\n", threshold); + + for (i = 0; i < CFG_PPE_NUM; i++) + writel(threshold, hnat_priv->ppe_base[i] + PPE_BNDR); + + return 0; +} + +int tcp_bind_lifetime(int tcp_life) +{ + int i; + + pr_info("tcp_life = %d\n", tcp_life); + + /* set Delta time for aging out an bind TCP FOE entry */ + for (i = 0; i < CFG_PPE_NUM; i++) + cr_set_field(hnat_priv->ppe_base[i] + PPE_BND_AGE_1, + TCP_DLTA, tcp_life); + + return 0; +} + +int fin_bind_lifetime(int fin_life) +{ + int i; + + pr_info("fin_life = %d\n", fin_life); + + /* set Delta time for aging out an bind TCP FIN FOE entry */ + for (i = 0; i < CFG_PPE_NUM; i++) + cr_set_field(hnat_priv->ppe_base[i] + PPE_BND_AGE_1, + FIN_DLTA, fin_life); + + return 0; +} + +int udp_bind_lifetime(int udp_life) +{ + int i; + + pr_info("udp_life = %d\n", udp_life); + + /* set Delta time for aging out an bind UDP FOE entry */ + for (i = 0; i < CFG_PPE_NUM; i++) + cr_set_field(hnat_priv->ppe_base[i] + PPE_BND_AGE_0, + UDP_DLTA, udp_life); + + return 0; +} + +int tcp_keep_alive(int tcp_interval) +{ + int i; + + if (tcp_interval > 255) { + tcp_interval = 255; + pr_info("TCP keep alive max interval = 255\n"); + } else { + pr_info("tcp_interval = %d\n", tcp_interval); + } + + /* Keep alive time for bind FOE TCP entry */ + for (i = 0; i < CFG_PPE_NUM; i++) + cr_set_field(hnat_priv->ppe_base[i] + PPE_KA, + TCP_KA, tcp_interval); + + return 0; +} + +int udp_keep_alive(int udp_interval) +{ + int i; + + if (udp_interval > 255) { + udp_interval = 255; + pr_info("TCP/UDP keep alive max interval = 255\n"); + } else { + pr_info("udp_interval = %d\n", udp_interval); + } + + /* Keep alive timer for bind FOE UDP entry */ + for (i = 0; i < CFG_PPE_NUM; i++) + 
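Note: cr_set_usage() above documents the "echo [type] [option]" command set accepted by the hnat_setting debugfs node; the type indices follow the cr_set_func[] table defined just below. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug as in the help text:

    #include <stdio.h>

    /* Hypothetical helper: send one "type option" command to hnat_setting. */
    static int hnat_setting_cmd(int type, int option)
    {
        FILE *f = fopen("/sys/kernel/debug/hnat/hnat_setting", "w");

        if (!f)
            return -1;
        fprintf(f, "%d %d\n", type, option);
        return fclose(f);
    }

For example, hnat_setting_cmd(2, 30) would program a TCP bind lifetime of 30, matching type 2 in the usage text.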
cr_set_field(hnat_priv->ppe_base[i] + PPE_KA, + UDP_KA, udp_interval); + + return 0; +} + +int set_nf_update_toggle(int toggle) +{ + struct mtk_hnat *h = hnat_priv; + + if (toggle == 1) + pr_info("Enable hnat counter update to nf_conntrack\n"); + else if (toggle == 0) + pr_info("Disable hnat counter update to nf_conntrack\n"); + else + pr_info("input error\n"); + h->nf_stat_en = toggle; + + return 0; +} + +static const debugfs_write_func hnat_set_func[] = { + [0] = hnat_set_usage, + [1] = hnat_cpu_reason, +}; + +static const debugfs_write_func entry_set_func[] = { + [0] = entry_set_usage, + [1] = entry_set_state, + [2] = wrapped_ppe0_entry_detail, + [3] = wrapped_ppe0_entry_delete, + [4] = wrapped_ppe1_entry_detail, + [5] = wrapped_ppe1_entry_delete, +}; + +static const debugfs_write_func cr_set_func[] = { + [0] = cr_set_usage, [1] = binding_threshold, + [2] = tcp_bind_lifetime, [3] = fin_bind_lifetime, + [4] = udp_bind_lifetime, [5] = tcp_keep_alive, + [6] = udp_keep_alive, [7] = set_nf_update_toggle, +}; + +int read_mib(struct mtk_hnat *h, u32 ppe_id, + u32 index, u64 *bytes, u64 *packets) +{ + int ret; + u32 val, cnt_r0, cnt_r1, cnt_r2; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + writel(index | (1 << 16), h->ppe_base[ppe_id] + PPE_MIB_SER_CR); + ret = readx_poll_timeout_atomic(readl, h->ppe_base[ppe_id] + PPE_MIB_SER_CR, val, + !(val & BIT_MIB_BUSY), 20, 10000); + + if (ret < 0) { + pr_notice("mib busy, please check later\n"); + return ret; + } + cnt_r0 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R0); + cnt_r1 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R1); + cnt_r2 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R2); + *bytes = cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32); + *packets = ((cnt_r1 & 0xffff0000) >> 16) + ((cnt_r2 & 0xffffff) << 16); + + return 0; + +} + +struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id, + u32 index, struct hnat_accounting *diff) + +{ + u64 bytes, packets; + + if (ppe_id >= CFG_PPE_NUM) + return NULL; + + if (!hnat_priv->data->per_flow_accounting) + return NULL; + + if (read_mib(h, ppe_id, index, &bytes, &packets)) + return NULL; + + h->acct[ppe_id][index].bytes += bytes; + h->acct[ppe_id][index].packets += packets; + + if (diff) { + diff->bytes = bytes; + diff->packets = packets; + } + + return &h->acct[ppe_id][index]; +} +EXPORT_SYMBOL(hnat_get_count); + +#define PRINT_COUNT(m, acct) {if (acct) \ + seq_printf(m, "bytes=%llu|packets=%llu|", \ + acct->bytes, acct->packets); } +static int __hnat_debug_show(struct seq_file *m, void *private, u32 ppe_id) +{ + struct mtk_hnat *h = hnat_priv; + struct foe_entry *entry, *end; + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + struct hnat_accounting *acct; + u32 entry_index = 0; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + entry = h->foe_table_cpu[ppe_id]; + end = h->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num; + while (entry < end) { + if (!entry->bfib1.state) { + entry++; + entry_index++; + continue; + } + acct = hnat_get_count(h, ppe_id, entry_index, NULL); + if (IS_IPV4_HNAPT(entry)) { + __be32 saddr = htonl(entry->ipv4_hnapt.sip); + __be32 daddr = htonl(entry->ipv4_hnapt.dip); + __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip); + __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip); + + *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv4_hnapt.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv4_hnapt.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + 
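Note: read_mib() above assembles each 64-bit MIB counter from three 32-bit registers: bytes are R0 plus the low 16 bits of R1 as bits 32..47, and packets are the high 16 bits of R1 plus the low 24 bits of R2 as bits 16..39. A standalone sketch of the same arithmetic (note the explicit u64 cast on the packet term, which the expression above arguably needs to avoid a 32-bit shift overflow):

    /* Illustrative: counter layout read back via PPE_MIB_SER_R0..R2. */
    static void mib_unpack(u32 r0, u32 r1, u32 r2, u64 *bytes, u64 *packets)
    {
        *bytes   = (u64)r0 | ((u64)(r1 & 0xffff) << 32);
        *packets = ((r1 >> 16) & 0xffff) | ((u64)(r2 & 0xffffff) << 16);
    }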
"addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|%pI4:%d->%pI4:%d=>%pI4:%d->%pI4:%d|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n", + entry, ppe_id, ei(entry, end), + es(entry), pt(entry), &saddr, + entry->ipv4_hnapt.sport, &daddr, + entry->ipv4_hnapt.dport, &nsaddr, + entry->ipv4_hnapt.new_sport, &ndaddr, + entry->ipv4_hnapt.new_dport, h_source, h_dest, + ntohs(entry->ipv4_hnapt.etype), + entry->ipv4_hnapt.info_blk1, + entry->ipv4_hnapt.info_blk2, + entry->ipv4_hnapt.vlan1, + entry->ipv4_hnapt.vlan2); + } else if (IS_IPV4_HNAT(entry)) { + __be32 saddr = htonl(entry->ipv4_hnapt.sip); + __be32 daddr = htonl(entry->ipv4_hnapt.dip); + __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip); + __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip); + + *((u32 *)h_source) = swab32(entry->ipv4_hnapt.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv4_hnapt.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv4_hnapt.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|%pI4->%pI4=>%pI4->%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n", + entry, ppe_id, ei(entry, end), + es(entry), pt(entry), &saddr, + &daddr, &nsaddr, &ndaddr, h_source, h_dest, + ntohs(entry->ipv4_hnapt.etype), + entry->ipv4_hnapt.info_blk1, + entry->ipv4_hnapt.info_blk2, + entry->ipv4_hnapt.vlan1, + entry->ipv4_hnapt.vlan2); + } else if (IS_IPV6_5T_ROUTE(entry)) { + u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0; + u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1; + u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2; + u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3; + u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0; + u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1; + u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2; + u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3; + + *((u32 *)h_source) = + swab32(entry->ipv6_5t_route.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv6_5t_route.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv6_5t_route.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n", + entry, ppe_id, ei(entry, end), es(entry), pt(entry), ipv6_sip0, + ipv6_sip1, ipv6_sip2, ipv6_sip3, + entry->ipv6_5t_route.sport, ipv6_dip0, + ipv6_dip1, ipv6_dip2, ipv6_dip3, + entry->ipv6_5t_route.dport, h_source, h_dest, + ntohs(entry->ipv6_5t_route.etype), + entry->ipv6_5t_route.info_blk1, + entry->ipv6_5t_route.info_blk2); + } else if (IS_IPV6_3T_ROUTE(entry)) { + u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0; + u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1; + u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2; + u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3; + u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0; + u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1; + u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2; + u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3; + + *((u32 *)h_source) = + swab32(entry->ipv6_5t_route.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv6_5t_route.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv6_5t_route.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x->DIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n", + entry, ppe_id, 
ei(entry, end), + es(entry), pt(entry), ipv6_sip0, + ipv6_sip1, ipv6_sip2, ipv6_sip3, ipv6_dip0, + ipv6_dip1, ipv6_dip2, ipv6_dip3, h_source, + h_dest, ntohs(entry->ipv6_5t_route.etype), + entry->ipv6_5t_route.info_blk1, + entry->ipv6_5t_route.info_blk2); + } else if (IS_IPV6_6RD(entry)) { + u32 ipv6_sip0 = entry->ipv6_3t_route.ipv6_sip0; + u32 ipv6_sip1 = entry->ipv6_3t_route.ipv6_sip1; + u32 ipv6_sip2 = entry->ipv6_3t_route.ipv6_sip2; + u32 ipv6_sip3 = entry->ipv6_3t_route.ipv6_sip3; + u32 ipv6_dip0 = entry->ipv6_3t_route.ipv6_dip0; + u32 ipv6_dip1 = entry->ipv6_3t_route.ipv6_dip1; + u32 ipv6_dip2 = entry->ipv6_3t_route.ipv6_dip2; + u32 ipv6_dip3 = entry->ipv6_3t_route.ipv6_dip3; + __be32 tsaddr = htonl(entry->ipv6_6rd.tunnel_sipv4); + __be32 tdaddr = htonl(entry->ipv6_6rd.tunnel_dipv4); + + *((u32 *)h_source) = + swab32(entry->ipv6_5t_route.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv6_5t_route.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv6_5t_route.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv6_5t_route.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|SIP=%08x:%08x:%08x:%08x(sp=%d)->DIP=%08x:%08x:%08x:%08x(dp=%d)|TSIP=%pI4->TDIP=%pI4|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n", + entry, ppe_id, ei(entry, end), + es(entry), pt(entry), ipv6_sip0, + ipv6_sip1, ipv6_sip2, ipv6_sip3, + entry->ipv6_5t_route.sport, ipv6_dip0, + ipv6_dip1, ipv6_dip2, ipv6_dip3, + entry->ipv6_5t_route.dport, &tsaddr, &tdaddr, + h_source, h_dest, + ntohs(entry->ipv6_5t_route.etype), + entry->ipv6_5t_route.info_blk1, + entry->ipv6_5t_route.info_blk2); + } else if (IS_IPV4_DSLITE(entry)) { + __be32 saddr = htonl(entry->ipv4_hnapt.sip); + __be32 daddr = htonl(entry->ipv4_hnapt.dip); + u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0; + u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1; + u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2; + u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3; + u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0; + u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1; + u32 ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2; + u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3; + + *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv4_dslite.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv4_dslite.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|SIP=%pI4->DIP=%pI4|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n", + entry, ppe_id, ei(entry, end), + es(entry), pt(entry), &saddr, + &daddr, ipv6_tsip0, ipv6_tsip1, ipv6_tsip2, + ipv6_tsip3, ipv6_tdip0, ipv6_tdip1, ipv6_tdip2, + ipv6_tdip3, h_source, h_dest, + ntohs(entry->ipv6_5t_route.etype), + entry->ipv6_5t_route.info_blk1, + entry->ipv6_5t_route.info_blk2); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + } else if (IS_IPV4_MAPE(entry)) { + __be32 saddr = htonl(entry->ipv4_dslite.sip); + __be32 daddr = htonl(entry->ipv4_dslite.dip); + __be32 nsaddr = htonl(entry->ipv4_dslite.new_sip); + __be32 ndaddr = htonl(entry->ipv4_dslite.new_dip); + u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0; + u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1; + u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2; + u32 ipv6_tsip3 = entry->ipv4_dslite.tunnel_sipv6_3; + u32 ipv6_tdip0 = entry->ipv4_dslite.tunnel_dipv6_0; + u32 ipv6_tdip1 = entry->ipv4_dslite.tunnel_dipv6_1; + u32 
ipv6_tdip2 = entry->ipv4_dslite.tunnel_dipv6_2; + u32 ipv6_tdip3 = entry->ipv4_dslite.tunnel_dipv6_3; + + *((u32 *)h_source) = swab32(entry->ipv4_dslite.smac_hi); + *((u16 *)&h_source[4]) = + swab16(entry->ipv4_dslite.smac_lo); + *((u32 *)h_dest) = swab32(entry->ipv4_dslite.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv4_dslite.dmac_lo); + PRINT_COUNT(m, acct); + seq_printf(m, + "addr=0x%p|ppe=%d|index=%d|state=%s|type=%s|SIP=%pI4:%d->DIP=%pI4:%d|NSIP=%pI4:%d->NDIP=%pI4:%d|TSIP=%08x:%08x:%08x:%08x->TDIP=%08x:%08x:%08x:%08x|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x\n", + entry, ppe_id, ei(entry, end), + es(entry), pt(entry), + &saddr, entry->ipv4_dslite.sport, + &daddr, entry->ipv4_dslite.dport, + &nsaddr, entry->ipv4_dslite.new_sport, + &ndaddr, entry->ipv4_dslite.new_dport, + ipv6_tsip0, ipv6_tsip1, ipv6_tsip2, + ipv6_tsip3, ipv6_tdip0, ipv6_tdip1, + ipv6_tdip2, ipv6_tdip3, h_source, h_dest, + ntohs(entry->ipv6_5t_route.etype), + entry->ipv6_5t_route.info_blk1, + entry->ipv6_5t_route.info_blk2); +#endif + } else + seq_printf(m, "addr=0x%p|ppe=%d|index=%d state=%s\n", entry, ppe_id, ei(entry, end), + es(entry)); + entry++; + entry_index++; + } + + return 0; +} + +static int hnat_debug_show(struct seq_file *m, void *private) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) + __hnat_debug_show(m, private, i); + + return 0; +} + +static int hnat_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_debug_show, file->private_data); +} + +static const struct file_operations hnat_debug_fops = { + .open = hnat_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int hnat_whnat_show(struct seq_file *m, void *private) +{ + int i; + struct net_device *dev; + + for (i = 0; i < MAX_IF_NUM; i++) { + dev = hnat_priv->wifi_hook_if[i]; + if (dev) + seq_printf(m, "%d:%s\n", i, dev->name); + else + continue; + } + + return 0; +} + +static int hnat_whnat_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_whnat_show, file->private_data); +} + +static ssize_t hnat_whnat_write(struct file *file, const char __user *buf, + size_t length, loff_t *offset) +{ + char line[64] = {0}; + struct net_device *dev; + int enable; + char name[32]; + size_t size; + + if (length >= sizeof(line)) + return -EINVAL; + + if (copy_from_user(line, buf, length)) + return -EFAULT; + + if (sscanf(line, "%s %d", name, &enable) != 2) + return -EFAULT; + + line[length] = '\0'; + + dev = dev_get_by_name(&init_net, name); + + if (dev) { + if (enable) { + mtk_ppe_dev_register_hook(dev); + pr_info("register wifi extern if = %s\n", dev->name); + } else { + mtk_ppe_dev_unregister_hook(dev); + pr_info("unregister wifi extern if = %s\n", dev->name); + } + } else { + pr_info("no such device!\n"); + } + + size = strlen(line); + *offset += size; + + return length; +} + + +static const struct file_operations hnat_whnat_fops = { + .open = hnat_whnat_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_whnat_write, + .release = single_release, +}; + +int cpu_reason_read(struct seq_file *m, void *private) +{ + int i; + + pr_info("============ CPU REASON =========\n"); + pr_info("(2)IPv4(IPv6) TTL(hop limit) = %u\n", dbg_cpu_reason_cnt[0]); + pr_info("(3)Ipv4(IPv6) has option(extension) header = %u\n", + dbg_cpu_reason_cnt[1]); + pr_info("(7)No flow is assigned = %u\n", dbg_cpu_reason_cnt[2]); + pr_info("(8)IPv4 HNAT doesn't support IPv4 /w fragment = %u\n", + dbg_cpu_reason_cnt[3]); + pr_info("(9)IPv4 HNAPT/DS-Lite doesn't 
support IPv4 /w fragment = %u\n", + dbg_cpu_reason_cnt[4]); + pr_info("(10)IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport = %u\n", + dbg_cpu_reason_cnt[5]); + pr_info("(11)IPv6 5T-route/6RD can't find TCP/UDP sport/dport = %u\n", + dbg_cpu_reason_cnt[6]); + pr_info("(12)Ingress packet is TCP fin/syn/rst = %u\n", + dbg_cpu_reason_cnt[7]); + pr_info("(13)FOE Un-hit = %u\n", dbg_cpu_reason_cnt[8]); + pr_info("(14)FOE Hit unbind = %u\n", dbg_cpu_reason_cnt[9]); + pr_info("(15)FOE Hit unbind & rate reach = %u\n", + dbg_cpu_reason_cnt[10]); + pr_info("(16)Hit bind PPE TCP FIN entry = %u\n", + dbg_cpu_reason_cnt[11]); + pr_info("(17)Hit bind PPE entry and TTL(hop limit) = 1 and TTL(hot limit) - 1 = %u\n", + dbg_cpu_reason_cnt[12]); + pr_info("(18)Hit bind and VLAN replacement violation = %u\n", + dbg_cpu_reason_cnt[13]); + pr_info("(19)Hit bind and keep alive with unicast old-header packet = %u\n", + dbg_cpu_reason_cnt[14]); + pr_info("(20)Hit bind and keep alive with multicast new-header packet = %u\n", + dbg_cpu_reason_cnt[15]); + pr_info("(21)Hit bind and keep alive with duplicate old-header packet = %u\n", + dbg_cpu_reason_cnt[16]); + pr_info("(22)FOE Hit bind & force to CPU = %u\n", + dbg_cpu_reason_cnt[17]); + pr_info("(28)Hit bind and exceed MTU =%u\n", dbg_cpu_reason_cnt[18]); + pr_info("(24)Hit bind multicast packet to CPU = %u\n", + dbg_cpu_reason_cnt[19]); + pr_info("(25)Hit bind multicast packet to GMAC & CPU = %u\n", + dbg_cpu_reason_cnt[20]); + pr_info("(26)Pre bind = %u\n", dbg_cpu_reason_cnt[21]); + + for (i = 0; i < 22; i++) + dbg_cpu_reason_cnt[i] = 0; + return 0; +} + +static int cpu_reason_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpu_reason_read, file->private_data); +} + +ssize_t cpu_reason_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[32]; + char *p_buf; + u32 len = count; + long arg0 = 0, arg1 = 0; + char *p_token = NULL; + char *p_delimiter = " \t"; + int ret; + + if (len >= sizeof(buf)) { + pr_info("input handling fail!\n"); + return -1; + } + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + p_buf = buf; + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg0 = 0; + else + ret = kstrtol(p_token, 10, &arg0); + + switch (arg0) { + case 0: + case 1: + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg1 = 0; + else + ret = kstrtol(p_token, 10, &arg1); + break; + default: + pr_info("no handler defined for command id(0x%08lx)\n\r", arg0); + arg0 = 0; + arg1 = 0; + break; + } + + (*hnat_set_func[arg0])(arg1); + + return len; +} + +static const struct file_operations cpu_reason_fops = { + .open = cpu_reason_open, + .read = seq_read, + .llseek = seq_lseek, + .write = cpu_reason_write, + .release = single_release, +}; + +void dbg_dump_entry(struct seq_file *m, struct foe_entry *entry, + uint32_t index) +{ + __be32 saddr, daddr, nsaddr, ndaddr; + + saddr = htonl(entry->ipv4_hnapt.sip); + daddr = htonl(entry->ipv4_hnapt.dip); + nsaddr = htonl(entry->ipv4_hnapt.new_sip); + ndaddr = htonl(entry->ipv4_hnapt.new_dip); + + if (IS_IPV4_HNAPT(entry)) { + seq_printf(m, + "NAPT(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d\n", + index, &saddr, entry->ipv4_hnapt.sport, &daddr, + entry->ipv4_hnapt.dport, &nsaddr, + entry->ipv4_hnapt.new_sport, &ndaddr, + entry->ipv4_hnapt.new_dport); + } else if (IS_IPV4_HNAT(entry)) { + seq_printf(m, "NAT(%d): %pI4->%pI4 => %pI4->%pI4\n", + index, &saddr, &daddr, &nsaddr, &ndaddr); + } + + if (IS_IPV4_DSLITE(entry)) { + 
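Note: cpu_reason_write() above, like hnat_entry_write() and hnat_setting_write() later in this file, repeats the same two-token "command argument" parse built from strsep() and kstrtol(). A hypothetical helper capturing that pattern, kernel-style since it would live in this file:

    static int parse_dbg_cmd(char *buf, long *arg0, long *arg1)
    {
        char *tok;

        *arg0 = 0;
        *arg1 = 0;

        tok = strsep(&buf, " \t");
        if (!tok || kstrtol(tok, 10, arg0))
            return -EINVAL;

        tok = strsep(&buf, " \t");
        if (tok && kstrtol(tok, 10, arg1))
            return -EINVAL;

        return 0;
    }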
seq_printf(m, + "IPv4 Ds-Lite(%d): %pI4:%d->%pI4:%d => %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n", + index, &saddr, entry->ipv4_dslite.sport, &daddr, + entry->ipv4_dslite.dport, + entry->ipv4_dslite.tunnel_sipv6_0, + entry->ipv4_dslite.tunnel_sipv6_1, + entry->ipv4_dslite.tunnel_sipv6_2, + entry->ipv4_dslite.tunnel_sipv6_3, + entry->ipv4_dslite.tunnel_dipv6_0, + entry->ipv4_dslite.tunnel_dipv6_1, + entry->ipv4_dslite.tunnel_dipv6_2, + entry->ipv4_dslite.tunnel_dipv6_3); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + } else if (IS_IPV4_MAPE(entry)) { + nsaddr = htonl(entry->ipv4_dslite.new_sip); + ndaddr = htonl(entry->ipv4_dslite.new_dip); + + seq_printf(m, + "IPv4 MAP-E(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d | Tunnel=%08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n", + index, &saddr, entry->ipv4_dslite.sport, + &daddr, entry->ipv4_dslite.dport, + &nsaddr, entry->ipv4_dslite.new_sport, + &ndaddr, entry->ipv4_dslite.new_dport, + entry->ipv4_dslite.tunnel_sipv6_0, + entry->ipv4_dslite.tunnel_sipv6_1, + entry->ipv4_dslite.tunnel_sipv6_2, + entry->ipv4_dslite.tunnel_sipv6_3, + entry->ipv4_dslite.tunnel_dipv6_0, + entry->ipv4_dslite.tunnel_dipv6_1, + entry->ipv4_dslite.tunnel_dipv6_2, + entry->ipv4_dslite.tunnel_dipv6_3); +#endif + } else if (IS_IPV6_3T_ROUTE(entry)) { + seq_printf(m, + "IPv6_3T(%d): %08X:%08X:%08X:%08X => %08X:%08X:%08X:%08X (Prot=%d)\n", + index, entry->ipv6_3t_route.ipv6_sip0, + entry->ipv6_3t_route.ipv6_sip1, + entry->ipv6_3t_route.ipv6_sip2, + entry->ipv6_3t_route.ipv6_sip3, + entry->ipv6_3t_route.ipv6_dip0, + entry->ipv6_3t_route.ipv6_dip1, + entry->ipv6_3t_route.ipv6_dip2, + entry->ipv6_3t_route.ipv6_dip3, + entry->ipv6_3t_route.prot); + } else if (IS_IPV6_5T_ROUTE(entry)) { + seq_printf(m, + "IPv6_5T(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n", + index, entry->ipv6_5t_route.ipv6_sip0, + entry->ipv6_5t_route.ipv6_sip1, + entry->ipv6_5t_route.ipv6_sip2, + entry->ipv6_5t_route.ipv6_sip3, + entry->ipv6_5t_route.sport, + entry->ipv6_5t_route.ipv6_dip0, + entry->ipv6_5t_route.ipv6_dip1, + entry->ipv6_5t_route.ipv6_dip2, + entry->ipv6_5t_route.ipv6_dip3, + entry->ipv6_5t_route.dport); + } else if (IS_IPV6_6RD(entry)) { + seq_printf(m, + "IPv6_6RD(%d): %08X:%08X:%08X:%08X:%d => %08X:%08X:%08X:%08X:%d\n", + index, entry->ipv6_6rd.ipv6_sip0, + entry->ipv6_6rd.ipv6_sip1, entry->ipv6_6rd.ipv6_sip2, + entry->ipv6_6rd.ipv6_sip3, entry->ipv6_6rd.sport, + entry->ipv6_6rd.ipv6_dip0, entry->ipv6_6rd.ipv6_dip1, + entry->ipv6_6rd.ipv6_dip2, entry->ipv6_6rd.ipv6_dip3, + entry->ipv6_6rd.dport); + } +} + +int __hnat_entry_read(struct seq_file *m, void *private, u32 ppe_id) +{ + struct mtk_hnat *h = hnat_priv; + struct foe_entry *entry, *end; + int hash_index; + int cnt; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + hash_index = 0; + cnt = 0; + entry = h->foe_table_cpu[ppe_id]; + end = h->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num; + + seq_printf(m, "============================\n"); + seq_printf(m, "PPE_ID = %d\n", ppe_id); + + while (entry < end) { + if (entry->bfib1.state == dbg_entry_state) { + cnt++; + dbg_dump_entry(m, entry, hash_index); + } + hash_index++; + entry++; + } + + seq_printf(m, "Total State = %s cnt = %d\n", + dbg_entry_state == 0 ? + "Invalid" : dbg_entry_state == 1 ? + "Unbind" : dbg_entry_state == 2 ? + "BIND" : dbg_entry_state == 3 ? 
+ "FIN" : "Unknown", cnt); + + return 0; +} + +int hnat_entry_read(struct seq_file *m, void *private) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) + __hnat_entry_read(m, private, i); + + return 0; +} + +ssize_t hnat_entry_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[32]; + char *p_buf; + u32 len = count; + long arg0 = 0, arg1 = 0; + char *p_token = NULL; + char *p_delimiter = " \t"; + int ret; + + if (len >= sizeof(buf)) { + pr_info("input handling fail!\n"); + return -1; + } + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + p_buf = buf; + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg0 = 0; + else + ret = kstrtol(p_token, 10, &arg0); + + switch (arg0) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg1 = 0; + else + ret = kstrtol(p_token, 10, &arg1); + break; + default: + pr_info("no handler defined for command id(0x%08lx)\n\r", arg0); + arg0 = 0; + arg1 = 0; + break; + } + + (*entry_set_func[arg0])(arg1); + + return len; +} + +static int hnat_entry_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_entry_read, file->private_data); +} + +static const struct file_operations hnat_entry_fops = { + .open = hnat_entry_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_entry_write, + .release = single_release, +}; + +int __hnat_setting_read(struct seq_file *m, void *private, u32 ppe_id) +{ + struct mtk_hnat *h = hnat_priv; + int i; + int cr_max; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + cr_max = 319 * 4; + for (i = 0; i < cr_max; i = i + 0x10) { + pr_info("0x%p : 0x%08x 0x%08x 0x%08x 0x%08x\n", + (void *)h->foe_table_dev[ppe_id] + i, + readl(h->ppe_base[ppe_id] + i), + readl(h->ppe_base[ppe_id] + i + 4), + readl(h->ppe_base[ppe_id] + i + 8), + readl(h->ppe_base[ppe_id] + i + 0xc)); + } + + return 0; +} + +int hnat_setting_read(struct seq_file *m, void *private) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) + __hnat_setting_read(m, private, i); + + return 0; +} + +static int hnat_setting_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_setting_read, file->private_data); +} + +ssize_t hnat_setting_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[32]; + char *p_buf; + u32 len = count; + long arg0 = 0, arg1 = 0; + char *p_token = NULL; + char *p_delimiter = " \t"; + int ret; + + if (len >= sizeof(buf)) { + pr_info("input handling fail!\n"); + return -1; + } + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + p_buf = buf; + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg0 = 0; + else + ret = kstrtol(p_token, 10, &arg0); + + switch (arg0) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + p_token = strsep(&p_buf, p_delimiter); + if (!p_token) + arg1 = 0; + else + ret = kstrtol(p_token, 10, &arg1); + break; + default: + pr_info("no handler defined for command id(0x%08lx)\n\r", arg0); + arg0 = 0; + arg1 = 0; + break; + } + + (*cr_set_func[arg0])(arg1); + + return len; +} + +static const struct file_operations hnat_setting_fops = { + .open = hnat_setting_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_setting_write, + .release = single_release, +}; + +int __mcast_table_dump(struct seq_file *m, void *private, u32 ppe_id) +{ + struct mtk_hnat *h = hnat_priv; + struct ppe_mcast_h mcast_h; + struct 
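Note: writes to the hnat_entry node are dispatched through entry_set_func[] (defined earlier): commands 2/3 dump or delete a PPE0 entry, 4/5 are the PPE1 equivalents, and entry_delete() treats an index of -1 as "clear the whole table" followed by a hardware cache flush. A userspace sketch, assuming the default debugfs mount point:

    #include <stdio.h>

    /* Example: delete every PPE0 FOE entry (command 3, index -1). */
    static int hnat_clear_ppe0(void)
    {
        FILE *f = fopen("/sys/kernel/debug/hnat/hnat_entry", "w");

        if (!f)
            return -1;
        fprintf(f, "3 -1\n");
        return fclose(f);
    }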
ppe_mcast_l mcast_l; + u8 i, max; + void __iomem *reg; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + if (!h->pmcast) + return 0; + + max = h->pmcast->max_entry; + pr_info("============================\n"); + pr_info("PPE_ID = %d\n", ppe_id); + pr_info("MAC | VID | PortMask | QosPortMask\n"); + for (i = 0; i < max; i++) { + if (i < 0x10) { + reg = h->ppe_base[ppe_id] + PPE_MCAST_H_0 + i * 8; + mcast_h.u.value = readl(reg); + reg = h->ppe_base[ppe_id] + PPE_MCAST_L_0 + i * 8; + mcast_l.addr = readl(reg); + } else { + reg = h->fe_base + PPE_MCAST_H_10 + (i - 0x10) * 8; + mcast_h.u.value = readl(reg); + reg = h->fe_base + PPE_MCAST_L_10 + (i - 0x10) * 8; + mcast_l.addr = readl(reg); + } + pr_info("%08x %d %c%c%c%c %c%c%c%c (QID=%d, mc_mpre_sel=%d)\n", + mcast_l.addr, + mcast_h.u.info.mc_vid, + (mcast_h.u.info.mc_px_en & 0x08) ? '1' : '-', + (mcast_h.u.info.mc_px_en & 0x04) ? '1' : '-', + (mcast_h.u.info.mc_px_en & 0x02) ? '1' : '-', + (mcast_h.u.info.mc_px_en & 0x01) ? '1' : '-', + (mcast_h.u.info.mc_px_qos_en & 0x08) ? '1' : '-', + (mcast_h.u.info.mc_px_qos_en & 0x04) ? '1' : '-', + (mcast_h.u.info.mc_px_qos_en & 0x02) ? '1' : '-', + (mcast_h.u.info.mc_px_qos_en & 0x01) ? '1' : '-', + mcast_h.u.info.mc_qos_qid + + ((mcast_h.u.info.mc_qos_qid54) << 4), + mcast_h.u.info.mc_mpre_sel); + } + + return 0; +} + +int mcast_table_dump(struct seq_file *m, void *private) +{ + int i; + + for (i = 0; i < CFG_PPE_NUM; i++) + __mcast_table_dump(m, private, i); + + return 0; +} + +static int mcast_table_open(struct inode *inode, struct file *file) +{ + return single_open(file, mcast_table_dump, file->private_data); +} + +static const struct file_operations hnat_mcast_fops = { + .open = mcast_table_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int hnat_ext_show(struct seq_file *m, void *private) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (ext_entry->dev) + seq_printf(m, "ext devices [%d] = %s (dev=%p, ifindex=%d)\n", + i, ext_entry->name, ext_entry->dev, + ext_entry->dev->ifindex); + } + + return 0; +} + +static int hnat_ext_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_ext_show, file->private_data); +} + +static const struct file_operations hnat_ext_fops = { + .open = hnat_ext_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t hnat_sched_show(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + long id = (long)file->private_data; + struct mtk_hnat *h = hnat_priv; + u32 qdma_tx_sch; + int enable; + int scheduling; + int max_rate; + char *buf; + unsigned int len = 0, buf_len = 1500; + ssize_t ret_cnt; + int scheduler, i; + u32 sch_reg; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (hnat_priv->data->num_of_sch == 4) + qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id)); + else + qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE); + + if (id & 0x1) + qdma_tx_sch >>= 16; + qdma_tx_sch &= 0xffff; + enable = !!(qdma_tx_sch & BIT(11)); + scheduling = !!(qdma_tx_sch & BIT(15)); + max_rate = ((qdma_tx_sch >> 4) & 0x7f); + qdma_tx_sch &= 0xf; + while (qdma_tx_sch--) + max_rate *= 10; + + len += scnprintf(buf + len, buf_len - len, + "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable, + (scheduling == 1) ? 
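Note: the scheduler rate decoded above is stored as a 7-bit mantissa with a 4-bit power-of-ten exponent (rate = mantissa * 10^exponent); hnat_sched_write() further below performs the inverse with its "while (rate > 127) { rate /= 10; exp++; }" loop. A self-contained sketch of both directions (helper names are illustrative, not from the patch):

    static u32 qdma_rate_encode(u32 rate)
    {
        u32 exp = 0;

        while (rate > 127) {
            rate /= 10;
            exp++;
        }
        return ((rate & 0x7f) << 4) | (exp & 0xf);
    }

    static u32 qdma_rate_decode(u32 field)
    {
        u32 rate = (field >> 4) & 0x7f;
        u32 exp = field & 0xf;

        while (exp--)
            rate *= 10;
        return rate;
    }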
"WRR" : "SP", max_rate); + + for (i = 0; i < MTK_QDMA_TX_NUM; i++) { + cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, + (i / NUM_OF_Q_PER_PAGE)); + sch_reg = readl(h->fe_base + QTX_SCH(i % NUM_OF_Q_PER_PAGE)); + if (hnat_priv->data->num_of_sch == 4) + scheduler = (sch_reg >> 30) & 0x3; + else + scheduler = !!(sch_reg & BIT(31)); + if (id == scheduler) + len += scnprintf(buf + len, buf_len - len, "%d ", i); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static ssize_t hnat_sched_write(struct file *file, const char __user *buf, + size_t length, loff_t *offset) +{ + long id = (long)file->private_data; + struct mtk_hnat *h = hnat_priv; + char line[64] = {0}; + int enable, rate, exp = 0, shift = 0; + char scheduling[32]; + size_t size; + u32 qdma_tx_sch; + u32 val = 0; + + if (length >= sizeof(line)) + return -EINVAL; + + if (copy_from_user(line, buf, length)) + return -EFAULT; + + if (sscanf(line, "%d %s %d", &enable, scheduling, &rate) != 3) + return -EFAULT; + + while (rate > 127) { + rate /= 10; + exp++; + } + + line[length] = '\0'; + + if (enable) + val |= BIT(11); + if (strcmp(scheduling, "sp") != 0) + val |= BIT(15); + val |= (rate & 0x7f) << 4; + val |= exp & 0xf; + if (id & 0x1) + shift = 16; + + if (hnat_priv->data->num_of_sch == 4) + qdma_tx_sch = readl(h->fe_base + QDMA_TX_4SCH_BASE(id)); + else + qdma_tx_sch = readl(h->fe_base + QDMA_TX_2SCH_BASE); + + qdma_tx_sch &= ~(0xffff << shift); + qdma_tx_sch |= val << shift; + if (hnat_priv->data->num_of_sch == 4) + writel(qdma_tx_sch, h->fe_base + QDMA_TX_4SCH_BASE(id)); + else + writel(qdma_tx_sch, h->fe_base + QDMA_TX_2SCH_BASE); + + size = strlen(line); + *offset += size; + + return length; +} + +static const struct file_operations hnat_sched_fops = { + .open = simple_open, + .read = hnat_sched_show, + .write = hnat_sched_write, + .llseek = default_llseek, +}; + +static ssize_t hnat_queue_show(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct mtk_hnat *h = hnat_priv; + long id = (long)file->private_data; + u32 qtx_sch; + u32 qtx_cfg; + int scheduler; + int min_rate_en; + int min_rate; + int min_rate_exp; + int max_rate_en; + int max_weight; + int max_rate; + int max_rate_exp; + char *buf; + unsigned int len = 0, buf_len = 1500; + ssize_t ret_cnt; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE)); + qtx_cfg = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE)); + if (hnat_priv->data->num_of_sch == 4) + scheduler = (qtx_sch >> 30) & 0x3; + else + scheduler = !!(qtx_sch & BIT(31)); + min_rate_en = !!(qtx_sch & BIT(27)); + min_rate = (qtx_sch >> 20) & 0x7f; + min_rate_exp = (qtx_sch >> 16) & 0xf; + max_rate_en = !!(qtx_sch & BIT(11)); + max_weight = (qtx_sch >> 12) & 0xf; + max_rate = (qtx_sch >> 4) & 0x7f; + max_rate_exp = qtx_sch & 0xf; + while (min_rate_exp--) + min_rate *= 10; + + while (max_rate_exp--) + max_rate *= 10; + + len += scnprintf(buf + len, buf_len - len, + "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler, + (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff); + + if (hnat_priv->data->version != MTK_HNAT_V1) { + /* Switch to debug mode */ + cr_set_field(h->fe_base + QTX_MIB_IF, MIB_ON_QTX_CFG, 1); + cr_set_field(h->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 1); + qtx_cfg = 
readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + qtx_sch = readl(h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE)); + len += scnprintf(buf + len, buf_len - len, + "packet count: %u\n", qtx_cfg); + len += scnprintf(buf + len, buf_len - len, + "packet drop: %u\n\n", qtx_sch); + + /* Recover to normal mode */ + cr_set_field(hnat_priv->fe_base + QTX_MIB_IF, + MIB_ON_QTX_CFG, 0); + cr_set_field(hnat_priv->fe_base + QTX_MIB_IF, VQTX_MIB_EN, 0); + } + + len += scnprintf(buf + len, buf_len - len, + " EN RATE WEIGHT\n"); + len += scnprintf(buf + len, buf_len - len, + "----------------------------\n"); + len += scnprintf(buf + len, buf_len - len, + "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight); + len += scnprintf(buf + len, buf_len - len, + "min%5d%9d -\n", min_rate_en, min_rate); + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static ssize_t hnat_queue_write(struct file *file, const char __user *buf, + size_t length, loff_t *offset) +{ + long id = (long)file->private_data; + struct mtk_hnat *h = hnat_priv; + char line[64] = {0}; + int max_enable, max_rate, max_exp = 0; + int min_enable, min_rate, min_exp = 0; + int weight; + int resv; + int scheduler; + size_t size; + u32 qtx_sch = 0; + + cr_set_field(h->fe_base + QDMA_PAGE, QTX_CFG_PAGE, (id / NUM_OF_Q_PER_PAGE)); + if (length >= sizeof(line)) + return -EINVAL; + + if (copy_from_user(line, buf, length)) + return -EFAULT; + + if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate, + &max_enable, &max_rate, &weight, &resv) != 7) + return -EFAULT; + + line[length] = '\0'; + + while (max_rate > 127) { + max_rate /= 10; + max_exp++; + } + + while (min_rate > 127) { + min_rate /= 10; + min_exp++; + } + + if (hnat_priv->data->num_of_sch == 4) + qtx_sch |= (scheduler & 0x3) << 30; + else + qtx_sch |= (scheduler & 0x1) << 31; + if (min_enable) + qtx_sch |= BIT(27); + qtx_sch |= (min_rate & 0x7f) << 20; + qtx_sch |= (min_exp & 0xf) << 16; + if (max_enable) + qtx_sch |= BIT(11); + qtx_sch |= (weight & 0xf) << 12; + qtx_sch |= (max_rate & 0x7f) << 4; + qtx_sch |= max_exp & 0xf; + writel(qtx_sch, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE)); + + resv &= 0xff; + qtx_sch = readl(h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + qtx_sch &= 0xffff0000; + qtx_sch |= (resv << 8) | resv; + writel(qtx_sch, h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + + size = strlen(line); + *offset += size; + + return length; +} + +static const struct file_operations hnat_queue_fops = { + .open = simple_open, + .read = hnat_queue_show, + .write = hnat_queue_write, + .llseek = default_llseek, +}; + +static ssize_t hnat_ppd_if_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[IFNAMSIZ]; + struct net_device *dev; + char *p, *tmp; + + if (count >= IFNAMSIZ) + return -EFAULT; + + memset(buf, 0, IFNAMSIZ); + if (copy_from_user(buf, buffer, count)) + return -EFAULT; + + tmp = buf; + p = strsep(&tmp, "\n\r "); + dev = dev_get_by_name(&init_net, p); + + if (dev) { + if (hnat_priv->g_ppdev) + dev_put(hnat_priv->g_ppdev); + hnat_priv->g_ppdev = dev; + + strncpy(hnat_priv->ppd, p, IFNAMSIZ - 1); + pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd); + } else { + pr_info("no such device!\n"); + } + + return count; +} + +static int hnat_ppd_if_read(struct seq_file *m, void *private) +{ + pr_info("hnat_priv ppd = %s\n", hnat_priv->ppd); + + if (hnat_priv->g_ppdev) { + pr_info("hnat_priv g_ppdev name = %s\n", + 
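Note: hnat_queue_write() above packs the whole QTX_SCH word from seven space-separated integers (scheduler, min-enable, min-rate, max-enable, max-rate, weight, reserve). A compact sketch of the same packing for the 4-scheduler SoCs, reusing the hypothetical qdma_rate_encode() sketched earlier; bit positions are taken from the shifts in the write handler:

    static u32 qtx_sch_pack(u32 sch, bool min_en, u32 min_rate,
                            bool max_en, u32 max_rate, u32 weight)
    {
        u32 v = (sch & 0x3) << 30;

        if (min_en)
            v |= BIT(27);
        v |= (qdma_rate_encode(min_rate) & 0x7ff) << 16;
        if (max_en)
            v |= BIT(11);
        v |= (weight & 0xf) << 12;
        v |= qdma_rate_encode(max_rate) & 0x7ff;
        return v;
    }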
hnat_priv->g_ppdev->name); + } else { + pr_info("hnat_priv g_ppdev is null!\n"); + } + + return 0; +} + +static int hnat_ppd_if_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_ppd_if_read, file->private_data); +} + +static const struct file_operations hnat_ppd_if_fops = { + .open = hnat_ppd_if_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_ppd_if_write, + .release = single_release, +}; + +static int hnat_mape_toggle_read(struct seq_file *m, void *private) +{ + pr_info("value=%d, %s is enabled now!\n", mape_toggle, (mape_toggle) ? "mape" : "ds-lite"); + + return 0; +} + +static int hnat_mape_toggle_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_mape_toggle_read, file->private_data); +} + +static ssize_t hnat_mape_toggle_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf = 0; + int i; + u32 ppe_cfg; + + if ((count < 1) || copy_from_user(&buf, buffer, sizeof(buf))) + return -EFAULT; + + if (buf == '1') { + pr_info("mape is going to be enabled, ds-lite is going to be disabled !\n"); + mape_toggle = 1; + } else if (buf == '0') { + pr_info("ds-lite is going to be enabled, mape is going to be disabled !\n"); + mape_toggle = 0; + } else { + pr_info("Invalid parameter.\n"); + return -EFAULT; + } + + for (i = 0; i < CFG_PPE_NUM; i++) { + ppe_cfg = readl(hnat_priv->ppe_base[i] + PPE_FLOW_CFG); + + if (mape_toggle) + ppe_cfg &= ~BIT_IPV4_DSL_EN; + else + ppe_cfg |= BIT_IPV4_DSL_EN; + + writel(ppe_cfg, hnat_priv->ppe_base[i] + PPE_FLOW_CFG); + } + + return count; +} + +static const struct file_operations hnat_mape_toggle_fops = { + .open = hnat_mape_toggle_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_mape_toggle_write, + .release = single_release, +}; + +static int hnat_hook_toggle_read(struct seq_file *m, void *private) +{ + pr_info("value=%d, hook is %s now!\n", hook_toggle, (hook_toggle) ? "enabled" : "disabled"); + + return 0; +} + +static int hnat_hook_toggle_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_hook_toggle_read, file->private_data); +} + +static ssize_t hnat_hook_toggle_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[8] = {0}; + int len = count; + u32 id; + + if ((len > 8) || copy_from_user(buf, buffer, len)) + return -EFAULT; + + if (buf[0] == '1' && !hook_toggle) { + pr_info("hook is going to be enabled !\n"); + hnat_enable_hook(); + + if (IS_PPPQ_MODE) { + for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) + hnat_qos_shaper_ebl(id, 1); + } + } else if (buf[0] == '0' && hook_toggle) { + pr_info("hook is going to be disabled !\n"); + hnat_disable_hook(); + + if (IS_PPPQ_MODE) { + for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) + hnat_qos_shaper_ebl(id, 0); + } + } + + return len; +} + +static const struct file_operations hnat_hook_toggle_fops = { + .open = hnat_hook_toggle_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_hook_toggle_write, + .release = single_release, +}; + +static int hnat_qos_toggle_read(struct seq_file *m, void *private) +{ + pr_info("value=%d, HQoS is %s now!\n", qos_toggle, (qos_toggle) ? 
"enabled" : "disabled"); + + return 0; +} + +static int hnat_qos_toggle_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_qos_toggle_read, file->private_data); +} + +void hnat_qos_shaper_ebl(u32 id, u32 enable) +{ + struct mtk_hnat *h = hnat_priv; + u32 cfg; + + if (enable) { + cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN; + cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) | + (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) | + (25 << QTX_SCH_MAX_RATE_MAN_OFFSET) | + (5 << QTX_SCH_MAX_RATE_EXP_OFFSET) | + (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET); + + writel(cfg, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE)); + } else { + writel(0, h->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE)); + } +} + +static void hnat_qos_disable(void) +{ + struct mtk_hnat *h = hnat_priv; + u32 id, cfg; + + for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) { + hnat_qos_shaper_ebl(id, 0); + writel((4 << QTX_CFG_HW_RESV_CNT_OFFSET) | + (4 << QTX_CFG_SW_RESV_CNT_OFFSET), + h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + } + + cfg = (QDMA_TX_SCH_WFQ_EN) | (QDMA_TX_SCH_WFQ_EN << 16); + for (id = 0; id < h->data->num_of_sch; id += 2) { + if (h->data->num_of_sch == 4) + writel(cfg, h->fe_base + QDMA_TX_4SCH_BASE(id)); + else + writel(cfg, h->fe_base + QDMA_TX_2SCH_BASE); + } +} + +static void hnat_qos_pppq_enable(void) +{ + struct mtk_hnat *h = hnat_priv; + u32 id, cfg; + + for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) { + if (hook_toggle) + hnat_qos_shaper_ebl(id, 1); + else + hnat_qos_shaper_ebl(id, 0); + + writel((4 << QTX_CFG_HW_RESV_CNT_OFFSET) | + (4 << QTX_CFG_SW_RESV_CNT_OFFSET), + h->fe_base + QTX_CFG(id % NUM_OF_Q_PER_PAGE)); + } + + cfg = (QDMA_TX_SCH_WFQ_EN) | (QDMA_TX_SCH_WFQ_EN << 16); + for (id = 0; id < h->data->num_of_sch; id+= 2) { + if (h->data->num_of_sch == 4) + writel(cfg, h->fe_base + QDMA_TX_4SCH_BASE(id)); + else + writel(cfg, h->fe_base + QDMA_TX_2SCH_BASE); + } +} + +static ssize_t hnat_qos_toggle_write(struct file *file, const char __user *buffer, + size_t count, loff_t *data) +{ + char buf[8]; + int len = count; + + if ((len > 8) || copy_from_user(buf, buffer, len)) + return -EFAULT; + + if (buf[0] == '0') { + pr_info("HQoS is going to be disabled !\n"); + qos_toggle = 0; + hnat_qos_disable(); + } else if (buf[0] == '1') { + pr_info("HQoS mode is going to be enabled !\n"); + qos_toggle = 1; + } else if (buf[0] == '2') { + pr_info("Per-port-per-queue mode is going to be enabled !\n"); + qos_toggle = 2; + hnat_qos_pppq_enable(); + } + + return len; +} + +static const struct file_operations hnat_qos_toggle_fops = { + .open = hnat_qos_toggle_open, + .read = seq_read, + .llseek = seq_lseek, + .write = hnat_qos_toggle_write, + .release = single_release, +}; + +static int hnat_version_read(struct seq_file *m, void *private) +{ + pr_info("HNAT SW version : %s\nHNAT HW version : %d\n", HNAT_SW_VER, hnat_priv->data->version); + + return 0; +} + +static int hnat_version_open(struct inode *inode, struct file *file) +{ + return single_open(file, hnat_version_read, file->private_data); +} + +static const struct file_operations hnat_version_fops = { + .open = hnat_version_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int get_ppe_mib(u32 ppe_id, int index, u64 *pkt_cnt, u64 *byte_cnt) +{ + struct mtk_hnat *h = hnat_priv; + struct hnat_accounting *acct; + struct foe_entry *entry; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + if (index < 0 || index >= h->foe_etry_num) { + pr_info("Invalid entry index\n"); + return -EINVAL; + } + + acct = hnat_get_count(h, ppe_id, index, 
NULL); + entry = hnat_priv->foe_table_cpu[ppe_id] + index; + + if (!acct) + return -1; + + if (entry->bfib1.state != BIND) + return -1; + + *pkt_cnt = acct->packets; + *byte_cnt = acct->bytes; + + return 0; +} +EXPORT_SYMBOL(get_ppe_mib); + +int is_entry_binding(u32 ppe_id, int index) +{ + struct mtk_hnat *h = hnat_priv; + struct foe_entry *entry; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + if (index < 0 || index >= h->foe_etry_num) { + pr_info("Invalid entry index\n"); + return -EINVAL; + } + + entry = hnat_priv->foe_table_cpu[ppe_id] + index; + + return entry->bfib1.state == BIND; +} +EXPORT_SYMBOL(is_entry_binding); + +#define dump_register(nm) \ + { \ + .name = __stringify(nm), .offset = PPE_##nm, \ + } + +static const struct debugfs_reg32 hnat_regs[] = { + dump_register(GLO_CFG), dump_register(FLOW_CFG), + dump_register(IP_PROT_CHK), dump_register(IP_PROT_0), + dump_register(IP_PROT_1), dump_register(IP_PROT_2), + dump_register(IP_PROT_3), dump_register(TB_CFG), + dump_register(TB_BASE), dump_register(TB_USED), + dump_register(BNDR), dump_register(BIND_LMT_0), + dump_register(BIND_LMT_1), dump_register(KA), + dump_register(UNB_AGE), dump_register(BND_AGE_0), + dump_register(BND_AGE_1), dump_register(HASH_SEED), + dump_register(DFT_CPORT), dump_register(MCAST_PPSE), + dump_register(MCAST_L_0), dump_register(MCAST_H_0), + dump_register(MCAST_L_1), dump_register(MCAST_H_1), + dump_register(MCAST_L_2), dump_register(MCAST_H_2), + dump_register(MCAST_L_3), dump_register(MCAST_H_3), + dump_register(MCAST_L_4), dump_register(MCAST_H_4), + dump_register(MCAST_L_5), dump_register(MCAST_H_5), + dump_register(MCAST_L_6), dump_register(MCAST_H_6), + dump_register(MCAST_L_7), dump_register(MCAST_H_7), + dump_register(MCAST_L_8), dump_register(MCAST_H_8), + dump_register(MCAST_L_9), dump_register(MCAST_H_9), + dump_register(MCAST_L_A), dump_register(MCAST_H_A), + dump_register(MCAST_L_B), dump_register(MCAST_H_B), + dump_register(MCAST_L_C), dump_register(MCAST_H_C), + dump_register(MCAST_L_D), dump_register(MCAST_H_D), + dump_register(MCAST_L_E), dump_register(MCAST_H_E), + dump_register(MCAST_L_F), dump_register(MCAST_H_F), + dump_register(MTU_DRP), dump_register(MTU_VLYR_0), + dump_register(MTU_VLYR_1), dump_register(MTU_VLYR_2), + dump_register(VPM_TPID), dump_register(VPM_TPID), + dump_register(CAH_CTRL), dump_register(CAH_TAG_SRH), + dump_register(CAH_LINE_RW), dump_register(CAH_WDATA), + dump_register(CAH_RDATA), +}; + +int hnat_init_debugfs(struct mtk_hnat *h) +{ + int ret = 0; + struct dentry *root; + struct dentry *file; + long i; + char name[16]; + + root = debugfs_create_dir("hnat", NULL); + if (!root) { + dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__); + ret = -ENOMEM; + goto err0; + } + h->root = root; + + for (i = 0; i < CFG_PPE_NUM; i++) { + h->regset[i] = kzalloc(sizeof(*h->regset[i]), GFP_KERNEL); + if (!h->regset[i]) { + dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__); + ret = -ENOMEM; + goto err1; + } + h->regset[i]->regs = hnat_regs; + h->regset[i]->nregs = ARRAY_SIZE(hnat_regs); + h->regset[i]->base = h->ppe_base[i]; + + snprintf(name, sizeof(name), "regdump%ld", i); + file = debugfs_create_regset32(name, S_IRUGO, + root, h->regset[i]); + if (!file) { + dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__); + ret = -ENOMEM; + goto err1; + } + } + + debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops); + debugfs_create_file("external_interface", S_IRUGO, root, h, + &hnat_ext_fops); + debugfs_create_file("whnat_interface", S_IRUGO, 
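Note: get_ppe_mib() and is_entry_binding() above are exported for use by other modules; a hypothetical caller would typically gate the counter read on the binding check, roughly:

    /* Hypothetical consumer of the exported helpers. */
    static void dump_flow_counters(u32 ppe_id, int index)
    {
        u64 packets, bytes;

        if (is_entry_binding(ppe_id, index) <= 0)
            return;

        if (!get_ppe_mib(ppe_id, index, &packets, &bytes))
            pr_info("flow %d: %llu packets, %llu bytes\n",
                    index, packets, bytes);
    }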
root, h, + &hnat_whnat_fops); + debugfs_create_file("cpu_reason", S_IFREG | S_IRUGO, root, h, + &cpu_reason_fops); + debugfs_create_file("hnat_entry", S_IRUGO | S_IRUGO, root, h, + &hnat_entry_fops); + debugfs_create_file("hnat_setting", S_IRUGO | S_IRUGO, root, h, + &hnat_setting_fops); + debugfs_create_file("mcast_table", S_IRUGO | S_IRUGO, root, h, + &hnat_mcast_fops); + debugfs_create_file("hook_toggle", S_IRUGO | S_IRUGO, root, h, + &hnat_hook_toggle_fops); + debugfs_create_file("mape_toggle", S_IRUGO | S_IRUGO, root, h, + &hnat_mape_toggle_fops); + debugfs_create_file("qos_toggle", S_IRUGO | S_IRUGO, root, h, + &hnat_qos_toggle_fops); + debugfs_create_file("hnat_version", S_IRUGO | S_IRUGO, root, h, + &hnat_version_fops); + debugfs_create_file("hnat_ppd_if", S_IRUGO | S_IRUGO, root, h, + &hnat_ppd_if_fops); + + for (i = 0; i < hnat_priv->data->num_of_sch; i++) { + snprintf(name, sizeof(name), "qdma_sch%ld", i); + debugfs_create_file(name, S_IRUGO, root, (void *)i, + &hnat_sched_fops); + } + + for (i = 0; i < MTK_QDMA_TX_NUM; i++) { + snprintf(name, sizeof(name), "qdma_txq%ld", i); + debugfs_create_file(name, S_IRUGO, root, (void *)i, + &hnat_queue_fops); + } + + return 0; + +err1: + debugfs_remove_recursive(root); +err0: + return ret; +} + +void hnat_deinit_debugfs(struct mtk_hnat *h) +{ + int i; + + debugfs_remove_recursive(h->root); + h->root = NULL; + + for (i = 0; i < CFG_PPE_NUM; i++) + kfree(h->regset[i]); +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.c new file mode 100644 index 000000000..2011b6c5a --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.c @@ -0,0 +1,354 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014-2016 Zhiqiang Yang + */ +#include +#include +#include +#include +#include "hnat.h" + +/* * + * mcast_entry_get - Returns the index of an unused entry + * or an already existed entry in mtbl + */ +static int mcast_entry_get(u16 vlan_id, u32 dst_mac) +{ + int index = -1; + u8 i; + struct ppe_mcast_group *p = hnat_priv->pmcast->mtbl; + u8 max = hnat_priv->pmcast->max_entry; + + for (i = 0; i < max; i++) { + if ((index == -1) && (!p->valid)) { + index = i; /*get the first unused entry index*/ + continue; + } + if ((p->vid == vlan_id) && (p->mac_hi == dst_mac)) { + index = i; + break; + } + p++; + } + if (index == -1) + pr_info("%s:group table is full\n", __func__); + + return index; +} + +static void get_mac_from_mdb_entry(struct br_mdb_entry *entry, + u32 *mac_hi, u16 *mac_lo) +{ + switch (ntohs(entry->addr.proto)) { + case ETH_P_IP: + *mac_lo = 0x0100; + *mac_hi = swab32((entry->addr.u.ip4 & 0xfffffe00) + 0x5e); + break; + case ETH_P_IPV6: + *mac_lo = 0x3333; + *mac_hi = swab32(entry->addr.u.ip6.s6_addr32[3]); + break; + } + trace_printk("%s:group mac_h=0x%08x, mac_l=0x%04x\n", + __func__, *mac_hi, *mac_lo); +} + +/*set_hnat_mtbl - set ppe multicast register*/ +static int set_hnat_mtbl(struct ppe_mcast_group *group, u32 ppe_id, int index) +{ + struct ppe_mcast_h mcast_h; + struct ppe_mcast_l mcast_l; + u16 mac_lo = group->mac_lo; + u32 mac_hi = group->mac_hi; + u8 mc_port = group->mc_port; + void __iomem *reg; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + mcast_h.u.value = 0; + mcast_l.addr = 0; + if (mac_lo == 0x0100) + mcast_h.u.info.mc_mpre_sel = 0; + else if (mac_lo == 0x3333) + mcast_h.u.info.mc_mpre_sel = 1; + + mcast_h.u.info.mc_px_en = mc_port; + mcast_l.addr = mac_hi; + mcast_h.u.info.valid = group->valid; + trace_printk("%s:index=%d,group info=0x%x,addr=0x%x\n", + __func__, index, mcast_h.u.value, mcast_l.addr); + if (index < 0x10) { + reg = hnat_priv->ppe_base[ppe_id] + PPE_MCAST_H_0 + ((index) * 8); + writel(mcast_h.u.value, reg); + reg = hnat_priv->ppe_base[ppe_id] + PPE_MCAST_L_0 + ((index) * 8); + writel(mcast_l.addr, reg); + } else { + index = index - 0x10; + reg = hnat_priv->fe_base + PPE_MCAST_H_10 + ((index) * 8); + writel(mcast_h.u.value, reg); + reg = hnat_priv->fe_base + PPE_MCAST_L_10 + ((index) * 8); + writel(mcast_h.u.value, reg); + } + + return 0; +} + +/** + * hnat_mcast_table_update - + * 1.get a valid group entry + * 2.update group info + * a.update eif&oif count + * b.eif ==0 & oif == 0,delete it from group table + * c.oif != 0,set mc forward port to cpu,else do not forward to cpu + * 3.set the group info to ppe register + */ +static int hnat_mcast_table_update(int type, struct br_mdb_entry *entry) +{ + struct net_device *dev; + u32 mac_hi = 0; + u16 mac_lo = 0; + int i, index; + struct ppe_mcast_group *group; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(&init_net, entry->ifindex); + if (!dev) { + rcu_read_unlock(); + return -ENODEV; + } + rcu_read_unlock(); + + get_mac_from_mdb_entry(entry, &mac_hi, &mac_lo); + index = mcast_entry_get(entry->vid, mac_hi); + if (index == -1) + return -1; + + group = &hnat_priv->pmcast->mtbl[index]; + group->mac_hi = mac_hi; + group->mac_lo = mac_lo; + switch (type) { + case RTM_NEWMDB: + if (IS_LAN(dev) || IS_WAN(dev)) + group->eif++; + else + group->oif++; + group->vid = entry->vid; + group->valid = true; + break; + case RTM_DELMDB: + if (group->valid) { + if (IS_LAN(dev) || IS_WAN(dev)) + group->eif--; + else + group->oif--; + } + break; + } + trace_printk("%s:devname=%s,eif=%d,oif=%d\n", 
__func__, + dev->name, group->eif, group->oif); + if (group->valid) { + if (group->oif && group->eif) + /*eth&wifi both in group,forward to cpu&GDMA1*/ + group->mc_port = (MCAST_TO_PDMA || MCAST_TO_GDMA1); + else if (group->oif) + /*only wifi in group,forward to cpu only*/ + group->mc_port = MCAST_TO_PDMA; + else + /*only eth in group,forward to GDMA1 only*/ + group->mc_port = MCAST_TO_GDMA1; + if (!group->oif && !group->eif) + /*nobody in this group,clear the entry*/ + memset(group, 0, sizeof(struct ppe_mcast_group)); + + for (i = 0; i < CFG_PPE_NUM; i++) + set_hnat_mtbl(group, i, index); + } + + return 0; +} + +static void hnat_mcast_nlmsg_handler(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh; + struct nlattr *nest, *nest2, *info; + struct br_port_msg *bpm; + struct br_mdb_entry *entry; + struct ppe_mcast_table *pmcast; + struct sock *sk; + + pmcast = container_of(work, struct ppe_mcast_table, work); + sk = pmcast->msock->sk; + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + nlh = nlmsg_hdr(skb); + if (!nlmsg_ok(nlh, skb->len)) { + kfree_skb(skb); + continue; + } + bpm = nlmsg_data(nlh); + nest = nlmsg_find_attr(nlh, sizeof(bpm), MDBA_MDB); + if (!nest) { + kfree_skb(skb); + continue; + } + nest2 = nla_find_nested(nest, MDBA_MDB_ENTRY); + if (nest2) { + info = nla_find_nested(nest2, MDBA_MDB_ENTRY_INFO); + if (!info) { + kfree_skb(skb); + continue; + } + + entry = (struct br_mdb_entry *)nla_data(info); + trace_printk("%s:cmd=0x%2x,ifindex=0x%x,state=0x%x", + __func__, nlh->nlmsg_type, + entry->ifindex, entry->state); + trace_printk("vid=0x%x,ip=0x%x,proto=0x%x\n", + entry->vid, entry->addr.u.ip4, + entry->addr.proto); + hnat_mcast_table_update(nlh->nlmsg_type, entry); + } + kfree_skb(skb); + } +} + +static void hnat_mcast_nlmsg_rcv(struct sock *sk) +{ + struct ppe_mcast_table *pmcast = hnat_priv->pmcast; + struct workqueue_struct *queue = pmcast->queue; + struct work_struct *work = &pmcast->work; + + queue_work(queue, work); +} + +static struct socket *hnat_mcast_netlink_open(struct net *net) +{ + struct socket *sock = NULL; + int ret; + struct sockaddr_nl addr; + + ret = sock_create_kern(net, PF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock); + if (ret < 0) + goto out; + + sock->sk->sk_data_ready = hnat_mcast_nlmsg_rcv; + addr.nl_family = PF_NETLINK; + addr.nl_pid = 65536; /*fix me:how to get an unique id?*/ + addr.nl_groups = RTMGRP_MDB; + ret = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr)); + if (ret < 0) + goto out; + + return sock; +out: + if (sock) + sock_release(sock); + + return NULL; +} + +static void hnat_mcast_check_timestamp(struct timer_list *t) +{ + struct foe_entry *entry; + int i, hash_index; + u16 e_ts, foe_ts; + + for (i = 0; i < CFG_PPE_NUM; i++) { + for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) { + entry = hnat_priv->foe_table_cpu[i] + hash_index; + if (entry->bfib1.sta == 1) { + e_ts = (entry->ipv4_hnapt.m_timestamp) & 0xffff; + foe_ts = foe_timestamp(hnat_priv); + if ((foe_ts - e_ts) > 0x3000) + foe_ts = (~(foe_ts)) & 0xffff; + if (abs(foe_ts - e_ts) > 20) + entry_delete(i, hash_index); + } + } + } + mod_timer(&hnat_priv->hnat_mcast_check_timer, jiffies + 10 * HZ); +} + +int hnat_mcast_enable(u32 ppe_id) +{ + struct ppe_mcast_table *pmcast; + + if (ppe_id >= CFG_PPE_NUM) + return -EINVAL; + + pmcast = kzalloc(sizeof(*pmcast), GFP_KERNEL); + if (!pmcast) + return -1; + + if (hnat_priv->data->version == MTK_HNAT_V1) + pmcast->max_entry = 0x10; + else + pmcast->max_entry = 
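Note: hnat_mcast_check_timestamp() above ages bound entries by comparing the 16-bit PPE timestamp against the timestamp stored in each entry, flipping the reference value once the counter appears to have wrapped. The same test, isolated as an illustrative sketch:

    /* Returns true when an entry is older than 20 timestamp units,
     * with the same wrap handling as the timer callback above. */
    static bool foe_mcast_entry_stale(u16 foe_ts, u16 e_ts)
    {
        if ((foe_ts - e_ts) > 0x3000)
            foe_ts = (~foe_ts) & 0xffff;

        return abs(foe_ts - e_ts) > 20;
    }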
MAX_MCAST_ENTRY; + + INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler); + pmcast->queue = create_singlethread_workqueue("ppe_mcast"); + if (!pmcast->queue) + goto err; + + pmcast->msock = hnat_mcast_netlink_open(&init_net); + if (!pmcast->msock) + goto err; + + hnat_priv->pmcast = pmcast; + + /* mt7629 should checkout mcast entry life time manualy */ + if (hnat_priv->data->version == MTK_HNAT_V3) { + timer_setup(&hnat_priv->hnat_mcast_check_timer, + hnat_mcast_check_timestamp, 0); + hnat_priv->hnat_mcast_check_timer.expires = jiffies; + add_timer(&hnat_priv->hnat_mcast_check_timer); + } + + /* Enable multicast table lookup */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1); + /* multicast port0 map to PDMA */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P0_PPSE, 0); + /* multicast port1 map to GMAC1 */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P1_PPSE, 1); + /* multicast port2 map to GMAC2 */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P2_PPSE, 2); + /* multicast port3 map to QDMA */ + cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P3_PPSE, 5); + + return 0; +err: + if (pmcast->queue) + destroy_workqueue(pmcast->queue); + if (pmcast->msock) + sock_release(pmcast->msock); + kfree(pmcast); + + return -1; +} + +int hnat_mcast_disable(void) +{ + struct ppe_mcast_table *pmcast = hnat_priv->pmcast; + + if (!pmcast) + return -EINVAL; + + if (hnat_priv->data->version == MTK_HNAT_V3) + del_timer_sync(&hnat_priv->hnat_mcast_check_timer); + + flush_work(&pmcast->work); + destroy_workqueue(pmcast->queue); + sock_release(pmcast->msock); + kfree(pmcast); + + return 0; +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.h new file mode 100644 index 000000000..ad5b5d1e4 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_mcast.h @@ -0,0 +1,69 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014-2016 Zhiqiang Yang + */ + +#ifndef NF_HNAT_MCAST_H +#define NF_HNAT_MCAST_H + +#define RTMGRP_IPV4_MROUTE 0x20 +#define RTMGRP_MDB 0x2000000 + +#define MAX_MCAST_ENTRY 64 + +#define MCAST_TO_PDMA (0x1 << 0) +#define MCAST_TO_GDMA1 (0x1 << 1) +#define MCAST_TO_GDMA2 (0x1 << 2) + +struct ppe_mcast_group { + u32 mac_hi; /*multicast mac addr*/ + u16 mac_lo; /*multicast mac addr*/ + u16 vid; + u8 mc_port; /*1:forward to cpu,2:forward to GDMA1,4:forward to GDMA2*/ + u8 eif; /*num of eth if added to multi group. 
*/ + u8 oif; /* num of other if added to multi group ,ex wifi.*/ + bool valid; +}; + +struct ppe_mcast_table { + struct workqueue_struct *queue; + struct work_struct work; + struct socket *msock; + struct ppe_mcast_group mtbl[MAX_MCAST_ENTRY]; + u8 max_entry; +}; + +struct ppe_mcast_h { + union { + u32 value; + struct { + u32 mc_vid:12; + u32 mc_qos_qid54:2; /* mt7622 only */ + u32 valid:1; + u32 rev1:1; + /*0:forward to cpu,1:forward to GDMA1*/ + u32 mc_px_en:4; + u32 mc_mpre_sel:2; /* 0=01:00, 2=33:33 */ + u32 mc_vid_cmp:1; + u32 rev2:1; + u32 mc_px_qos_en:4; + u32 mc_qos_qid:4; + } info; + } u; +}; + +struct ppe_mcast_l { + u32 addr; +}; + +int hnat_mcast_enable(u32 ppe_id); +int hnat_mcast_disable(void); + +#endif diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_nf_hook.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_nf_hook.c new file mode 100644 index 000000000..13409aa33 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_nf_hook.c @@ -0,0 +1,2379 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014-2016 Sean Wang + * Copyright (C) 2016-2017 John Crispin + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nf_hnat_mtk.h" +#include "hnat.h" + +#include "../mtk_eth_soc.h" +#include "../mtk_eth_reset.h" + +#define do_ge2ext_fast(dev, skb) \ + ((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \ + skb_hnat_is_hashed(skb) && \ + skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) +#define do_ext2ge_fast_learn(dev, skb) \ + (IS_PPD(dev) && \ + (skb_hnat_sport(skb) == NR_PDMA_PORT || \ + skb_hnat_sport(skb) == NR_QDMA_PORT) && \ + ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \ + get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK))) +#define do_mape_w2l_fast(dev, skb) \ + (mape_toggle && IS_WAN(dev) && (!is_from_mape(skb))) + +static struct ipv6hdr mape_l2w_v6h; +static struct ipv6hdr mape_w2l_v6h; +static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev) +{ + int i; + + for (i = 1; i < MAX_IF_NUM; i++) { + if (hnat_priv->wifi_hook_if[i] == dev) + return i; + } + + return 0; +} + +static inline int get_ext_device_number(void) +{ + int i, number = 0; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) + number += 1; + return number; +} + +static inline int find_extif_from_devname(const char *name) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (!strcmp(name, ext_entry->name)) + return 1; + } + return 0; +} + +static inline int get_index_from_dev(const struct net_device *dev) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (dev == ext_entry->dev) + return ext_entry->dev->ifindex; + } + return 0; +} + +static inline struct net_device *get_dev_from_index(int index) +{ + int i; + struct extdev_entry *ext_entry; + struct net_device *dev = 0; + + 
for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (ext_entry->dev && index == ext_entry->dev->ifindex) { + dev = ext_entry->dev; + break; + } + } + return dev; +} + +static inline struct net_device *get_wandev_from_index(int index) +{ + if (!hnat_priv->g_wandev) + hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan); + + if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index) + return hnat_priv->g_wandev; + return NULL; +} + +static inline int extif_set_dev(struct net_device *dev) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) { + dev_hold(dev); + ext_entry->dev = dev; + pr_info("%s(%s)\n", __func__, dev->name); + + return ext_entry->dev->ifindex; + } + } + + return -1; +} + +static inline int extif_put_dev(struct net_device *dev) +{ + int i; + struct extdev_entry *ext_entry; + + for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) { + ext_entry = hnat_priv->ext_if[i]; + if (ext_entry->dev == dev) { + ext_entry->dev = NULL; + dev_put(dev); + pr_info("%s(%s)\n", __func__, dev->name); + + return 0; + } + } + + return -1; +} + +int ext_if_add(struct extdev_entry *ext_entry) +{ + int len = get_ext_device_number(); + + if (len < MAX_EXT_DEVS) + hnat_priv->ext_if[len++] = ext_entry; + + return len; +} + +int ext_if_del(struct extdev_entry *ext_entry) +{ + int i, j; + + for (i = 0; i < MAX_EXT_DEVS; i++) { + if (hnat_priv->ext_if[i] == ext_entry) { + for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++) + hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1]; + hnat_priv->ext_if[j] = NULL; + break; + } + } + + return i; +} + +void foe_clear_all_bind_entries(struct net_device *dev) +{ + int i, hash_index; + struct foe_entry *entry; + + if (!IS_LAN(dev) && !IS_WAN(dev) && + !find_extif_from_devname(dev->name) && + !dev->netdev_ops->ndo_flow_offload_check) + return; + + for (i = 0; i < CFG_PPE_NUM; i++) { + cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG, + SMA, SMA_ONLY_FWD_CPU); + + for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) { + entry = hnat_priv->foe_table_cpu[i] + hash_index; + if (entry->bfib1.state == BIND) { + entry->ipv4_hnapt.udib1.state = INVALID; + entry->ipv4_hnapt.udib1.time_stamp = + readl((hnat_priv->fe_base + 0x0010)) & 0xFF; + } + } + } + + /* clear HWNAT cache */ + hnat_cache_ebl(1); + + mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ); +} + +static void gmac_ppe_fwd_enable(struct net_device *dev) +{ + if (IS_LAN(dev) || IS_GMAC1_MODE) + set_gmac_ppe_fwd(0, 1); + else if (IS_WAN(dev)) + set_gmac_ppe_fwd(1, 1); +} + +int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event, + void *ptr) +{ + struct net_device *dev; + + dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_UP: + gmac_ppe_fwd_enable(dev); + + extif_set_dev(dev); + + break; + case NETDEV_GOING_DOWN: + if (!get_wifi_hook_if_index_from_dev(dev)) + extif_put_dev(dev); + + foe_clear_all_bind_entries(dev); + + break; + case NETDEV_UNREGISTER: + if (hnat_priv->g_ppdev == dev) { + hnat_priv->g_ppdev = NULL; + dev_put(dev); + } + if (hnat_priv->g_wandev == dev) { + hnat_priv->g_wandev = NULL; + dev_put(dev); + } + + break; + case NETDEV_REGISTER: + if (IS_PPD(dev) && !hnat_priv->g_ppdev) + hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd); + if (IS_WAN(dev) && !hnat_priv->g_wandev) 
+ hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan); + + break; + case MTK_FE_RESET_NAT_DONE: + pr_info("[%s] HNAT driver starts to do warm init !\n", __func__); + hnat_warm_init(); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +void foe_clear_entry(struct neighbour *neigh) +{ + u32 *daddr = (u32 *)neigh->primary_key; + unsigned char h_dest[ETH_ALEN]; + struct foe_entry *entry; + int i, hash_index; + u32 dip; + + dip = (u32)(*daddr); + + for (i = 0; i < CFG_PPE_NUM; i++) { + if (!hnat_priv->foe_table_cpu[i]) + continue; + + for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) { + entry = hnat_priv->foe_table_cpu[i] + hash_index; + if (entry->bfib1.state == BIND && + entry->ipv4_hnapt.new_dip == ntohl(dip)) { + *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi); + *((u16 *)&h_dest[4]) = + swab16(entry->ipv4_hnapt.dmac_lo); + if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) { + pr_info("%s: state=%d\n", __func__, + neigh->nud_state); + cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG, + SMA, SMA_ONLY_FWD_CPU); + + entry->ipv4_hnapt.udib1.state = INVALID; + entry->ipv4_hnapt.udib1.time_stamp = + readl((hnat_priv->fe_base + 0x0010)) & 0xFF; + + /* clear HWNAT cache */ + hnat_cache_ebl(1); + + mod_timer(&hnat_priv->hnat_sma_build_entry_timer, + jiffies + 3 * HZ); + + pr_info("Delete old entry: dip =%pI4\n", &dip); + pr_info("Old mac= %pM\n", h_dest); + pr_info("New mac= %pM\n", neigh->ha); + } + } + } + } +} + +int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event, + void *ptr) +{ + struct net_device *dev = NULL; + struct neighbour *neigh = NULL; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + neigh = ptr; + dev = neigh->dev; + if (dev) + foe_clear_entry(neigh); + break; + } + + return NOTIFY_DONE; +} + +unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h) +{ + struct ethhdr *eth = NULL; + struct ipv6hdr *ip6h = NULL; + struct iphdr *iph = NULL; + + if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) || + (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { + return -1; + } + + /* point to L3 */ + memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN); + memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN); + + eth = (struct ethhdr *)(skb->data - ETH_HLEN); + eth->h_proto = htons(ETH_P_IPV6); + skb->protocol = htons(ETH_P_IPV6); + + iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN); + ip6h = (struct ipv6hdr *)(skb->data); + ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */ + + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN); + return 0; +} + +static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev, + struct ethhdr *eth) +{ + skb->pkt_type = PACKET_HOST; + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + } +} + +unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in, + const char *func) +{ + if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) { + u16 vlan_id = 0; + skb_set_network_header(skb, 0); + skb_push(skb, ETH_HLEN); + set_to_ppe(skb); + + vlan_id = skb_vlan_tag_get_id(skb); + if (vlan_id) { + skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci); + if (!skb) + return -1; + } + + /*set where we come from*/ + skb->vlan_proto = htons(ETH_P_8021Q); + skb->vlan_tci = + 
(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK)); + trace_printk( + "%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n", + __func__, ntohs(skb->vlan_proto), skb->vlan_tci, + in->name, hnat_priv->g_ppdev->name); + skb->dev = hnat_priv->g_ppdev; + dev_queue_xmit(skb); + trace_printk("%s: called from %s successfully\n", __func__, func); + return 0; + } + + trace_printk("%s: called from %s fail\n", __func__, func); + return -1; +} + +unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func) +{ + struct ethhdr *eth = eth_hdr(skb); + struct net_device *dev; + struct foe_entry *entry; + + trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__, + ntohs(skb->vlan_proto), skb->vlan_tci); + + dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK); + + if (dev) { + /*set where we to go*/ + skb->dev = dev; + skb->vlan_proto = 0; + skb->vlan_tci = 0; + + if (ntohs(eth->h_proto) == ETH_P_8021Q) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + return -1; + } + + if (IS_BOND_MODE && + (((hnat_priv->data->version == MTK_HNAT_V4) && + (skb_hnat_entry(skb) != 0x7fff)) || + ((hnat_priv->data->version != MTK_HNAT_V4) && + (skb_hnat_entry(skb) != 0x3fff)))) + skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4); + + set_from_extge(skb); + fix_skb_packet_type(skb, skb->dev, eth); + netif_rx(skb); + trace_printk("%s: called from %s successfully\n", __func__, + func); + return 0; + } else { + /* MapE WAN --> LAN/WLAN PingPong. */ + dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK); + if (mape_toggle && dev) { + if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) { + skb_set_mac_header(skb, -ETH_HLEN); + skb->dev = dev; + set_from_mape(skb); + skb->vlan_proto = 0; + skb->vlan_tci = 0; + fix_skb_packet_type(skb, skb->dev, eth_hdr(skb)); + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + entry->bfib1.pkt_type = IPV4_HNAPT; + netif_rx(skb); + return 0; + } + } + trace_printk("%s: called from %s fail\n", __func__, func); + return -1; + } +} + +unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func) +{ + /*set where we to go*/ + u8 index; + struct foe_entry *entry; + struct net_device *dev; + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + + if (IS_IPV4_GRP(entry)) + index = entry->ipv4_hnapt.act_dp; + else + index = entry->ipv6_5t_route.act_dp; + + skb->dev = get_dev_from_index(index); + + if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) { + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NF_ACCEPT; + + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + return NF_ACCEPT; + + skb_pull_rcsum(skb, VLAN_HLEN); + + memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN, + 2 * ETH_ALEN); + } + + if (skb->dev) { + skb_set_network_header(skb, 0); + skb_push(skb, ETH_HLEN); + dev_queue_xmit(skb); + trace_printk("%s: called from %s successfully\n", __func__, + func); + return 0; + } else { + if (mape_toggle) { + /* Add ipv6 header mape for lan/wlan -->wan */ + dev = get_wandev_from_index(index); + if (dev) { + if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) { + skb_set_network_header(skb, 0); + skb_push(skb, ETH_HLEN); + skb_set_mac_header(skb, 0); + skb->dev = dev; + dev_queue_xmit(skb); + return 0; + } + trace_printk("%s: called from %s fail[MapE]\n", __func__, + func); + return -1; + } + } + } + /*if external devices is down, invalidate related ppe entry*/ + if (entry_hnat_is_bound(entry)) { + entry->bfib1.state = INVALID; + if (IS_IPV4_GRP(entry)) + entry->ipv4_hnapt.act_dp = 0; + else 
+ entry->ipv6_5t_route.act_dp = 0; + + /* clear HWNAT cache */ + hnat_cache_ebl(1); + } + trace_printk("%s: called from %s fail, index=%x\n", __func__, + func, index); + return -1; +} + +static void pre_routing_print(struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const char *func) +{ + trace_printk( + "[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n", + __func__, in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb), + func); +} + +static void post_routing_print(struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const char *func) +{ + trace_printk( + "[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n", + __func__, in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb), + func); +} + +static inline void hnat_set_iif(const struct nf_hook_state *state, + struct sk_buff *skb, int val) +{ + if (IS_WHNAT(state->in) && FROM_WED(skb)) { + return; + } else if (IS_LAN(state->in)) { + skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN; + } else if (IS_PPD(state->in)) { + skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD; + } else if (IS_EXT(state->in)) { + skb_hnat_iface(skb) = FOE_MAGIC_EXT; + } else if (IS_WAN(state->in)) { + skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN; + } else if (!IS_BR(state->in)) { + if (state->in->netdev_ops->ndo_flow_offload_check) { + skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL; + } else { + skb_hnat_iface(skb) = FOE_INVALID; + + if (is_magic_tag_valid(skb) && + IS_SPACE_AVAILABLE_HEAD(skb)) + memset(skb_hnat_info(skb), 0, FOE_INFO_LEN); + } + } +} + +static inline void hnat_set_alg(const struct nf_hook_state *state, + struct sk_buff *skb, int val) +{ + skb_hnat_alg(skb) = val; +} + +static inline void hnat_set_head_frags(const struct nf_hook_state *state, + struct sk_buff *head_skb, int val, + void (*fn)(const struct nf_hook_state *state, + struct sk_buff *skb, int val)) +{ + struct sk_buff *segs = skb_shinfo(head_skb)->frag_list; + + fn(state, head_skb, val); + while (segs) { + fn(state, segs, val); + segs = segs->next; + } +} + +static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h) +{ + entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2]; + entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1]; + entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0]; +} + +unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in, + const char *func) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct iphdr _iphdr; + struct iphdr *iph; + struct ethhdr *eth; + + /* WAN -> LAN/WLAN MapE. */ + if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) { + iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr); + if (unlikely(!iph)) + return -1; + + switch (iph->protocol) { + case IPPROTO_UDP: + case IPPROTO_TCP: + break; + default: + return -1; + } + mape_w2l_v6h = *ip6h; + + /* Remove ipv6 header. 
*/ + memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN, + skb->data - ETH_HLEN, ETH_HLEN); + skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN); + skb_set_mac_header(skb, 0); + skb_set_network_header(skb, ETH_HLEN); + skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr)); + + eth = eth_hdr(skb); + eth->h_proto = htons(ETH_P_IP); + set_to_ppe(skb); + + skb->vlan_proto = htons(ETH_P_8021Q); + skb->vlan_tci = + (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK)); + + if (!hnat_priv->g_ppdev) + hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd); + + skb->dev = hnat_priv->g_ppdev; + skb->protocol = htons(ETH_P_IP); + + dev_queue_xmit(skb); + + return 0; + } + return -1; +} + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +unsigned int do_hnat_mape_w2l(struct sk_buff *skb, const struct net_device *in, + const char *func) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct iphdr _iphdr; + struct iphdr *iph; + struct foe_entry *entry; + struct tcpudphdr _ports; + const struct tcpudphdr *pptr; + int udp = 0; + + /* WAN -> LAN/WLAN MapE learn info(include innner IPv4 header info). */ + if (ip6h->nexthdr == NEXTHDR_IPIP) { + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + + entry->ipv4_dslite.tunnel_sipv6_0 = + ntohl(ip6h->saddr.s6_addr32[0]); + entry->ipv4_dslite.tunnel_sipv6_1 = + ntohl(ip6h->saddr.s6_addr32[1]); + entry->ipv4_dslite.tunnel_sipv6_2 = + ntohl(ip6h->saddr.s6_addr32[2]); + entry->ipv4_dslite.tunnel_sipv6_3 = + ntohl(ip6h->saddr.s6_addr32[3]); + + entry->ipv4_dslite.tunnel_dipv6_0 = + ntohl(ip6h->daddr.s6_addr32[0]); + entry->ipv4_dslite.tunnel_dipv6_1 = + ntohl(ip6h->daddr.s6_addr32[1]); + entry->ipv4_dslite.tunnel_dipv6_2 = + ntohl(ip6h->daddr.s6_addr32[2]); + entry->ipv4_dslite.tunnel_dipv6_3 = + ntohl(ip6h->daddr.s6_addr32[3]); + + ppe_fill_flow_lbl(entry, ip6h); + + iph = skb_header_pointer(skb, IPV6_HDR_LEN, + sizeof(_iphdr), &_iphdr); + if (unlikely(!iph)) + return NF_ACCEPT; + + switch (iph->protocol) { + case IPPROTO_UDP: + udp = 1; + case IPPROTO_TCP: + break; + + default: + return NF_ACCEPT; + } + + pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4, + sizeof(_ports), &_ports); + if (unlikely(!pptr)) + return NF_ACCEPT; + + entry->bfib1.udp = udp; + + entry->ipv4_dslite.new_sip = ntohl(iph->saddr); + entry->ipv4_dslite.new_dip = ntohl(iph->daddr); + entry->ipv4_dslite.new_sport = ntohs(pptr->src); + entry->ipv4_dslite.new_dport = ntohs(pptr->dst); + + return 0; + } + return -1; +} +#endif + +static unsigned int is_ppe_support_type(struct sk_buff *skb) +{ + struct ethhdr *eth = NULL; + struct iphdr *iph = NULL; + struct ipv6hdr *ip6h = NULL; + struct iphdr _iphdr; + + eth = eth_hdr(skb); + if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) || + is_broadcast_ether_addr(eth->h_dest)) + return 0; + + switch (ntohs(skb->protocol)) { + case ETH_P_IP: + iph = ip_hdr(skb); + + /* do not accelerate non tcp/udp traffic */ + if ((iph->protocol == IPPROTO_TCP) || + (iph->protocol == IPPROTO_UDP) || + (iph->protocol == IPPROTO_IPV6)) { + return 1; + } + + break; + case ETH_P_IPV6: + ip6h = ipv6_hdr(skb); + + if ((ip6h->nexthdr == NEXTHDR_TCP) || + (ip6h->nexthdr == NEXTHDR_UDP)) { + return 1; + } else if (ip6h->nexthdr == NEXTHDR_IPIP) { + iph = skb_header_pointer(skb, IPV6_HDR_LEN, + sizeof(_iphdr), &_iphdr); + if (unlikely(!iph)) + return 0; + + if ((iph->protocol == IPPROTO_TCP) || + (iph->protocol == IPPROTO_UDP)) { + return 1; + } + + } + + break; + case ETH_P_8021Q: + return 1; + } + + return 0; +} + +static unsigned int 
+mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + if (!is_ppe_support_type(skb)) { + hnat_set_head_frags(state, skb, 1, hnat_set_alg); + return NF_ACCEPT; + } + + hnat_set_head_frags(state, skb, -1, hnat_set_iif); + + pre_routing_print(skb, state->in, state->out, __func__); + + /* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/ + if (do_ext2ge_fast_try(state->in, skb)) { + if (!do_hnat_ext_to_ge(skb, state->in, __func__)) + return NF_STOLEN; + if (!skb) + goto drop; + return NF_ACCEPT; + } + + /* packets form ge -> external device + * For standalone wan interface + */ + if (do_ge2ext_fast(state->in, skb)) { + if (!do_hnat_ge_to_ext(skb, __func__)) + return NF_STOLEN; + goto drop; + } + + /* MapE need remove ipv6 header and pingpong. */ + if (do_mape_w2l_fast(state->in, skb)) { +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (mape_toggle && do_hnat_mape_w2l(skb, state->in, __func__)) + return NF_ACCEPT; +#else + if (!do_hnat_mape_w2l_fast(skb, state->in, __func__)) + return NF_STOLEN; + else + return NF_ACCEPT; +#endif + } + + if (is_from_mape(skb)) + clr_from_extge(skb); + + return NF_ACCEPT; +drop: + printk_ratelimited(KERN_WARNING + "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, state->in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + if (!is_ppe_support_type(skb)) { + hnat_set_head_frags(state, skb, 1, hnat_set_alg); + return NF_ACCEPT; + } + + hnat_set_head_frags(state, skb, -1, hnat_set_iif); + + pre_routing_print(skb, state->in, state->out, __func__); + + /* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/ + if (do_ext2ge_fast_try(state->in, skb)) { + if (!do_hnat_ext_to_ge(skb, state->in, __func__)) + return NF_STOLEN; + if (!skb) + goto drop; + return NF_ACCEPT; + } + + /* packets form ge -> external device + * For standalone wan interface + */ + if (do_ge2ext_fast(state->in, skb)) { + if (!do_hnat_ge_to_ext(skb, __func__)) + return NF_STOLEN; + goto drop; + } + + return NF_ACCEPT; +drop: + printk_ratelimited(KERN_WARNING + "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, state->in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct vlan_ethhdr *veth; + + if (IS_HQOS_MODE && hnat_priv->data->whnat) { + veth = (struct vlan_ethhdr *)skb_mac_header(skb); + + if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) { + skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff; + skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU; + } + } + + if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) { + hnat_set_head_frags(state, skb, 1, hnat_set_alg); + return NF_ACCEPT; + } + + hnat_set_head_frags(state, skb, -1, hnat_set_iif); + + pre_routing_print(skb, state->in, state->out, __func__); + + if (unlikely(debug_level >= 7)) { + hnat_cpu_reason_cnt(skb); + if (skb_hnat_reason(skb) == dbg_cpu_reason) + foe_dump_pkt(skb); + } + + /* packets from external devices -> xxx ,step 1 , learning stage & 
bound stage*/ + if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) && + !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) { + if (!hnat_priv->g_ppdev) + hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd); + + if (!do_hnat_ext_to_ge(skb, state->in, __func__)) + return NF_STOLEN; + if (!skb) + goto drop; + return NF_ACCEPT; + } + + if (hnat_priv->data->whnat) { + if (skb_hnat_iface(skb) == FOE_MAGIC_EXT) + clr_from_extge(skb); + + /* packets from external devices -> xxx ,step 2, learning stage */ + if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle || + (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) { + if (!do_hnat_ext_to_ge2(skb, __func__)) + return NF_STOLEN; + goto drop; + } + + /* packets form ge -> external device */ + if (do_ge2ext_fast(state->in, skb)) { + if (!do_hnat_ge_to_ext(skb, __func__)) + return NF_STOLEN; + goto drop; + } + } + + /* MapE need remove ipv6 header and pingpong. (bridge mode) */ + if (do_mape_w2l_fast(state->in, skb)) { + if (!do_hnat_mape_w2l_fast(skb, state->in, __func__)) + return NF_STOLEN; + else + return NF_ACCEPT; + } + + return NF_ACCEPT; +drop: + printk_ratelimited(KERN_WARNING + "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, state->in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb, + const struct net_device *out, + struct flow_offload_hw_path *hw_path) +{ + const struct in6_addr *ipv6_nexthop; + struct neighbour *neigh = NULL; + struct dst_entry *dst = skb_dst(skb); + struct ethhdr *eth; + + if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) { + memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN); + memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN); + return 0; + } + + rcu_read_lock_bh(); + ipv6_nexthop = + rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop); + if (unlikely(!neigh)) { + dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__, + &ipv6_hdr(skb)->daddr); + rcu_read_unlock_bh(); + return -1; + } + + /* why do we get all zero ethernet address ? 
*/ + if (!is_valid_ether_addr(neigh->ha)) { + rcu_read_unlock_bh(); + return -1; + } + + if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) { + /*copy ether type for DS-Lite and MapE */ + eth = (struct ethhdr *)(skb->data - ETH_HLEN); + eth->h_proto = skb->protocol; + } else { + eth = eth_hdr(skb); + } + + ether_addr_copy(eth->h_dest, neigh->ha); + ether_addr_copy(eth->h_source, out->dev_addr); + + rcu_read_unlock_bh(); + + return 0; +} + +static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb, + const struct net_device *out, + struct flow_offload_hw_path *hw_path) +{ + u32 nexthop; + struct neighbour *neigh; + struct dst_entry *dst = skb_dst(skb); + struct rtable *rt = (struct rtable *)dst; + struct net_device *dev = (__force struct net_device *)out; + + if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) { + memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN); + memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN); + return 0; + } + + rcu_read_lock_bh(); + nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr); + neigh = __ipv4_neigh_lookup_noref(dev, nexthop); + if (unlikely(!neigh)) { + dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__, + &ip_hdr(skb)->daddr); + rcu_read_unlock_bh(); + return -1; + } + + /* why do we get all zero ethernet address ? */ + if (!is_valid_ether_addr(neigh->ha)) { + rcu_read_unlock_bh(); + return -1; + } + + memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN); + memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN); + + rcu_read_unlock_bh(); + + return 0; +} + +static u16 ppe_get_chkbase(struct iphdr *iph) +{ + u16 org_chksum = ntohs(iph->check); + u16 org_tot_len = ntohs(iph->tot_len); + u16 org_id = ntohs(iph->id); + u16 chksum_tmp, tot_len_tmp, id_tmp; + u32 tmp = 0; + u16 chksum_base = 0; + + chksum_tmp = ~(org_chksum); + tot_len_tmp = ~(org_tot_len); + id_tmp = ~(org_id); + tmp = chksum_tmp + tot_len_tmp + id_tmp; + tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF); + tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF); + chksum_base = tmp & 0xFFFF; + + return chksum_base; +} + +struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry, + struct flow_offload_hw_path *hw_path) +{ + switch (entry.bfib1.pkt_type) { + case IPV4_HNAPT: + case IPV4_HNAT: + entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest)); + entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)ð->h_dest[4])); + entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source)); + entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4])); + entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid; + break; + case IPV4_DSLITE: + case IPV4_MAP_E: + case IPV6_6RD: + case IPV6_5T_ROUTE: + case IPV6_3T_ROUTE: + entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest)); + entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)ð->h_dest[4])); + entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source)); + entry.ipv6_5t_route.smac_lo = + swab16(*((u16 *)ð->h_source[4])); + entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid; + break; + } + return entry; +} + +struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry, + struct flow_offload_hw_path *hw_path) +{ + entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0; + entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0; + entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0; + entry.bfib1.cah = 1; + entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ? 
+ readl(hnat_priv->fe_base + 0x0010) & (0xFF) : + readl(hnat_priv->fe_base + 0x0010) & (0x7FFF); + + switch (entry.bfib1.pkt_type) { + case IPV4_HNAPT: + case IPV4_HNAT: + if (hnat_priv->data->mcast && + is_multicast_ether_addr(ð->h_dest[0])) { + entry.ipv4_hnapt.iblk2.mcast = 1; + if (hnat_priv->data->version == MTK_HNAT_V3) { + entry.bfib1.sta = 1; + entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv); + } + } else { + entry.ipv4_hnapt.iblk2.mcast = 0; + } + + entry.ipv4_hnapt.iblk2.port_ag = + (hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f; + break; + case IPV4_DSLITE: + case IPV4_MAP_E: + case IPV6_6RD: + case IPV6_5T_ROUTE: + case IPV6_3T_ROUTE: + if (hnat_priv->data->mcast && + is_multicast_ether_addr(ð->h_dest[0])) { + entry.ipv6_5t_route.iblk2.mcast = 1; + if (hnat_priv->data->version == MTK_HNAT_V3) { + entry.bfib1.sta = 1; + entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv); + } + } else { + entry.ipv6_5t_route.iblk2.mcast = 0; + } + + entry.ipv6_5t_route.iblk2.port_ag = + (hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f; + break; + } + return entry; +} + +static unsigned int skb_to_hnat_info(struct sk_buff *skb, + const struct net_device *dev, + struct foe_entry *foe, + struct flow_offload_hw_path *hw_path) +{ + struct foe_entry entry = { 0 }; + int whnat = IS_WHNAT(dev); + struct ethhdr *eth; + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcpudphdr _ports; + const struct tcpudphdr *pptr; + u32 gmac = NR_DISCARD; + int udp = 0; + u32 qid = 0; + u32 port_id = 0; + int mape = 0; + + if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) + /* point to ethernet header for DS-Lite and MapE */ + eth = (struct ethhdr *)(skb->data - ETH_HLEN); + else + eth = eth_hdr(skb); + + /*do not bind multicast if PPE mcast not enable*/ + if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest)) + return 0; + + entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/ + entry.bfib1.state = foe->udib1.state; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + entry.bfib1.sp = foe->udib1.sp; +#endif + + switch (ntohs(eth->h_proto)) { + case ETH_P_IP: + iph = ip_hdr(skb); + switch (iph->protocol) { + case IPPROTO_UDP: + udp = 1; + /* fallthrough */ + case IPPROTO_TCP: + entry.ipv4_hnapt.etype = htons(ETH_P_IP); + + /* DS-Lite WAN->LAN */ + if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE || + entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) { + entry.ipv4_dslite.sip = foe->ipv4_dslite.sip; + entry.ipv4_dslite.dip = foe->ipv4_dslite.dip; + entry.ipv4_dslite.sport = + foe->ipv4_dslite.sport; + entry.ipv4_dslite.dport = + foe->ipv4_dslite.dport; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (entry.bfib1.pkt_type == IPV4_MAP_E) { + pptr = skb_header_pointer(skb, + iph->ihl * 4, + sizeof(_ports), + &_ports); + if (unlikely(!pptr)) + return -1; + + entry.ipv4_dslite.new_sip = + ntohl(iph->saddr); + entry.ipv4_dslite.new_dip = + ntohl(iph->daddr); + entry.ipv4_dslite.new_sport = + ntohs(pptr->src); + entry.ipv4_dslite.new_dport = + ntohs(pptr->dst); + } +#endif + + entry.ipv4_dslite.tunnel_sipv6_0 = + foe->ipv4_dslite.tunnel_sipv6_0; + entry.ipv4_dslite.tunnel_sipv6_1 = + foe->ipv4_dslite.tunnel_sipv6_1; + entry.ipv4_dslite.tunnel_sipv6_2 = + foe->ipv4_dslite.tunnel_sipv6_2; + entry.ipv4_dslite.tunnel_sipv6_3 = + foe->ipv4_dslite.tunnel_sipv6_3; + + entry.ipv4_dslite.tunnel_dipv6_0 = + foe->ipv4_dslite.tunnel_dipv6_0; + entry.ipv4_dslite.tunnel_dipv6_1 = + foe->ipv4_dslite.tunnel_dipv6_1; + entry.ipv4_dslite.tunnel_dipv6_2 = + foe->ipv4_dslite.tunnel_dipv6_2; + 
entry.ipv4_dslite.tunnel_dipv6_3 = + foe->ipv4_dslite.tunnel_dipv6_3; + + entry.ipv4_dslite.bfib1.rmt = 1; + entry.ipv4_dslite.iblk2.dscp = iph->tos; + entry.ipv4_dslite.vlan1 = hw_path->vlan_id; + if (hnat_priv->data->per_flow_accounting) + entry.ipv4_dslite.iblk2.mibf = 1; + + } else { + entry.ipv4_hnapt.iblk2.dscp = iph->tos; + if (hnat_priv->data->per_flow_accounting) + entry.ipv4_hnapt.iblk2.mibf = 1; + + entry.ipv4_hnapt.vlan1 = hw_path->vlan_id; + + if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) { + entry.bfib1.vlan_layer += 1; + + if (entry.ipv4_hnapt.vlan1) + entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK); + else + entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK); + } + + entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip; + entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip; + entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport; + entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport; + + entry.ipv4_hnapt.new_sip = ntohl(iph->saddr); + entry.ipv4_hnapt.new_dip = ntohl(iph->daddr); + } + + entry.ipv4_hnapt.bfib1.udp = udp; + if (IS_IPV4_HNAPT(foe)) { + pptr = skb_header_pointer(skb, iph->ihl * 4, + sizeof(_ports), + &_ports); + if (unlikely(!pptr)) + return -1; + + entry.ipv4_hnapt.new_sport = ntohs(pptr->src); + entry.ipv4_hnapt.new_dport = ntohs(pptr->dst); + } + + break; + + default: + return -1; + } + trace_printk( + "[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n", + __func__, skb->head, skb->data, iph, skb->len, + skb->data_len); + break; + + case ETH_P_IPV6: + ip6h = ipv6_hdr(skb); + switch (ip6h->nexthdr) { + case NEXTHDR_UDP: + udp = 1; + /* fallthrough */ + case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */ + entry.ipv6_5t_route.etype = htons(ETH_P_IPV6); + + entry.ipv6_5t_route.vlan1 = hw_path->vlan_id; + + if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) { + entry.bfib1.vlan_layer += 1; + + if (entry.ipv6_5t_route.vlan1) + entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK); + else + entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK); + } + + if (hnat_priv->data->per_flow_accounting) + entry.ipv6_5t_route.iblk2.mibf = 1; + entry.ipv6_5t_route.bfib1.udp = udp; + + if (IS_IPV6_6RD(foe)) { + entry.ipv6_5t_route.bfib1.rmt = 1; + entry.ipv6_6rd.tunnel_sipv4 = + foe->ipv6_6rd.tunnel_sipv4; + entry.ipv6_6rd.tunnel_dipv4 = + foe->ipv6_6rd.tunnel_dipv4; + } + + entry.ipv6_3t_route.ipv6_sip0 = + foe->ipv6_3t_route.ipv6_sip0; + entry.ipv6_3t_route.ipv6_sip1 = + foe->ipv6_3t_route.ipv6_sip1; + entry.ipv6_3t_route.ipv6_sip2 = + foe->ipv6_3t_route.ipv6_sip2; + entry.ipv6_3t_route.ipv6_sip3 = + foe->ipv6_3t_route.ipv6_sip3; + + entry.ipv6_3t_route.ipv6_dip0 = + foe->ipv6_3t_route.ipv6_dip0; + entry.ipv6_3t_route.ipv6_dip1 = + foe->ipv6_3t_route.ipv6_dip1; + entry.ipv6_3t_route.ipv6_dip2 = + foe->ipv6_3t_route.ipv6_dip2; + entry.ipv6_3t_route.ipv6_dip3 = + foe->ipv6_3t_route.ipv6_dip3; + + if (IS_IPV6_3T_ROUTE(foe)) { + entry.ipv6_3t_route.prot = + foe->ipv6_3t_route.prot; + entry.ipv6_3t_route.hph = + foe->ipv6_3t_route.hph; + } + + if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) { + entry.ipv6_5t_route.sport = + foe->ipv6_5t_route.sport; + entry.ipv6_5t_route.dport = + foe->ipv6_5t_route.dport; + } + entry.ipv6_5t_route.iblk2.dscp = + (ip6h->priority << 4 | + (ip6h->flow_lbl[0] >> 4)); + break; + + case NEXTHDR_IPIP: + if ((!mape_toggle && + entry.bfib1.pkt_type == IPV4_DSLITE) || + (mape_toggle && + entry.bfib1.pkt_type == IPV4_MAP_E)) { + /* DS-Lite LAN->WAN */ + entry.ipv4_dslite.bfib1.udp = + foe->ipv4_dslite.bfib1.udp; + 
entry.ipv4_dslite.sip = foe->ipv4_dslite.sip; + entry.ipv4_dslite.dip = foe->ipv4_dslite.dip; + entry.ipv4_dslite.sport = + foe->ipv4_dslite.sport; + entry.ipv4_dslite.dport = + foe->ipv4_dslite.dport; + + entry.ipv4_dslite.tunnel_sipv6_0 = + ntohl(ip6h->saddr.s6_addr32[0]); + entry.ipv4_dslite.tunnel_sipv6_1 = + ntohl(ip6h->saddr.s6_addr32[1]); + entry.ipv4_dslite.tunnel_sipv6_2 = + ntohl(ip6h->saddr.s6_addr32[2]); + entry.ipv4_dslite.tunnel_sipv6_3 = + ntohl(ip6h->saddr.s6_addr32[3]); + + entry.ipv4_dslite.tunnel_dipv6_0 = + ntohl(ip6h->daddr.s6_addr32[0]); + entry.ipv4_dslite.tunnel_dipv6_1 = + ntohl(ip6h->daddr.s6_addr32[1]); + entry.ipv4_dslite.tunnel_dipv6_2 = + ntohl(ip6h->daddr.s6_addr32[2]); + entry.ipv4_dslite.tunnel_dipv6_3 = + ntohl(ip6h->daddr.s6_addr32[3]); + + ppe_fill_flow_lbl(&entry, ip6h); + + entry.ipv4_dslite.priority = ip6h->priority; + entry.ipv4_dslite.hop_limit = ip6h->hop_limit; + entry.ipv4_dslite.vlan1 = hw_path->vlan_id; + if (hnat_priv->data->per_flow_accounting) + entry.ipv4_dslite.iblk2.mibf = 1; + /* Map-E LAN->WAN record inner IPv4 header info. */ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + if (mape_toggle) { + entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp; + entry.ipv4_dslite.new_sip = foe->ipv4_dslite.new_sip; + entry.ipv4_dslite.new_dip = foe->ipv4_dslite.new_dip; + entry.ipv4_dslite.new_sport = foe->ipv4_dslite.new_sport; + entry.ipv4_dslite.new_dport = foe->ipv4_dslite.new_dport; + } +#endif + } else if (mape_toggle && + entry.bfib1.pkt_type == IPV4_HNAPT) { + /* MapE LAN -> WAN */ + mape = 1; + entry.ipv4_hnapt.iblk2.dscp = + foe->ipv4_hnapt.iblk2.dscp; + if (hnat_priv->data->per_flow_accounting) + entry.ipv4_hnapt.iblk2.mibf = 1; + + if (IS_GMAC1_MODE) + entry.ipv4_hnapt.vlan1 = 1; + else + entry.ipv4_hnapt.vlan1 = hw_path->vlan_id; + + entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip; + entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip; + entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport; + entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport; + + entry.ipv4_hnapt.new_sip = + foe->ipv4_hnapt.new_sip; + entry.ipv4_hnapt.new_dip = + foe->ipv4_hnapt.new_dip; + entry.ipv4_hnapt.etype = htons(ETH_P_IP); + + if (IS_HQOS_MODE) { + entry.ipv4_hnapt.iblk2.qid = + (hnat_priv->data->version == MTK_HNAT_V4) ? 
+ skb->mark & 0x7f : skb->mark & 0xf; + entry.ipv4_hnapt.iblk2.fqos = 1; + } + + entry.ipv4_hnapt.bfib1.udp = + foe->ipv4_hnapt.bfib1.udp; + + entry.ipv4_hnapt.new_sport = + foe->ipv4_hnapt.new_sport; + entry.ipv4_hnapt.new_dport = + foe->ipv4_hnapt.new_dport; + mape_l2w_v6h = *ip6h; + } + break; + + default: + return -1; + } + + trace_printk( + "[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n", + __func__, skb->head, skb->data, ip6h, skb->len, + skb->data_len); + break; + + default: + iph = ip_hdr(skb); + switch (entry.bfib1.pkt_type) { + case IPV6_6RD: /* 6RD LAN->WAN */ + entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0; + entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1; + entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2; + entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3; + + entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0; + entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1; + entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2; + entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3; + + entry.ipv6_6rd.sport = foe->ipv6_6rd.sport; + entry.ipv6_6rd.dport = foe->ipv6_6rd.dport; + entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr); + entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr); + entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph); + entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13); + entry.ipv6_6rd.ttl = iph->ttl; + entry.ipv6_6rd.dscp = iph->tos; + entry.ipv6_6rd.per_flow_6rd_id = 1; + entry.ipv6_6rd.vlan1 = hw_path->vlan_id; + if (hnat_priv->data->per_flow_accounting) + entry.ipv6_6rd.iblk2.mibf = 1; + break; + + default: + return -1; + } + } + + /* Fill Layer2 Info.*/ + entry = ppe_fill_L2_info(eth, entry, hw_path); + + /* Fill Info Blk*/ + entry = ppe_fill_info_blk(eth, entry, hw_path); + + if (IS_LAN(dev)) { + if (IS_DSA_LAN(dev)) + port_id = hnat_dsa_fill_stag(dev, &entry, hw_path, + ntohs(eth->h_proto), + mape); + + if (IS_BOND_MODE) + gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ? + NR_GMAC2_PORT : NR_GMAC1_PORT; + else + gmac = NR_GMAC1_PORT; + } else if (IS_WAN(dev)) { + if (IS_DSA_WAN(dev)) + port_id = hnat_dsa_fill_stag(dev,&entry, hw_path, + ntohs(eth->h_proto), + mape); + if (mape_toggle && mape == 1) { + gmac = NR_PDMA_PORT; + /* Set act_dp = wan_dev */ + entry.ipv4_hnapt.act_dp = dev->ifindex; + } else { + gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT; + } + } else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) || + FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) { + if (!hnat_priv->data->whnat && IS_GMAC1_MODE) { + entry.bfib1.vpm = 1; + entry.bfib1.vlan_layer = 1; + + if (FROM_GE_LAN(skb)) + entry.ipv4_hnapt.vlan1 = 1; + else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) + entry.ipv4_hnapt.vlan1 = 2; + } + + trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n", + skb_hnat_iface(skb), dev->name); + /* To CPU then stolen by pre-routing hant hook of LAN/WAN + * Current setting is PDMA RX. + */ + gmac = NR_PDMA_PORT; + if (IS_IPV4_GRP(foe)) + entry.ipv4_hnapt.act_dp = dev->ifindex; + else + entry.ipv6_5t_route.act_dp = dev->ifindex; + } else { + printk_ratelimited(KERN_WARNING + "Unknown case of dp, iif=%x --> %s\n", + skb_hnat_iface(skb), dev->name); + + return 0; + } + + if (IS_HQOS_MODE) + qid = skb->mark & (MTK_QDMA_TX_MASK); + else if (IS_PPPQ_MODE && (IS_DSA_LAN(dev) || IS_DSA_WAN(dev))) + qid = port_id & MTK_QDMA_TX_MASK; + else + qid = 0; + + if (IS_IPV4_GRP(foe)) { + entry.ipv4_hnapt.iblk2.dp = gmac; + entry.ipv4_hnapt.iblk2.port_mg = + (hnat_priv->data->version == MTK_HNAT_V1) ? 
0x3f : 0; + + if (qos_toggle) { + if (hnat_priv->data->version == MTK_HNAT_V4) { + entry.ipv4_hnapt.iblk2.qid = qid & 0x7f; + } else { + /* qid[5:0]= port_mg[1:0]+ qid[3:0] */ + entry.ipv4_hnapt.iblk2.qid = qid & 0xf; + if (hnat_priv->data->version != MTK_HNAT_V1) + entry.ipv4_hnapt.iblk2.port_mg |= + ((qid >> 4) & 0x3); + + if (((IS_EXT(dev) && (FROM_GE_LAN(skb) || + FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) || + ((mape_toggle && mape == 1) && !FROM_EXT(skb))) && + (!whnat)) { + entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG); + entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb); + entry.bfib1.vlan_layer = 1; + } + } + + if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT || + (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev))) + entry.ipv4_hnapt.iblk2.fqos = 0; + else + entry.ipv4_hnapt.iblk2.fqos = 1; + } else { + entry.ipv4_hnapt.iblk2.fqos = 0; + } + } else { + entry.ipv6_5t_route.iblk2.dp = gmac; + entry.ipv6_5t_route.iblk2.port_mg = + (hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0; + + if (qos_toggle) { + if (hnat_priv->data->version == MTK_HNAT_V4) { + entry.ipv6_5t_route.iblk2.qid = qid & 0x7f; + } else { + /* qid[5:0]= port_mg[1:0]+ qid[3:0] */ + entry.ipv6_5t_route.iblk2.qid = qid & 0xf; + if (hnat_priv->data->version != MTK_HNAT_V1) + entry.ipv6_5t_route.iblk2.port_mg |= + ((qid >> 4) & 0x3); + + if (IS_EXT(dev) && (FROM_GE_LAN(skb) || + FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) && + (!whnat)) { + entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG); + entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb); + entry.bfib1.vlan_layer = 1; + } + } + + if (FROM_EXT(skb) || + (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev))) + entry.ipv6_5t_route.iblk2.fqos = 0; + else + entry.ipv6_5t_route.iblk2.fqos = 1; + } else { + entry.ipv6_5t_route.iblk2.fqos = 0; + } + } + + /* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined + * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and + * the entry is set to BIND state in mtk_sw_nat_hook_tx(). 
+ */ + if (!whnat) { + entry.bfib1.ttl = 1; + entry.bfib1.state = BIND; + } + + wmb(); + memcpy(foe, &entry, sizeof(entry)); + /*reset statistic for this entry*/ + if (hnat_priv->data->per_flow_accounting) + memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)], + 0, sizeof(struct mib_entry)); + + skb_hnat_filled(skb) = HNAT_INFO_FILLED; + + return 0; +} + +int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no) +{ + struct foe_entry *entry; + struct ethhdr *eth; + struct hnat_bind_info_blk bfib1_tx; + + if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) || + !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb)) + return NF_ACCEPT; + + trace_printk( + "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n", + __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no, + skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb), + skb_hnat_wc_id(skb), skb_hnat_rx_id(skb)); + + if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) && + (gmac_no != NR_WHNAT_WDMA_PORT)) + return NF_ACCEPT; + + if (!skb_hnat_is_hashed(skb)) + return NF_ACCEPT; + + if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num || + skb_hnat_ppe(skb) >= CFG_PPE_NUM) + return NF_ACCEPT; + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + if (entry_hnat_is_bound(entry)) + return NF_ACCEPT; + + if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH) + return NF_ACCEPT; + + eth = eth_hdr(skb); + memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1)); + + /*not bind multicast if PPE mcast not enable*/ + if (!hnat_priv->data->mcast) { + if (is_multicast_ether_addr(eth->h_dest)) + return NF_ACCEPT; + + if (IS_IPV4_GRP(entry)) + entry->ipv4_hnapt.iblk2.mcast = 0; + else + entry->ipv6_5t_route.iblk2.mcast = 0; + } + + /* Some mt_wifi virtual interfaces, such as apcli, + * will change the smac for specail purpose. 
+ */ + switch (bfib1_tx.pkt_type) { + case IPV4_HNAPT: + case IPV4_HNAT: + entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source)); + entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4])); + break; + case IPV4_DSLITE: + case IPV4_MAP_E: + case IPV6_6RD: + case IPV6_5T_ROUTE: + case IPV6_3T_ROUTE: + entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source)); + entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)ð->h_source[4])); + break; + } + + if (skb->vlan_tci) { + bfib1_tx.vlan_layer = 1; + bfib1_tx.vpm = 1; + if (IS_IPV4_GRP(entry)) { + entry->ipv4_hnapt.etype = htons(ETH_P_8021Q); + entry->ipv4_hnapt.vlan1 = skb->vlan_tci; + } else if (IS_IPV6_GRP(entry)) { + entry->ipv6_5t_route.etype = htons(ETH_P_8021Q); + entry->ipv6_5t_route.vlan1 = skb->vlan_tci; + } + } else { + bfib1_tx.vpm = 0; + bfib1_tx.vlan_layer = 0; + } + + /* MT7622 wifi hw_nat not support QoS */ + if (IS_IPV4_GRP(entry)) { + entry->ipv4_hnapt.iblk2.fqos = 0; + if ((hnat_priv->data->version == MTK_HNAT_V2 && + gmac_no == NR_WHNAT_WDMA_PORT) || + (hnat_priv->data->version == MTK_HNAT_V4 && + (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) { + entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb); + entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb); + entry->ipv4_hnapt.iblk2.winfoi = 1; +#else + entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb); + entry->ipv4_hnapt.iblk2w.winfoi = 1; + entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb); +#endif + } else { + if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) { + bfib1_tx.vpm = 1; + bfib1_tx.vlan_layer = 1; + + if (FROM_GE_LAN(skb)) + entry->ipv4_hnapt.vlan1 = 1; + else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) + entry->ipv4_hnapt.vlan1 = 2; + } + + if (IS_HQOS_MODE && + (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) { + bfib1_tx.vpm = 0; + bfib1_tx.vlan_layer = 1; + entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG); + entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb); + entry->ipv4_hnapt.iblk2.fqos = 1; + } + } + entry->ipv4_hnapt.iblk2.dp = gmac_no; + } else { + entry->ipv6_5t_route.iblk2.fqos = 0; + if ((hnat_priv->data->version == MTK_HNAT_V2 && + gmac_no == NR_WHNAT_WDMA_PORT) || + (hnat_priv->data->version == MTK_HNAT_V4 && + (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) { + entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb); + entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb); +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb); + entry->ipv6_5t_route.iblk2.winfoi = 1; +#else + entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb); + entry->ipv6_5t_route.iblk2w.winfoi = 1; + entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb); +#endif + } else { + if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) { + bfib1_tx.vpm = 1; + bfib1_tx.vlan_layer = 1; + + if (FROM_GE_LAN(skb)) + entry->ipv6_5t_route.vlan1 = 1; + else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) + entry->ipv6_5t_route.vlan1 = 2; + } + + if (IS_HQOS_MODE && + (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) { + bfib1_tx.vpm = 0; + bfib1_tx.vlan_layer = 1; + entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG); + entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb); + entry->ipv6_5t_route.iblk2.fqos = 1; + } + } + entry->ipv6_5t_route.iblk2.dp = gmac_no; + } + + bfib1_tx.ttl = 1; + bfib1_tx.state = BIND; + wmb(); + memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx)); + + return NF_ACCEPT; +} + 
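+/* Usage sketch (illustrative only, not taken from this patch): a Wi-Fi/WED
+ * driver that wants PPE flow binding is expected to register its netdev and
+ * then call the RX/TX hooks from its data path. The call sites and the
+ * surrounding wifi-side code are assumptions; only the hook names and the
+ * NR_WDMA* port numbers below come from this driver:
+ *
+ *	mtk_ppe_dev_register_hook(netdev);	// at interface bring-up
+ *
+ *	// RX: re-initialise the HNAT tag in the skb headroom before the
+ *	// packet is handed to the network stack
+ *	mtk_sw_nat_hook_rx(skb);
+ *	netif_receive_skb(skb);
+ *
+ *	// TX: let HNAT bind the FOE entry towards the WDMA port in use
+ *	mtk_sw_nat_hook_tx(skb, NR_WDMA0_PORT);
+ */
+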
+int mtk_sw_nat_hook_rx(struct sk_buff *skb)
+{
+	if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
+		skb_hnat_magic_tag(skb) = 0;
+		return NF_ACCEPT;
+	}
+
+	skb_hnat_alg(skb) = 0;
+	skb_hnat_filled(skb) = 0;
+	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+		skb_hnat_sport(skb) = NR_WDMA0_PORT;
+	else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
+		skb_hnat_sport(skb) = NR_WDMA1_PORT;
+
+	return NF_ACCEPT;
+}
+
+void mtk_ppe_dev_register_hook(struct net_device *dev)
+{
+	int i, number = 0;
+	struct extdev_entry *ext_entry;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
+				__func__, dev->name, i);
+			return;
+		}
+		if (!hnat_priv->wifi_hook_if[i]) {
+			if (find_extif_from_devname(dev->name)) {
+				extif_set_dev(dev);
+				goto add_wifi_hook_if;
+			}
+
+			number = get_ext_device_number();
+			if (number >= MAX_EXT_DEVS) {
+				pr_info("%s : extdev array is full. %s is not registered\n",
+					__func__, dev->name);
+				return;
+			}
+
+			ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
+			if (!ext_entry)
+				return;
+
+			strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
+			dev_hold(dev);
+			ext_entry->dev = dev;
+			ext_if_add(ext_entry);
+
+add_wifi_hook_if:
+			dev_hold(dev);
+			hnat_priv->wifi_hook_if[i] = dev;
+
+			break;
+		}
+	}
+	pr_info("%s : interface %s register (%d)\n", __func__, dev->name, i);
+}
+
+void mtk_ppe_dev_unregister_hook(struct net_device *dev)
+{
+	int i;
+
+	for (i = 1; i < MAX_IF_NUM; i++) {
+		if (hnat_priv->wifi_hook_if[i] == dev) {
+			hnat_priv->wifi_hook_if[i] = NULL;
+			dev_put(dev);
+
+			break;
+		}
+	}
+
+	extif_put_dev(dev);
+	pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
+}
+
+static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
+{
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct nf_conn_help *help;
+
+	/* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
+	 * is from local_out which is also filtered in sanity check.
+ */ + dst = skb_dst(skb); + if (dst && dst_xfrm(dst)) + return 0; + + ct = nf_ct_get(skb, &ctinfo); + if (!ct) + return 1; + + /* rcu_read_lock()ed by nf_hook_slow */ + help = nfct_help(ct); + if (help && rcu_dereference(help->helper)) + return 0; + + return 1; +} + +static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry) +{ + struct iphdr *iph; + struct ethhdr *eth; + struct ipv6hdr *ip6h; + bool flag = false; + + eth = eth_hdr(skb); + switch (ntohs(eth->h_proto)) { + case ETH_P_IP: + iph = ip_hdr(skb); + if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos) + flag = true; + break; + case ETH_P_IPV6: + ip6h = ipv6_hdr(skb); + if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) && + (entry->ipv6_5t_route.iblk2.dscp != + (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4)))) + flag = true; + break; + default: + return; + } + + if (flag) { + if (debug_level >= 2) + pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb)); + memset(entry, 0, sizeof(struct foe_entry)); + hnat_cache_ebl(1); + } +} + +static void mtk_hnat_nf_update(struct sk_buff *skb) +{ + struct nf_conn *ct; + struct nf_conn_acct *acct; + struct nf_conn_counter *counter; + enum ip_conntrack_info ctinfo; + struct hnat_accounting diff; + + ct = nf_ct_get(skb, &ctinfo); + if (ct) { + if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff)) + return; + + acct = nf_conn_acct_find(ct); + if (acct) { + counter = acct->counter; + atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets); + atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes); + } + } +} + +static unsigned int mtk_hnat_nf_post_routing( + struct sk_buff *skb, const struct net_device *out, + unsigned int (*fn)(struct sk_buff *, const struct net_device *, + struct flow_offload_hw_path *), + const char *func) +{ + struct foe_entry *entry; + struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out, + .virt_dev = (struct net_device*)out }; + const struct net_device *arp_dev = out; + + if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) || + !IS_SPACE_AVAILABLE_HEAD(skb))) + return 0; + + if (unlikely(!skb_hnat_is_hashed(skb))) + return 0; + + if (out->netdev_ops->ndo_flow_offload_check) { + out->netdev_ops->ndo_flow_offload_check(&hw_path); + out = (IS_GMAC1_MODE) ? 
hw_path.virt_dev : hw_path.dev; + } + + if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out)) + return 0; + + trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__, + skb_hnat_iface(skb), out->name, skb_hnat_reason(skb)); + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + + switch (skb_hnat_reason(skb)) { + case HIT_UNBIND_RATE_REACH: + if (entry_hnat_is_bound(entry)) + break; + + if (fn && !mtk_hnat_accel_type(skb)) + break; + + if (fn && fn(skb, arp_dev, &hw_path)) + break; + + skb_to_hnat_info(skb, out, entry, &hw_path); + break; + case HIT_BIND_KEEPALIVE_DUP_OLD_HDR: + /* update hnat count to nf_conntrack by keepalive */ + if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en) + mtk_hnat_nf_update(skb); + + if (fn && !mtk_hnat_accel_type(skb)) + break; + + /* update dscp for qos */ + mtk_hnat_dscp_update(skb, entry); + + /* update mcast timestamp*/ + if (hnat_priv->data->version == MTK_HNAT_V3 && + hnat_priv->data->mcast && entry->bfib1.sta == 1) + entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv); + + if (entry_hnat_is_bound(entry)) { + memset(skb_hnat_info(skb), 0, FOE_INFO_LEN); + + return -1; + } + break; + case HIT_BIND_MULTICAST_TO_CPU: + case HIT_BIND_MULTICAST_TO_GMAC_CPU: + /*do not forward to gdma again,if ppe already done it*/ + if (IS_LAN(out) || IS_WAN(out)) + return -1; + break; + } + + return 0; +} + +static unsigned int +mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct foe_entry *entry; + struct ipv6hdr *ip6h; + struct iphdr _iphdr; + const struct iphdr *iph; + struct tcpudphdr _ports; + const struct tcpudphdr *pptr; + int udp = 0; + + if (unlikely(!skb_hnat_is_hashed(skb))) + return NF_ACCEPT; + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) { + ip6h = ipv6_hdr(skb); + if (ip6h->nexthdr == NEXTHDR_IPIP) { + /* Map-E LAN->WAN: need to record orig info before fn. */ + if (mape_toggle) { + iph = skb_header_pointer(skb, IPV6_HDR_LEN, + sizeof(_iphdr), &_iphdr); + if (unlikely(!iph)) + return NF_ACCEPT; + + switch (iph->protocol) { + case IPPROTO_UDP: + udp = 1; + case IPPROTO_TCP: + break; + + default: + return NF_ACCEPT; + } + + pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4, + sizeof(_ports), &_ports); + if (unlikely(!pptr)) + return NF_ACCEPT; + + entry->bfib1.udp = udp; + + /* Map-E LAN->WAN record inner IPv4 header info. 
*/ +#if defined(CONFIG_MEDIATEK_NETSYS_V2) + entry->bfib1.pkt_type = IPV4_MAP_E; + entry->ipv4_dslite.iblk2.dscp = iph->tos; + entry->ipv4_dslite.new_sip = ntohl(iph->saddr); + entry->ipv4_dslite.new_dip = ntohl(iph->daddr); + entry->ipv4_dslite.new_sport = ntohs(pptr->src); + entry->ipv4_dslite.new_dport = ntohs(pptr->dst); +#else + entry->ipv4_hnapt.iblk2.dscp = iph->tos; + entry->ipv4_hnapt.new_sip = ntohl(iph->saddr); + entry->ipv4_hnapt.new_dip = ntohl(iph->daddr); + entry->ipv4_hnapt.new_sport = ntohs(pptr->src); + entry->ipv4_hnapt.new_dport = ntohs(pptr->dst); +#endif + } else { + entry->bfib1.pkt_type = IPV4_DSLITE; + } + } + } + return NF_ACCEPT; +} + +static unsigned int +mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + post_routing_print(skb, state->in, state->out, __func__); + + if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop, + __func__)) + return NF_ACCEPT; + + trace_printk( + "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic, + skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + post_routing_print(skb, state->in, state->out, __func__); + + if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop, + __func__)) + return NF_ACCEPT; + + trace_printk( + "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic, + skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_pong_hqos_handler(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); + + if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) { + skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff; + skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU; + } + + if (skb_hnat_iface(skb) == FOE_MAGIC_EXT) + clr_from_extge(skb); + + /* packets from external devices -> xxx, step 2, learning stage */ + if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle || + (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) { + if (!do_hnat_ext_to_ge2(skb, __func__)) + return NF_STOLEN; + goto drop; + } + + /* packets from ge -> external device */ + if (do_ge2ext_fast(state->in, skb)) { + if (!do_hnat_ge_to_ext(skb, __func__)) + return NF_STOLEN; + goto drop; + } + + return NF_ACCEPT; +drop: + printk_ratelimited(KERN_WARNING + "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, state->in->name, skb_hnat_iface(skb), + HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb), + skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + post_routing_print(skb, state->in, state->out, __func__); + + if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__)) + return NF_ACCEPT; + + trace_printk( + "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n", + __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic, + 
skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb), + skb_hnat_alg(skb)); + + return NF_DROP; +} + +static unsigned int +mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct sk_buff *new_skb; + struct foe_entry *entry; + struct iphdr *iph; + + if (!skb_hnat_is_hashed(skb)) + return NF_ACCEPT; + + entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)]; + + if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) { + new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN); + if (!new_skb) { + dev_info(hnat_priv->dev, "%s:drop\n", __func__); + return NF_DROP; + } + dev_kfree_skb(skb); + skb = new_skb; + } + + /* Make the flow from local not be bound. */ + iph = ip_hdr(skb); + if (iph->protocol == IPPROTO_IPV6) { + entry->udib1.pkt_type = IPV6_6RD; + hnat_set_head_frags(state, skb, 0, hnat_set_alg); + } else { + hnat_set_head_frags(state, skb, 1, hnat_set_alg); + } + + return NF_ACCEPT; +} + +static unsigned int mtk_hnat_br_nf_forward(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + if ((hnat_priv->data->version == MTK_HNAT_V2) && + unlikely(IS_EXT(state->in) && IS_EXT(state->out))) + hnat_set_head_frags(state, skb, 1, hnat_set_alg); + + return NF_ACCEPT; +} + +static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = { + { + .hook = mtk_hnat_ipv4_nf_pre_routing, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_PRE_ROUTING, + .priority = NF_IP_PRI_FIRST + 1, + }, + { + .hook = mtk_hnat_ipv6_nf_pre_routing, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_PRE_ROUTING, + .priority = NF_IP_PRI_FIRST + 1, + }, + { + .hook = mtk_hnat_ipv6_nf_post_routing, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_LAST, + }, + { + .hook = mtk_hnat_ipv6_nf_local_out, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_OUT, + .priority = NF_IP_PRI_LAST, + }, + { + .hook = mtk_hnat_ipv4_nf_post_routing, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_LAST, + }, + { + .hook = mtk_hnat_ipv4_nf_local_out, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_OUT, + .priority = NF_IP_PRI_LAST, + }, + { + .hook = mtk_hnat_br_nf_local_in, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_IN, + .priority = NF_BR_PRI_FIRST, + }, + { + .hook = mtk_hnat_br_nf_local_out, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_OUT, + .priority = NF_BR_PRI_LAST - 1, + }, + { + .hook = mtk_pong_hqos_handler, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_PRE_ROUTING, + .priority = NF_BR_PRI_FIRST + 1, + }, +}; + +int hnat_register_nf_hooks(void) +{ + return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops)); +} + +void hnat_unregister_nf_hooks(void) +{ + nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops)); +} + +int whnat_adjust_nf_hooks(void) +{ + struct nf_hook_ops *hook = mtk_hnat_nf_ops; + unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops); + + while (n-- > 0) { + if (hook[n].hook == mtk_hnat_br_nf_local_in) { + hook[n].hooknum = NF_BR_PRE_ROUTING; + hook[n].priority = NF_BR_PRI_FIRST + 1; + } else if (hook[n].hook == mtk_hnat_br_nf_local_out) { + hook[n].hooknum = NF_BR_POST_ROUTING; + } else if (hook[n].hook == mtk_pong_hqos_handler) { + hook[n].hook = mtk_hnat_br_nf_forward; + hook[n].hooknum = NF_BR_FORWARD; + hook[n].priority = NF_BR_PRI_LAST - 1; + } + } + + return 0; +} + +int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *unused) +{ + struct vlan_ethhdr *veth = (struct 
vlan_ethhdr *)skb_mac_header(skb); + + skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff; + skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU; + + do_hnat_ge_to_ext(skb, __func__); + + return 0; +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_stag.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_stag.c new file mode 100644 index 000000000..75c3a75c1 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/hnat_stag.c @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2020 MediaTek Inc. + * Author: Landen Chao + */ + +#include +#include +#include "hnat.h" + +u32 hnat_dsa_fill_stag(const struct net_device *netdev, + struct foe_entry *entry, + struct flow_offload_hw_path *hw_path, + u16 eth_proto, + int mape) +{ + const struct net_device *ndev; + const unsigned int *port_reg; + int port_index; + u16 sp_tag; + + if (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) + ndev = hw_path->dev; + else + ndev = netdev; + + port_reg = of_get_property(ndev->dev.of_node, "reg", NULL); + if (unlikely(!port_reg)) + return -EINVAL; + + port_index = be32_to_cpup(port_reg); + sp_tag = BIT(port_index); + + if (!entry->bfib1.vlan_layer) + entry->bfib1.vlan_layer = 1; + else + /* VLAN existence indicator */ + sp_tag |= BIT(8); + entry->bfib1.vpm = 0; + + switch (eth_proto) { + case ETH_P_IP: + if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE + || (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E)) + entry->ipv4_dslite.etype = sp_tag; + else + entry->ipv4_hnapt.etype = sp_tag; + break; + case ETH_P_IPV6: + /* In the case MAPE LAN --> WAN, binding entry is to CPU. + * Do not add special tag. + */ + if (!mape) + /* etype offset of ipv6 entries is the same. */ + entry->ipv6_5t_route.etype = sp_tag; + + break; + default: + pr_info("DSA + HNAT unsupported protocol\n"); + } + + return port_index; +} diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/nf_hnat_mtk.h b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/nf_hnat_mtk.h new file mode 100644 index 000000000..96bbe0636 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_hnat/nf_hnat_mtk.h @@ -0,0 +1,129 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014-2016 Sean Wang + * Copyright (C) 2016-2017 John Crispin + */ + +#ifndef NF_HNAT_MTK_H +#define NF_HNAT_MTK_H + +#include +#include +#include "../mtk_eth_soc.h" + +#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44])) +struct hnat_skb_cb2 { + __u32 magic; +}; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +struct hnat_desc { + u32 entry : 15; + u32 filled : 3; + u32 crsn : 5; + u32 resv1 : 3; + u32 sport : 4; + u32 resv2 : 1; + u32 alg : 1; + u32 iface : 8; + u32 wdmaid : 2; + u32 rxid : 2; + u32 wcid : 10; + u32 bssid : 6; + u32 resv5 : 20; + u32 magic_tag_protect : 16; +} __packed; +#else +struct hnat_desc { + u32 entry : 14; + u32 crsn : 5; + u32 sport : 4; + u32 alg : 1; + u32 iface : 4; + u32 filled : 3; + u32 resv : 1; + u32 magic_tag_protect : 16; + u32 wdmaid : 8; + u32 rxid : 2; + u32 wcid : 8; + u32 bssid : 6; +} __packed; +#endif + +#define HQOS_MAGIC_TAG 0x5678 +#define HAS_HQOS_MAGIC_TAG(skb) (qos_toggle && skb->protocol == HQOS_MAGIC_TAG) + +#define HNAT_MAGIC_TAG 0x6789 +#define HNAT_INFO_FILLED 0x7 +#define WIFI_INFO_LEN 3 +#define FOE_INFO_LEN (10 + WIFI_INFO_LEN) +#define IS_SPACE_AVAILABLE_HEAD(skb) \ + ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0))) + +#define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head)) +#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic) +#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn) +#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry) +#define skb_hnat_sport(skb) (((struct hnat_desc *)(skb->head))->sport) +#define skb_hnat_alg(skb) (((struct hnat_desc *)(skb->head))->alg) +#define skb_hnat_iface(skb) (((struct hnat_desc *)(skb->head))->iface) +#define skb_hnat_filled(skb) (((struct hnat_desc *)(skb->head))->filled) +#define skb_hnat_magic_tag(skb) (((struct hnat_desc *)((skb)->head))->magic_tag_protect) +#define skb_hnat_wdma_id(skb) (((struct hnat_desc *)((skb)->head))->wdmaid) +#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid) +#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid) +#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid) +#define skb_hnat_ppe(skb) \ + ((skb_hnat_iface(skb) == FOE_MAGIC_WED1 && CFG_PPE_NUM > 1) ? 
1 : 0) +#define do_ext2ge_fast_try(dev, skb) \ + ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb)) +#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688) +#define clr_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x0) +#define set_to_ppe(skb) (HNAT_SKB_CB2(skb)->magic = 0x78681415) +#define is_from_extge(skb) (HNAT_SKB_CB2(skb)->magic == 0x78786688) +#define is_hnat_info_filled(skb) (skb_hnat_filled(skb) == HNAT_INFO_FILLED) +#define is_magic_tag_valid(skb) (skb_hnat_magic_tag(skb) == HNAT_MAGIC_TAG) +#define set_from_mape(skb) (HNAT_SKB_CB2(skb)->magic = 0x78787788) +#define is_from_mape(skb) (HNAT_SKB_CB2(skb)->magic == 0x78787788) +#define is_unreserved_port(hdr) \ + ((ntohs(hdr->source) > 1023) && (ntohs(hdr->dest) > 1023)) + +#define TTL_0 0x02 +#define HAS_OPTION_HEADER 0x03 +#define NO_FLOW_IS_ASSIGNED 0x07 +#define IPV4_WITH_FRAGMENT 0x08 +#define IPV4_HNAPT_DSLITE_WITH_FRAGMENT 0x09 +#define IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP 0x0A +#define IPV6_5T_6RD_WITHOUT_TCP_UDP 0x0B +#define TCP_FIN_SYN_RST \ + 0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */ +#define UN_HIT 0x0D /* FOE Un-hit */ +#define HIT_UNBIND 0x0E /* FOE Hit unbind */ +#define HIT_UNBIND_RATE_REACH 0x0F +#define HIT_BIND_TCP_FIN 0x10 +#define HIT_BIND_TTL_1 0x11 +#define HIT_BIND_WITH_VLAN_VIOLATION 0x12 +#define HIT_BIND_KEEPALIVE_UC_OLD_HDR 0x13 +#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14 +#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15 +#define HIT_BIND_FORCE_TO_CPU 0x16 +#define HIT_BIND_WITH_OPTION_HEADER 0x17 +#define HIT_BIND_MULTICAST_TO_CPU 0x18 +#define HIT_BIND_MULTICAST_TO_GMAC_CPU 0x19 +#define HIT_PRE_BIND 0x1A +#define HIT_BIND_PACKET_SAMPLING 0x1B +#define HIT_BIND_EXCEED_MTU 0x1C + +u32 hnat_tx(struct sk_buff *skb); +u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd); +u32 hnat_reg(struct net_device *, void __iomem *); +u32 hnat_unreg(void); + +#endif diff --git a/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_sgmii.c b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_sgmii.c new file mode 100644 index 000000000..dacdf3cde --- /dev/null +++ b/target/linux/ramips/files/drivers/net/ethernet/mtk/mtk_sgmii.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. 
+ +/* A library for MediaTek SGMII circuit + * + * Author: Sean Wang + * + */ + +#include +#include +#include + +#include "mtk_eth_soc.h" + +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) +{ + struct device_node *np; + int i; + + ss->ana_rgc3 = ana_rgc3; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + np = of_parse_phandle(r, "mediatek,sgmiisys", i); + if (!np) + break; + + ss->regmap[i] = syscon_node_to_regmap(np); + if (IS_ERR(ss->regmap[i])) + return PTR_ERR(ss->regmap[i]); + + ss->flags[i] &= ~(MTK_SGMII_PN_SWAP); + if (of_property_read_bool(np, "pn_swap")) + ss->flags[i] |= MTK_SGMII_PN_SWAP; + } + + return 0; +} + +int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, unsigned int id) +{ + unsigned int val; + + if (!ss->regmap[id]) + return -EINVAL; + + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER, + SGMII_LINK_TIMER_DEFAULT); + + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + val |= SGMII_REMOTE_FAULT_DIS; + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); + + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); + + if(MTK_HAS_FLAGS(ss->flags[id],MTK_SGMII_PN_SWAP)) + regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL, + SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX); + + /* Release PHYA power down state */ + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0); + + return 0; +} + +int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, unsigned int id, + const struct phylink_link_state *state) +{ + unsigned int val; + + if (!ss->regmap[id]) + return -EINVAL; + + regmap_read(ss->regmap[id], ss->ana_rgc3, &val); + val &= ~RG_PHY_SPEED_MASK; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + val |= RG_PHY_SPEED_3_125G; + regmap_write(ss->regmap[id], ss->ana_rgc3, val); + + /* Disable SGMII AN */ + regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); + val &= ~SGMII_AN_ENABLE; + regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); + + /* SGMII force mode setting */ + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + val &= ~SGMII_IF_MODE_MASK; + + switch (state->speed) { + case SPEED_10: + val |= SGMII_SPEED_10; + break; + case SPEED_100: + val |= SGMII_SPEED_100; + break; + case SPEED_2500: + case SPEED_1000: + val |= SGMII_SPEED_1000; + break; + }; + + if (state->duplex == DUPLEX_FULL) + val |= SGMII_DUPLEX_FULL; + + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); + + if(MTK_HAS_FLAGS(ss->flags[id],MTK_SGMII_PN_SWAP)) + regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL, + SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX); + + /* Release PHYA power down state */ + regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0); + + return 0; +} + +void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) +{ + struct mtk_sgmii *ss = eth->sgmii; + unsigned int val, sid; + + /* Decide how GMAC and SGMIISYS be mapped */ + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
+ 0 : mac_id; + + if (!ss->regmap[sid]) + return; + + regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val); +} diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Kconfig b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Kconfig new file mode 100644 index 000000000..4a6b200e5 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Kconfig @@ -0,0 +1,2 @@ +config MT753X_GSW + tristate "Driver for the MediaTek MT753x switch" diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Makefile b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Makefile new file mode 100644 index 000000000..e304fcb41 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for MediaTek MT753x gigabit switch +# + +obj-$(CONFIG_MT753X_GSW) += mt753x.o + +mt753x-$(CONFIG_SWCONFIG) += mt753x_swconfig.o + +mt753x-y += mt753x_mdio.o mt7530.o mt7531.o \ + mt753x_common.o mt753x_vlan.o mt753x_nl.o + diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.c new file mode 100644 index 000000000..7853e2799 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Weijie Gao + */ + +#include +#include + +#include "mt753x.h" +#include "mt753x_regs.h" + +/* MT7530 registers */ + +/* Unique fields of PMCR for MT7530 */ +#define FORCE_MODE BIT(15) + +/* Unique fields of GMACCR for MT7530 */ +#define VLAN_SUPT_NO_S 14 +#define VLAN_SUPT_NO_M 0x1c000 +#define LATE_COL_DROP BIT(13) + +/* Unique fields of (M)HWSTRAP for MT7530 */ +#define BOND_OPTION BIT(24) +#define P5_PHY0_SEL BIT(20) +#define CHG_TRAP BIT(16) +#define LOOPDET_DIS BIT(14) +#define P5_INTF_SEL_GMAC5 BIT(13) +#define SMI_ADDR_S 11 +#define SMI_ADDR_M 0x1800 +#define XTAL_FSEL_S 9 +#define XTAL_FSEL_M 0x600 +#define P6_INTF_DIS BIT(8) +#define P5_INTF_MODE_RGMII BIT(7) +#define P5_INTF_DIS_S BIT(6) +#define C_MDIO_BPS_S BIT(5) +#define EEPROM_EN_S BIT(4) + +/* PHY EEE Register bitmap of define */ +#define PHY_DEV07 0x07 +#define PHY_DEV07_REG_03C 0x3c + +/* PHY Extend Register 0x14 bitmap of define */ +#define PHY_EXT_REG_14 0x14 + +/* Fields of PHY_EXT_REG_14 */ +#define PHY_EN_DOWN_SHFIT BIT(4) + +/* PHY Token Ring Register 0x10 bitmap of define */ +#define PHY_TR_REG_10 0x10 + +/* PHY Token Ring Register 0x12 bitmap of define */ +#define PHY_TR_REG_12 0x12 + +/* PHY LPI PCS/DSP Control Register bitmap of define */ +#define PHY_LPI_REG_11 0x11 + +/* PHY DEV 0x1e Register bitmap of define */ +#define PHY_DEV1E 0x1e +#define PHY_DEV1E_REG_123 0x123 +#define PHY_DEV1E_REG_A6 0xa6 + +/* Values of XTAL_FSEL */ +#define XTAL_20MHZ 1 +#define XTAL_40MHZ 2 +#define XTAL_25MHZ 3 + +/* Top single control CR define */ +#define TOP_SIG_CTRL 0x7808 + +/* TOP_SIG_CTRL Register bitmap of define */ +#define OUTPUT_INTR_S 16 +#define OUTPUT_INTR_M 0x30000 + +#define P6ECR 0x7830 +#define P6_INTF_MODE_TRGMII BIT(0) + +#define TRGMII_TXCTRL 0x7a40 +#define TRAIN_TXEN BIT(31) +#define TXC_INV BIT(30) +#define TX_DOEO BIT(29) +#define TX_RST BIT(28) + +#define TRGMII_TD0_CTRL 0x7a50 +#define TRGMII_TD1_CTRL 0x7a58 +#define TRGMII_TD2_CTRL 0x7a60 +#define TRGMII_TD3_CTRL 0x7a68 +#define TRGMII_TXCTL_CTRL 0x7a70 +#define TRGMII_TCK_CTRL 0x7a78 +#define TRGMII_TD_CTRL(n) (0x7a50 + (n) * 
8) +#define NUM_TRGMII_CTRL 6 +#define TX_DMPEDRV BIT(31) +#define TX_DM_SR BIT(15) +#define TX_DMERODT BIT(14) +#define TX_DMOECTL BIT(13) +#define TX_TAP_S 8 +#define TX_TAP_M 0xf00 +#define TX_TRAIN_WD_S 0 +#define TX_TRAIN_WD_M 0xff + +#define TRGMII_TD0_ODT 0x7a54 +#define TRGMII_TD1_ODT 0x7a5c +#define TRGMII_TD2_ODT 0x7a64 +#define TRGMII_TD3_ODT 0x7a6c +#define TRGMII_TXCTL_ODT 0x7574 +#define TRGMII_TCK_ODT 0x757c +#define TRGMII_TD_ODT(n) (0x7a54 + (n) * 8) +#define NUM_TRGMII_ODT 6 +#define TX_DM_DRVN_PRE_S 30 +#define TX_DM_DRVN_PRE_M 0xc0000000 +#define TX_DM_DRVP_PRE_S 28 +#define TX_DM_DRVP_PRE_M 0x30000000 +#define TX_DM_TDSEL_S 24 +#define TX_DM_TDSEL_M 0xf000000 +#define TX_ODTEN BIT(23) +#define TX_DME_PRE BIT(20) +#define TX_DM_DRVNT0 BIT(19) +#define TX_DM_DRVPT0 BIT(18) +#define TX_DM_DRVNTE BIT(17) +#define TX_DM_DRVPTE BIT(16) +#define TX_DM_ODTN_S 12 +#define TX_DM_ODTN_M 0x7000 +#define TX_DM_ODTP_S 8 +#define TX_DM_ODTP_M 0x700 +#define TX_DM_DRVN_S 4 +#define TX_DM_DRVN_M 0xf0 +#define TX_DM_DRVP_S 0 +#define TX_DM_DRVP_M 0x0f + +#define P5RGMIIRXCR 0x7b00 +#define CSR_RGMII_RCTL_CFG_S 24 +#define CSR_RGMII_RCTL_CFG_M 0x7000000 +#define CSR_RGMII_RXD_CFG_S 16 +#define CSR_RGMII_RXD_CFG_M 0x70000 +#define CSR_RGMII_EDGE_ALIGN BIT(8) +#define CSR_RGMII_RXC_90DEG_CFG_S 4 +#define CSR_RGMII_RXC_90DEG_CFG_M 0xf0 +#define CSR_RGMII_RXC_0DEG_CFG_S 0 +#define CSR_RGMII_RXC_0DEG_CFG_M 0x0f + +#define P5RGMIITXCR 0x7b04 +#define CSR_RGMII_TXEN_CFG_S 16 +#define CSR_RGMII_TXEN_CFG_M 0x70000 +#define CSR_RGMII_TXD_CFG_S 8 +#define CSR_RGMII_TXD_CFG_M 0x700 +#define CSR_RGMII_TXC_CFG_S 0 +#define CSR_RGMII_TXC_CFG_M 0x1f + +#define CHIP_REV 0x7ffc +#define CHIP_NAME_S 16 +#define CHIP_NAME_M 0xffff0000 +#define CHIP_REV_S 0 +#define CHIP_REV_M 0x0f + +/* MMD registers */ +#define CORE_PLL_GROUP2 0x401 +#define RG_SYSPLL_EN_NORMAL BIT(15) +#define RG_SYSPLL_VODEN BIT(14) +#define RG_SYSPLL_POSDIV_S 5 +#define RG_SYSPLL_POSDIV_M 0x60 + +#define CORE_PLL_GROUP4 0x403 +#define RG_SYSPLL_DDSFBK_EN BIT(12) +#define RG_SYSPLL_BIAS_EN BIT(11) +#define RG_SYSPLL_BIAS_LPF_EN BIT(10) + +#define CORE_PLL_GROUP5 0x404 +#define RG_LCDDS_PCW_NCPO1_S 0 +#define RG_LCDDS_PCW_NCPO1_M 0xffff + +#define CORE_PLL_GROUP6 0x405 +#define RG_LCDDS_PCW_NCPO0_S 0 +#define RG_LCDDS_PCW_NCPO0_M 0xffff + +#define CORE_PLL_GROUP7 0x406 +#define RG_LCDDS_PWDB BIT(15) +#define RG_LCDDS_ISO_EN BIT(13) +#define RG_LCCDS_C_S 4 +#define RG_LCCDS_C_M 0x70 +#define RG_LCDDS_PCW_NCPO_CHG BIT(3) + +#define CORE_PLL_GROUP10 0x409 +#define RG_LCDDS_SSC_DELTA_S 0 +#define RG_LCDDS_SSC_DELTA_M 0xfff + +#define CORE_PLL_GROUP11 0x40a +#define RG_LCDDS_SSC_DELTA1_S 0 +#define RG_LCDDS_SSC_DELTA1_M 0xfff + +#define CORE_GSWPLL_GCR_1 0x040d +#define GSWPLL_PREDIV_S 14 +#define GSWPLL_PREDIV_M 0xc000 +#define GSWPLL_POSTDIV_200M_S 12 +#define GSWPLL_POSTDIV_200M_M 0x3000 +#define GSWPLL_EN_PRE BIT(11) +#define GSWPLL_FBKSEL BIT(10) +#define GSWPLL_BP BIT(9) +#define GSWPLL_BR BIT(8) +#define GSWPLL_FBKDIV_200M_S 0 +#define GSWPLL_FBKDIV_200M_M 0xff + +#define CORE_GSWPLL_GCR_2 0x040e +#define GSWPLL_POSTDIV_500M_S 8 +#define GSWPLL_POSTDIV_500M_M 0x300 +#define GSWPLL_FBKDIV_500M_S 0 +#define GSWPLL_FBKDIV_500M_M 0xff + +#define TRGMII_GSW_CLK_CG 0x0410 +#define TRGMIICK_EN BIT(1) +#define GSWCK_EN BIT(0) + +static int mt7530_mii_read(struct gsw_mt753x *gsw, int phy, int reg) +{ + if (phy < MT753X_NUM_PHYS) + phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK; + + return mdiobus_read(gsw->host_bus, phy, reg); +} + +static 
void mt7530_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val) +{ + if (phy < MT753X_NUM_PHYS) + phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK; + + mdiobus_write(gsw->host_bus, phy, reg, val); +} + +static int mt7530_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg) +{ + u16 val; + + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->host_bus->mdio_lock); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, + (MMD_ADDR << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M)); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, + (MMD_DATA << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M)); + + val = gsw->host_bus->read(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG); + + mutex_unlock(&gsw->host_bus->mdio_lock); + + return val; +} + +static void mt7530_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, + u16 reg, u16 val) +{ + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->host_bus->mdio_lock); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, + (MMD_ADDR << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M)); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG, + (MMD_DATA << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M)); + + gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, val); + + mutex_unlock(&gsw->host_bus->mdio_lock); +} + +static void mt7530_core_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val) +{ + gsw->mmd_write(gsw, 0, 0x1f, reg, val); +} + +static void mt7530_trgmii_setting(struct gsw_mt753x *gsw) +{ + u16 i; + + mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0780); + mdelay(1); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87); + mdelay(1); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87); + + /* PLL BIAS enable */ + mt7530_core_reg_write(gsw, CORE_PLL_GROUP4, + RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN); + mdelay(1); + + /* PLL LPF enable */ + mt7530_core_reg_write(gsw, CORE_PLL_GROUP4, + RG_SYSPLL_DDSFBK_EN | + RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN); + + /* sys PLL enable */ + mt7530_core_reg_write(gsw, CORE_PLL_GROUP2, + RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | + (1 << RG_SYSPLL_POSDIV_S)); + + /* LCDDDS PWDS */ + mt7530_core_reg_write(gsw, CORE_PLL_GROUP7, + (3 << RG_LCCDS_C_S) | + RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); + mdelay(1); + + /* Enable MT7530 TRGMII clock */ + mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN | TRGMIICK_EN); + + /* lower Tx Driving */ + for (i = 0 ; i < NUM_TRGMII_ODT; i++) + mt753x_reg_write(gsw, TRGMII_TD_ODT(i), + (4 << TX_DM_DRVP_S) | (4 << TX_DM_DRVN_S)); +} + +static void mt7530_rgmii_setting(struct gsw_mt753x *gsw) +{ + u32 val; + + mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0c80); + mdelay(1); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87); + mdelay(1); + mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87); + + val = mt753x_reg_read(gsw, TRGMII_TXCTRL); + val &= ~TXC_INV; + mt753x_reg_write(gsw, TRGMII_TXCTRL, val); + + mt753x_reg_write(gsw, TRGMII_TCK_CTRL, + (8 << TX_TAP_S) | (0x55 << TX_TRAIN_WD_S)); +} + +static int mt7530_mac_port_setup(struct gsw_mt753x *gsw) +{ + u32 hwstrap, p6ecr = 0, p5mcr, p6mcr, phyad; + + hwstrap = 
mt753x_reg_read(gsw, MHWSTRAP); + hwstrap &= ~(P6_INTF_DIS | P5_INTF_MODE_RGMII | P5_INTF_DIS_S); + hwstrap |= P5_INTF_SEL_GMAC5; + if (!gsw->port5_cfg.enabled) { + p5mcr = FORCE_MODE; + hwstrap |= P5_INTF_DIS_S; + } else { + p5mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) | + MAC_MODE | MAC_TX_EN | MAC_RX_EN | + BKOFF_EN | BACKPR_EN; + + if (gsw->port5_cfg.force_link) { + p5mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC | + FORCE_TX_FC; + p5mcr |= gsw->port5_cfg.speed << FORCE_SPD_S; + + if (gsw->port5_cfg.duplex) + p5mcr |= FORCE_DPX; + } + + switch (gsw->port5_cfg.phy_mode) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + break; + case PHY_INTERFACE_MODE_RGMII: + hwstrap |= P5_INTF_MODE_RGMII; + break; + default: + dev_info(gsw->dev, "%s is not supported by port5\n", + phy_modes(gsw->port5_cfg.phy_mode)); + p5mcr = FORCE_MODE; + hwstrap |= P5_INTF_DIS_S; + } + + /* Port5 to PHY direct mode */ + if (of_property_read_u32(gsw->port5_cfg.np, "phy-address", + &phyad)) + goto parse_p6; + + if (phyad != 0 && phyad != 4) { + dev_info(gsw->dev, + "Only PHY 0/4 can be connected to Port 5\n"); + goto parse_p6; + } + + hwstrap &= ~P5_INTF_SEL_GMAC5; + if (phyad == 0) + hwstrap |= P5_PHY0_SEL; + else + hwstrap &= ~P5_PHY0_SEL; + } + +parse_p6: + if (!gsw->port6_cfg.enabled) { + p6mcr = FORCE_MODE; + hwstrap |= P6_INTF_DIS; + } else { + p6mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) | + MAC_MODE | MAC_TX_EN | MAC_RX_EN | + BKOFF_EN | BACKPR_EN; + + if (gsw->port6_cfg.force_link) { + p6mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC | + FORCE_TX_FC; + p6mcr |= gsw->port6_cfg.speed << FORCE_SPD_S; + + if (gsw->port6_cfg.duplex) + p6mcr |= FORCE_DPX; + } + + switch (gsw->port6_cfg.phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + p6ecr = BIT(1); + break; + case PHY_INTERFACE_MODE_TRGMII: + /* set MT7530 central align */ + p6ecr = BIT(0); + break; + default: + dev_info(gsw->dev, "%s is not supported by port6\n", + phy_modes(gsw->port6_cfg.phy_mode)); + p6mcr = FORCE_MODE; + hwstrap |= P6_INTF_DIS; + } + } + + mt753x_reg_write(gsw, MHWSTRAP, hwstrap); + mt753x_reg_write(gsw, P6ECR, p6ecr); + + mt753x_reg_write(gsw, PMCR(5), p5mcr); + mt753x_reg_write(gsw, PMCR(6), p6mcr); + + return 0; +} + +static void mt7530_core_pll_setup(struct gsw_mt753x *gsw) +{ + u32 hwstrap; + + hwstrap = mt753x_reg_read(gsw, HWSTRAP); + + switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) { + case XTAL_40MHZ: + /* Disable MT7530 core clock */ + mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, 0); + + /* disable MT7530 PLL */ + mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1, + (2 << GSWPLL_POSTDIV_200M_S) | + (32 << GSWPLL_FBKDIV_200M_S)); + + /* For MT7530 core clock = 500Mhz */ + mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_2, + (1 << GSWPLL_POSTDIV_500M_S) | + (25 << GSWPLL_FBKDIV_500M_S)); + + /* Enable MT7530 PLL */ + mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1, + (2 << GSWPLL_POSTDIV_200M_S) | + (32 << GSWPLL_FBKDIV_200M_S) | + GSWPLL_EN_PRE); + + usleep_range(20, 40); + + /* Enable MT7530 core clock */ + mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN); + break; + default: + /* TODO: PLL settings for 20/25MHz */ + break; + } + + hwstrap = mt753x_reg_read(gsw, HWSTRAP); + hwstrap |= CHG_TRAP; + if (gsw->direct_phy_access) + hwstrap &= ~C_MDIO_BPS_S; + else + hwstrap |= C_MDIO_BPS_S; + + mt753x_reg_write(gsw, MHWSTRAP, hwstrap); + + if (gsw->port6_cfg.enabled && + gsw->port6_cfg.phy_mode == PHY_INTERFACE_MODE_TRGMII) { + mt7530_trgmii_setting(gsw); + } else { + /* RGMII */ + mt7530_rgmii_setting(gsw); + } + + /* 
delay setting for 10/1000M */ + mt753x_reg_write(gsw, P5RGMIIRXCR, + CSR_RGMII_EDGE_ALIGN | + (2 << CSR_RGMII_RXC_0DEG_CFG_S)); + mt753x_reg_write(gsw, P5RGMIITXCR, 0x14 << CSR_RGMII_TXC_CFG_S); +} + +static int mt7530_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev) +{ + u32 rev; + + rev = mt753x_reg_read(gsw, CHIP_REV); + + if (((rev & CHIP_NAME_M) >> CHIP_NAME_S) == MT7530) { + if (crev) { + crev->rev = rev & CHIP_REV_M; + crev->name = "MT7530"; + } + + return 0; + } + + return -ENODEV; +} + +static void mt7530_phy_setting(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + /* Disable EEE */ + gsw->mmd_write(gsw, i, PHY_DEV07, PHY_DEV07_REG_03C, 0); + + /* Enable HW auto downshift */ + gsw->mii_write(gsw, i, 0x1f, 0x1); + val = gsw->mii_read(gsw, i, PHY_EXT_REG_14); + val |= PHY_EN_DOWN_SHFIT; + gsw->mii_write(gsw, i, PHY_EXT_REG_14, val); + + /* Increase SlvDPSready time */ + gsw->mii_write(gsw, i, 0x1f, 0x52b5); + gsw->mii_write(gsw, i, PHY_TR_REG_10, 0xafae); + gsw->mii_write(gsw, i, PHY_TR_REG_12, 0x2f); + gsw->mii_write(gsw, i, PHY_TR_REG_10, 0x8fae); + + /* Increase post_update_timer */ + gsw->mii_write(gsw, i, 0x1f, 0x3); + gsw->mii_write(gsw, i, PHY_LPI_REG_11, 0x4b); + gsw->mii_write(gsw, i, 0x1f, 0); + + /* Adjust 100_mse_threshold */ + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff); + + /* Disable mcc */ + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300); + } +} + +static inline bool get_phy_access_mode(const struct device_node *np) +{ + return of_property_read_bool(np, "mt7530,direct-phy-access"); +} + +static int mt7530_sw_init(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + gsw->direct_phy_access = get_phy_access_mode(gsw->dev->of_node); + + /* Force MT7530 to use (in)direct PHY access */ + val = mt753x_reg_read(gsw, HWSTRAP); + val |= CHG_TRAP; + if (gsw->direct_phy_access) + val &= ~C_MDIO_BPS_S; + else + val |= C_MDIO_BPS_S; + mt753x_reg_write(gsw, MHWSTRAP, val); + + /* Read PHY address base from HWSTRAP */ + gsw->phy_base = (((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3) + 8; + gsw->phy_base &= MT753X_SMI_ADDR_MASK; + + if (gsw->direct_phy_access) { + gsw->mii_read = mt7530_mii_read; + gsw->mii_write = mt7530_mii_write; + gsw->mmd_read = mt7530_mmd_read; + gsw->mmd_write = mt7530_mmd_write; + } else { + gsw->mii_read = mt753x_mii_read; + gsw->mii_write = mt753x_mii_write; + gsw->mmd_read = mt753x_mmd_ind_read; + gsw->mmd_write = mt753x_mmd_ind_write; + } + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + val = gsw->mii_read(gsw, i, MII_BMCR); + val |= BMCR_PDOWN; + gsw->mii_write(gsw, i, MII_BMCR, val); + } + + /* Force MAC link down before reset */ + mt753x_reg_write(gsw, PMCR(5), FORCE_MODE); + mt753x_reg_write(gsw, PMCR(6), FORCE_MODE); + + /* Switch soft reset */ + /* BUG: sw reset causes gsw int flooding */ + mt753x_reg_write(gsw, SYS_CTRL, SW_PHY_RST | SW_SYS_RST | SW_REG_RST); + usleep_range(10, 20); + + /* global mac control settings configuration */ + mt753x_reg_write(gsw, GMACCR, + LATE_COL_DROP | (15 << MTCC_LMT_S) | + (2 << MAX_RX_JUMBO_S) | RX_PKT_LEN_MAX_JUMBO); + + /* Output INTR selected */ + val = mt753x_reg_read(gsw, TOP_SIG_CTRL); + val &= ~OUTPUT_INTR_M; + val |= (3 << OUTPUT_INTR_S); + mt753x_reg_write(gsw, TOP_SIG_CTRL, val); + + mt7530_core_pll_setup(gsw); + mt7530_mac_port_setup(gsw); + + return 0; +} + +static int mt7530_sw_post_init(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + mt7530_phy_setting(gsw); + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + val = gsw->mii_read(gsw, 
i, MII_BMCR); + val &= ~BMCR_PDOWN; + gsw->mii_write(gsw, i, MII_BMCR, val); + } + + return 0; +} + +struct mt753x_sw_id mt7530_id = { + .model = MT7530, + .detect = mt7530_sw_detect, + .init = mt7530_sw_init, + .post_init = mt7530_sw_post_init +}; diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.h new file mode 100644 index 000000000..40243d4e5 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7530.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018 MediaTek Inc. + */ + +#ifndef _MT7530_H_ +#define _MT7530_H_ + +#include "mt753x.h" + +extern struct mt753x_sw_id mt7530_id; + +#endif /* _MT7530_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.c new file mode 100644 index 000000000..725304299 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Zhanguo Ju + */ + +#include +#include +#include + +#include "mt753x.h" +#include "mt753x_regs.h" + +/* MT7531 registers */ +#define SGMII_REG_BASE 0x5000 +#define SGMII_REG_PORT_BASE 0x1000 +#define SGMII_REG(p, r) (SGMII_REG_BASE + \ + (p) * SGMII_REG_PORT_BASE + (r)) +#define PCS_CONTROL_1(p) SGMII_REG(p, 0x00) +#define SGMII_MODE(p) SGMII_REG(p, 0x20) +#define QPHY_PWR_STATE_CTRL(p) SGMII_REG(p, 0xe8) +#define ANA_CKBG(p) SGMII_REG(p, 0x100) +#define ANA_DA_FORCE_MODE1(p) SGMII_REG(p, 0x110) +#define PHYA_CTRL_SIGNAL3(p) SGMII_REG(p, 0x128) +#define PHYA_ANA_SYSPLL(p) SGMII_REG(p, 0x158) + +/* Fields of PCS_CONTROL_1 */ +#define SGMII_LINK_STATUS BIT(18) +#define SGMII_AN_ENABLE BIT(12) +#define SGMII_AN_RESTART BIT(9) + +/* Fields of SGMII_MODE */ +#define SGMII_REMOTE_FAULT_DIS BIT(8) +#define SGMII_IF_MODE_FORCE_DUPLEX BIT(4) +#define SGMII_IF_MODE_FORCE_SPEED_S 0x2 +#define SGMII_IF_MODE_FORCE_SPEED_M 0x0c +#define SGMII_IF_MODE_ADVERT_AN BIT(1) + +/* Values of SGMII_IF_MODE_FORCE_SPEED */ +#define SGMII_IF_MODE_FORCE_SPEED_10 0 +#define SGMII_IF_MODE_FORCE_SPEED_100 1 +#define SGMII_IF_MODE_FORCE_SPEED_1000 2 + +/* Fields of QPHY_PWR_STATE_CTRL */ +#define PHYA_PWD BIT(4) + +/* Fields of ANA_CKBG */ +#define SSUSB_PLL_SSC_EN BIT(21) + +/* Fields of ANA_DA_FORCE_MODE1 */ +#define FORCE_PLL_SSC_EN BIT(30) + +/* Fields of PHYA_CTRL_SIGNAL3 */ +#define RG_TPHY_SPEED_S 2 +#define RG_TPHY_SPEED_M 0x0c + +/* Values of RG_TPHY_SPEED */ +#define RG_TPHY_SPEED_1000 0 +#define RG_TPHY_SPEED_2500 1 + +/* Fields of PHYA_ANA_SYSPLL */ +#define RG_VUSB10_ON BIT(29) + +/* Unique fields of (M)HWSTRAP for MT7531 */ +#define XTAL_FSEL_S 7 +#define XTAL_FSEL_M BIT(7) +#define PHY_EN BIT(6) +#define CHG_STRAP BIT(8) + +/* Efuse Register Define */ +#define GBE_EFUSE 0x7bc8 +#define GBE_SEL_EFUSE_EN BIT(0) + +/* PHY ENABLE Register bitmap define */ +#define PHY_DEV1F 0x1f +#define PHY_DEV1F_REG_44 0x44 +#define PHY_DEV1F_REG_104 0x104 +#define PHY_DEV1F_REG_10A 0x10a +#define PHY_DEV1F_REG_10B 0x10b +#define PHY_DEV1F_REG_10C 0x10c +#define PHY_DEV1F_REG_10D 0x10d +#define PHY_DEV1F_REG_268 0x268 +#define PHY_DEV1F_REG_269 0x269 +#define PHY_DEV1F_REG_26A 0x26A +#define PHY_DEV1F_REG_403 0x403 + +/* Fields of PHY_DEV1F_REG_403 */ +#define GBE_EFUSE_SETTING BIT(3) +#define PHY_EN_BYPASS_MODE BIT(4) +#define POWER_ON_OFF BIT(5) +#define PHY_PLL_M GENMASK(9, 8) +#define PHY_PLL_SEL(x) (((x) 
<< 8) & GENMASK(9, 8)) + +/* PHY EEE Register bitmap of define */ +#define PHY_DEV07 0x07 +#define PHY_DEV07_REG_03C 0x3c + +/* PHY Extend Register 0x14 bitmap of define */ +#define PHY_EXT_REG_14 0x14 + +/* Fields of PHY_EXT_REG_14 */ +#define PHY_EN_DOWN_SHFIT BIT(4) + +/* PHY Extend Register 0x17 bitmap of define */ +#define PHY_EXT_REG_17 0x17 + +/* Fields of PHY_EXT_REG_17 */ +#define PHY_LINKDOWN_POWER_SAVING_EN BIT(4) + +/* PHY PMA Register 0x17 bitmap of define */ +#define SLV_DSP_READY_TIME_S 15 +#define SLV_DSP_READY_TIME_M (0xff << SLV_DSP_READY_TIME_S) + +/* PHY PMA Register 0x18 bitmap of define */ +#define ENABLE_RANDOM_UPDATE_TRIGGER BIT(8) + +/* PHY DEV 0x1e Register bitmap of define */ +#define PHY_DEV1E 0x1e +#define PHY_TX_MLT3_BASE 0x0 +#define PHY_DEV1E_REG_13 0x13 +#define PHY_DEV1E_REG_14 0x14 +#define PHY_DEV1E_REG_41 0x41 +#define PHY_DEV1E_REG_A6 0xa6 +#define PHY_DEV1E_REG_0C6 0x0c6 +#define PHY_DEV1E_REG_0FE 0x0fe +#define PHY_DEV1E_REG_123 0x123 +#define PHY_DEV1E_REG_141 0x141 +#define PHY_DEV1E_REG_189 0x189 +#define PHY_DEV1E_REG_234 0x234 + +/* Fields of PHY_DEV1E_REG_0C6 */ +#define PHY_POWER_SAVING_S 8 +#define PHY_POWER_SAVING_M 0x300 +#define PHY_POWER_SAVING_TX 0x0 + +/* Fields of PHY_DEV1E_REG_189 */ +#define DESCRAMBLER_CLEAR_EN 0x1 + +/* Fields of PHY_DEV1E_REG_234 */ +#define TR_OPEN_LOOP_EN BIT(0) + +/* Port debug count register */ +#define DBG_CNT_BASE 0x3018 +#define DBG_CNT_PORT_BASE 0x100 +#define DBG_CNT(p) (DBG_CNT_BASE + \ + (p) * DBG_CNT_PORT_BASE) +#define DIS_CLR BIT(31) + +/* Values of XTAL_FSEL_S */ +#define XTAL_40MHZ 0 +#define XTAL_25MHZ 1 + +#define PLLGP_EN 0x7820 +#define EN_COREPLL BIT(2) +#define SW_CLKSW BIT(1) +#define SW_PLLGP BIT(0) + +#define PLLGP_CR0 0x78a8 +#define RG_COREPLL_EN BIT(22) +#define RG_COREPLL_POSDIV_S 23 +#define RG_COREPLL_POSDIV_M 0x3800000 +#define RG_COREPLL_SDM_PCW_S 1 +#define RG_COREPLL_SDM_PCW_M 0x3ffffe +#define RG_COREPLL_SDM_PCW_CHG BIT(0) + +/* TOP Signals Status Register */ +#define TOP_SIG_SR 0x780c +#define PAD_MCM_SMI_EN BIT(0) +#define PAD_DUAL_SGMII_EN BIT(1) + +/* RGMII and SGMII PLL clock */ +#define ANA_PLLGP_CR2 0x78b0 +#define ANA_PLLGP_CR5 0x78bc + +/* GPIO mode define */ +#define GPIO_MODE_REGS(x) (0x7c0c + (((x) / 8) * 4)) +#define GPIO_MODE_S 4 + +/* GPIO GROUP IOLB SMT0 Control */ +#define SMT0_IOLB 0x7f04 +#define SMT_IOLB_5_SMI_MDC_EN BIT(5) + +/* Unique fields of PMCR for MT7531 */ +#define FORCE_MODE_EEE1G BIT(25) +#define FORCE_MODE_EEE100 BIT(26) +#define FORCE_MODE_TX_FC BIT(27) +#define FORCE_MODE_RX_FC BIT(28) +#define FORCE_MODE_DPX BIT(29) +#define FORCE_MODE_SPD BIT(30) +#define FORCE_MODE_LNK BIT(31) +#define FORCE_MODE BIT(15) + +#define CHIP_REV 0x781C +#define CHIP_NAME_S 16 +#define CHIP_NAME_M 0xffff0000 +#define CHIP_REV_S 0 +#define CHIP_REV_M 0x0f +#define CHIP_REV_E1 0x0 + +#define CLKGEN_CTRL 0x7500 +#define CLK_SKEW_OUT_S 8 +#define CLK_SKEW_OUT_M 0x300 +#define CLK_SKEW_IN_S 6 +#define CLK_SKEW_IN_M 0xc0 +#define RXCLK_NO_DELAY BIT(5) +#define TXCLK_NO_REVERSE BIT(4) +#define GP_MODE_S 1 +#define GP_MODE_M 0x06 +#define GP_CLK_EN BIT(0) + +#define CPGC_CTRL 0xB0 +#define COL_EN BIT(0) +#define COL_CLK_EN BIT(1) +#define COL_RST_N BIT(2) +#define COL_BUSY BIT(3) + +/* Values of GP_MODE */ +#define GP_MODE_RGMII 0 +#define GP_MODE_MII 1 +#define GP_MODE_REV_MII 2 + +/* Values of CLK_SKEW_IN */ +#define CLK_SKEW_IN_NO_CHANGE 0 +#define CLK_SKEW_IN_DELAY_100PPS 1 +#define CLK_SKEW_IN_DELAY_200PPS 2 +#define CLK_SKEW_IN_REVERSE 3 + +/* Values of CLK_SKEW_OUT 
*/ +#define CLK_SKEW_OUT_NO_CHANGE 0 +#define CLK_SKEW_OUT_DELAY_100PPS 1 +#define CLK_SKEW_OUT_DELAY_200PPS 2 +#define CLK_SKEW_OUT_REVERSE 3 + +/* Proprietary Control Register of Internal Phy device 0x1e */ +#define RXADC_CONTROL_3 0xc2 +#define RXADC_LDO_CONTROL_2 0xd3 + +/* Proprietary Control Register of Internal Phy device 0x1f */ +#define TXVLD_DA_271 0x271 +#define TXVLD_DA_272 0x272 +#define TXVLD_DA_273 0x273 + +/* gpio pinmux pins and functions define */ +static int gpio_int_pins[] = {0}; +static int gpio_int_funcs[] = {1}; +static int gpio_mdc_pins[] = {11, 20}; +static int gpio_mdc_funcs[] = {2, 2}; +static int gpio_mdio_pins[] = {12, 21}; +static int gpio_mdio_funcs[] = {2, 2}; + +static int mt7531_set_port_sgmii_force_mode(struct gsw_mt753x *gsw, u32 port, + struct mt753x_port_cfg *port_cfg) +{ + u32 speed, port_base, val; + ktime_t timeout; + u32 timeout_us; + + if (port < 5 || port >= MT753X_NUM_PORTS) { + dev_info(gsw->dev, "port %d is not a SGMII port\n", port); + return -EINVAL; + } + + port_base = port - 5; + + switch (port_cfg->speed) { + case MAC_SPD_1000: + speed = RG_TPHY_SPEED_1000; + break; + case MAC_SPD_2500: + speed = RG_TPHY_SPEED_2500; + break; + default: + dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n", + port_cfg->speed, port); + + speed = RG_TPHY_SPEED_1000; + } + + /* Step 1: Speed select register setting */ + val = mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base)); + val &= ~RG_TPHY_SPEED_M; + val |= speed << RG_TPHY_SPEED_S; + mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val); + + /* Step 2 : Disable AN */ + val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base)); + val &= ~SGMII_AN_ENABLE; + mt753x_reg_write(gsw, PCS_CONTROL_1(port_base), val); + + /* Step 3: SGMII force mode setting */ + val = mt753x_reg_read(gsw, SGMII_MODE(port_base)); + val &= ~SGMII_IF_MODE_ADVERT_AN; + val &= ~SGMII_IF_MODE_FORCE_SPEED_M; + val |= SGMII_IF_MODE_FORCE_SPEED_1000 << SGMII_IF_MODE_FORCE_SPEED_S; + val |= SGMII_IF_MODE_FORCE_DUPLEX; + /* For sgmii force mode, 0 is full duplex and 1 is half duplex */ + if (port_cfg->duplex) + val &= ~SGMII_IF_MODE_FORCE_DUPLEX; + + mt753x_reg_write(gsw, SGMII_MODE(port_base), val); + + /* Step 4: XXX: Disable Link partner's AN and set force mode */ + + /* Step 5: XXX: Special setting for PHYA ==> reserved for flexible */ + + /* Step 6 : Release PHYA power down state */ + val = mt753x_reg_read(gsw, QPHY_PWR_STATE_CTRL(port_base)); + val &= ~PHYA_PWD; + mt753x_reg_write(gsw, QPHY_PWR_STATE_CTRL(port_base), val); + + /* Step 7 : Polling SGMII_LINK_STATUS */ + timeout_us = 2000000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base)); + val &= SGMII_LINK_STATUS; + + if (val) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + return 0; +} + +static int mt7531_set_port_sgmii_an_mode(struct gsw_mt753x *gsw, u32 port, + struct mt753x_port_cfg *port_cfg) +{ + u32 speed, port_base, val; + ktime_t timeout; + u32 timeout_us; + + if (port < 5 || port >= MT753X_NUM_PORTS) { + dev_info(gsw->dev, "port %d is not a SGMII port\n", port); + return -EINVAL; + } + + port_base = port - 5; + + switch (port_cfg->speed) { + case MAC_SPD_1000: + speed = RG_TPHY_SPEED_1000; + break; + case MAC_SPD_2500: + speed = RG_TPHY_SPEED_2500; + break; + default: + dev_info(gsw->dev, "invalid SGMII speed idx %d for port %d\n", + port_cfg->speed, port); + + speed = RG_TPHY_SPEED_1000; + } + + /* Step 1: Speed select register setting */ + val = 
mt753x_reg_read(gsw, PHYA_CTRL_SIGNAL3(port_base)); + val &= ~RG_TPHY_SPEED_M; + val |= speed << RG_TPHY_SPEED_S; + mt753x_reg_write(gsw, PHYA_CTRL_SIGNAL3(port_base), val); + + /* Step 2: Remote fault disable */ + val = mt753x_reg_read(gsw, SGMII_MODE(port)); + val |= SGMII_REMOTE_FAULT_DIS; + mt753x_reg_write(gsw, SGMII_MODE(port), val); + + /* Step 3: Setting Link partner's AN enable = 1 */ + + /* Step 4: Setting Link partner's device ability for speed/duplex */ + + /* Step 5: AN re-start */ + val = mt753x_reg_read(gsw, PCS_CONTROL_1(port)); + val |= SGMII_AN_RESTART; + mt753x_reg_write(gsw, PCS_CONTROL_1(port), val); + + /* Step 6: Special setting for PHYA ==> reserved for flexible */ + + /* Step 7 : Polling SGMII_LINK_STATUS */ + timeout_us = 2000000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = mt753x_reg_read(gsw, PCS_CONTROL_1(port_base)); + val &= SGMII_LINK_STATUS; + + if (val) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + return 0; +} + +static void mt7531_sgmii_ssc(struct gsw_mt753x *gsw, u32 port, int enable) +{ + u32 val; + u32 port_base = port - 5; + + if (enable) { + val = mt753x_reg_read(gsw, ANA_CKBG(port_base)); + val |= SSUSB_PLL_SSC_EN; + mt753x_reg_write(gsw, ANA_CKBG(port_base), val); + + val = mt753x_reg_read(gsw, ANA_DA_FORCE_MODE1(port_base)); + val |= FORCE_PLL_SSC_EN; + mt753x_reg_write(gsw, ANA_DA_FORCE_MODE1(port_base), val); + } else { + val = mt753x_reg_read(gsw, ANA_CKBG(port_base)); + val &= ~SSUSB_PLL_SSC_EN; + mt753x_reg_write(gsw, ANA_CKBG(port_base), val); + + val = mt753x_reg_read(gsw, ANA_DA_FORCE_MODE1(port_base)); + val &= ~FORCE_PLL_SSC_EN; + mt753x_reg_write(gsw, ANA_DA_FORCE_MODE1(port_base), val); + } +} + +static int mt7531_set_port_rgmii(struct gsw_mt753x *gsw, u32 port) +{ + u32 val; + + if (port != 5) { + dev_info(gsw->dev, "RGMII mode is not available for port %d\n", + port); + return -EINVAL; + } + + val = mt753x_reg_read(gsw, CLKGEN_CTRL); + val |= GP_CLK_EN; + val &= ~GP_MODE_M; + val |= GP_MODE_RGMII << GP_MODE_S; + val |= TXCLK_NO_REVERSE; + val |= RXCLK_NO_DELAY; + val &= ~CLK_SKEW_IN_M; + val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S; + val &= ~CLK_SKEW_OUT_M; + val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S; + mt753x_reg_write(gsw, CLKGEN_CTRL, val); + + return 0; +} + +static int mt7531_mac_port_setup(struct gsw_mt753x *gsw, u32 port, + struct mt753x_port_cfg *port_cfg) +{ + u32 pmcr; + u32 speed; + + if (port < 5 || port >= MT753X_NUM_PORTS) { + dev_info(gsw->dev, "port %d is not a MAC port\n", port); + return -EINVAL; + } + + if (port_cfg->enabled) { + pmcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) | + MAC_MODE | MAC_TX_EN | MAC_RX_EN | + BKOFF_EN | BACKPR_EN; + + if (port_cfg->force_link) { + /* PMCR's speed field 0x11 is reserved, + * sw should set 0x10 + */ + speed = port_cfg->speed; + if (port_cfg->speed == MAC_SPD_2500) + speed = MAC_SPD_1000; + + pmcr |= FORCE_MODE_LNK | FORCE_LINK | + FORCE_MODE_SPD | FORCE_MODE_DPX | + FORCE_MODE_RX_FC | FORCE_MODE_TX_FC | + FORCE_RX_FC | FORCE_TX_FC | + (speed << FORCE_SPD_S); + + if (port_cfg->duplex) + pmcr |= FORCE_DPX; + } + } else { + pmcr = FORCE_MODE_LNK; + } + + switch (port_cfg->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + mt7531_set_port_rgmii(gsw, port); + break; + case PHY_INTERFACE_MODE_SGMII: + if (port_cfg->force_link) + mt7531_set_port_sgmii_force_mode(gsw, port, port_cfg); + else + mt7531_set_port_sgmii_an_mode(gsw, port, port_cfg); + + mt7531_sgmii_ssc(gsw, port, port_cfg->ssc_on); + break; + 
default: + if (port_cfg->enabled) + dev_info(gsw->dev, "%s is not supported by port %d\n", + phy_modes(port_cfg->phy_mode), port); + + pmcr = FORCE_MODE_LNK; + } + + mt753x_reg_write(gsw, PMCR(port), pmcr); + + return 0; +} + +static void mt7531_core_pll_setup(struct gsw_mt753x *gsw) +{ + u32 val; + u32 top_sig; + u32 hwstrap; + u32 xtal; + + val = mt753x_reg_read(gsw, CHIP_REV); + top_sig = mt753x_reg_read(gsw, TOP_SIG_SR); + hwstrap = mt753x_reg_read(gsw, HWSTRAP); + if ((val & CHIP_REV_M) > 0) + xtal = (top_sig & PAD_MCM_SMI_EN) ? XTAL_40MHZ : XTAL_25MHZ; + else + xtal = (hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S; + + /* dump HW strap and XTAL */ + dev_info(gsw->dev, "HWSTRAP=0x%x XTAL=%dMHz\n", hwstrap, + (xtal == XTAL_25MHZ) ? 25 : 40); + + /* Only BE needs additional setting */ + if (top_sig & PAD_DUAL_SGMII_EN) + return; + + /* Disable Port5 SGMII clearly */ + val = mt753x_reg_read(gsw, PHYA_ANA_SYSPLL(0)); + val &= ~RG_VUSB10_ON; + mt753x_reg_write(gsw, PHYA_ANA_SYSPLL(0), val); + + switch (xtal) { + case XTAL_25MHZ: + /* Step 1 : Disable MT7531 COREPLL */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val &= ~EN_COREPLL; + mt753x_reg_write(gsw, PLLGP_EN, val); + + /* Step 2: switch to XTAL output */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= SW_CLKSW; + mt753x_reg_write(gsw, PLLGP_EN, val); + + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_EN; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Step 3: disable PLLGP and enable program PLLGP */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= SW_PLLGP; + mt753x_reg_write(gsw, PLLGP_EN, val); + + /* Step 4: program COREPLL output frequency to 500MHz */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_POSDIV_M; + val |= 2 << RG_COREPLL_POSDIV_S; + mt753x_reg_write(gsw, PLLGP_CR0, val); + usleep_range(25, 35); + + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_SDM_PCW_M; + val |= 0x140000 << RG_COREPLL_SDM_PCW_S; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Set feedback divide ratio update signal to high */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val |= RG_COREPLL_SDM_PCW_CHG; + mt753x_reg_write(gsw, PLLGP_CR0, val); + /* Wait for at least 16 XTAL clocks */ + usleep_range(10, 20); + + /* Step 5: set feedback divide ratio update signal to low */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_SDM_PCW_CHG; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Enable 325M clock for SGMII */ + mt753x_reg_write(gsw, ANA_PLLGP_CR5, 0xad0000); + + /* Enable 250SSC clock for RGMII */ + mt753x_reg_write(gsw, ANA_PLLGP_CR2, 0x4f40000); + + /* Step 6: Enable MT7531 PLL */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val |= RG_COREPLL_EN; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= EN_COREPLL; + mt753x_reg_write(gsw, PLLGP_EN, val); + usleep_range(25, 35); + + break; + case XTAL_40MHZ: + /* Step 1 : Disable MT7531 COREPLL */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val &= ~EN_COREPLL; + mt753x_reg_write(gsw, PLLGP_EN, val); + + /* Step 2: switch to XTAL output */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= SW_CLKSW; + mt753x_reg_write(gsw, PLLGP_EN, val); + + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_EN; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Step 3: disable PLLGP and enable program PLLGP */ + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= SW_PLLGP; + mt753x_reg_write(gsw, PLLGP_EN, val); + + /* Step 4: program COREPLL output frequency to 500MHz */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_POSDIV_M; + 
val |= 2 << RG_COREPLL_POSDIV_S; + mt753x_reg_write(gsw, PLLGP_CR0, val); + usleep_range(25, 35); + + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_SDM_PCW_M; + val |= 0x190000 << RG_COREPLL_SDM_PCW_S; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Set feedback divide ratio update signal to high */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val |= RG_COREPLL_SDM_PCW_CHG; + mt753x_reg_write(gsw, PLLGP_CR0, val); + /* Wait for at least 16 XTAL clocks */ + usleep_range(10, 20); + + /* Step 5: set feedback divide ratio update signal to low */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val &= ~RG_COREPLL_SDM_PCW_CHG; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + /* Enable 325M clock for SGMII */ + mt753x_reg_write(gsw, ANA_PLLGP_CR5, 0xad0000); + + /* Enable 250SSC clock for RGMII */ + mt753x_reg_write(gsw, ANA_PLLGP_CR2, 0x4f40000); + + /* Step 6: Enable MT7531 PLL */ + val = mt753x_reg_read(gsw, PLLGP_CR0); + val |= RG_COREPLL_EN; + mt753x_reg_write(gsw, PLLGP_CR0, val); + + val = mt753x_reg_read(gsw, PLLGP_EN); + val |= EN_COREPLL; + mt753x_reg_write(gsw, PLLGP_EN, val); + usleep_range(25, 35); + break; + } +} + +static int mt7531_internal_phy_calibration(struct gsw_mt753x *gsw) +{ + return 0; +} + +static int mt7531_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev) +{ + u32 rev, topsig; + + rev = mt753x_reg_read(gsw, CHIP_REV); + + if (((rev & CHIP_NAME_M) >> CHIP_NAME_S) == MT7531) { + if (crev) { + topsig = mt753x_reg_read(gsw, TOP_SIG_SR); + + crev->rev = rev & CHIP_REV_M; + crev->name = topsig & PAD_DUAL_SGMII_EN ? + "MT7531AE" : "MT7531BE"; + } + + return 0; + } + + return -ENODEV; +} + +static void pinmux_set_mux_7531(struct gsw_mt753x *gsw, u32 pin, u32 mode) +{ + u32 val; + + val = mt753x_reg_read(gsw, GPIO_MODE_REGS(pin)); + val &= ~(0xf << (pin & 7) * GPIO_MODE_S); + val |= mode << (pin & 7) * GPIO_MODE_S; + mt753x_reg_write(gsw, GPIO_MODE_REGS(pin), val); +} + +static int mt7531_set_gpio_pinmux(struct gsw_mt753x *gsw) +{ + u32 group = 0; + struct device_node *np = gsw->dev->of_node; + + /* Set GPIO 0 interrupt mode */ + pinmux_set_mux_7531(gsw, gpio_int_pins[0], gpio_int_funcs[0]); + + of_property_read_u32(np, "mediatek,mdio_master_pinmux", &group); + + /* group = 0: do nothing, 1: 1st group (AE), 2: 2nd group (BE) */ + if (group > 0 && group <= 2) { + group--; + pinmux_set_mux_7531(gsw, gpio_mdc_pins[group], + gpio_mdc_funcs[group]); + pinmux_set_mux_7531(gsw, gpio_mdio_pins[group], + gpio_mdio_funcs[group]); + } + + return 0; +} + +static void mt7531_phy_pll_setup(struct gsw_mt753x *gsw) +{ + u32 hwstrap; + u32 val; + + val = mt753x_reg_read(gsw, CHIP_REV); + if ((val & CHIP_REV_M) > 0) + return; + + hwstrap = mt753x_reg_read(gsw, HWSTRAP); + + switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) { + case XTAL_25MHZ: + /* disable pll auto calibration */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608); + + /* change pll sel */ + val = gsw->mmd_read(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_403); + val &= ~(PHY_PLL_M); + val |= PHY_PLL_SEL(3); + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val); + + /* set divider ratio */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_10A, 0x1009); + + /* set divider ratio */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0x7c6); + + /* capacitance and resistance adjustment */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_10C, 0xa8be); + + break; + case XTAL_40MHZ: + /* disable pll auto calibration */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_104, 0x608); + + /* change pll sel */ + val = 
gsw->mmd_read(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_403); + val &= ~(PHY_PLL_M); + val |= PHY_PLL_SEL(3); + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val); + + /* set divider ratio */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_10A, 0x1018); + + /* set divider ratio */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10B, 0xc676); + + /* capacitance and resistance adjustment */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, + PHY_DEV1F_REG_10C, 0xd8be); + break; + } + + /* power down pll. additional delay is not required via mdio access */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x10); + + /* power up pll */ + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_10D, 0x14); +} + +/* 12 registers for TX_MLT3 waveform tuning. + * 012 345 678 9ab + * 1 __ + * _/ \_ + * 0_/ \ + * \_ _/ + * -1 \__/ + */ +static void mt7531_phy_100m_eye_diag_setting(struct gsw_mt753x *gsw, u32 port) +{ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x0, 0x187); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x1, 0x1c9); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x2, 0x1c6); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x3, 0x182); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x4, 0x208); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x5, 0x205); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x6, 0x384); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x7, 0x3cb); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x8, 0x3c4); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x9, 0x30a); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xa, 0x00b); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xb, 0x002); +} + +static void mt7531_phy_setting(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + mt7531_phy_100m_eye_diag_setting(gsw, i); + + /* Enable HW auto downshift */ + gsw->mii_write(gsw, i, 0x1f, 0x1); + val = gsw->mii_read(gsw, i, PHY_EXT_REG_14); + val |= PHY_EN_DOWN_SHFIT; + gsw->mii_write(gsw, i, PHY_EXT_REG_14, val); + + /* Decrease SlvDPSready time */ + val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_17); + val &= ~SLV_DSP_READY_TIME_M; + val |= 0xc << SLV_DSP_READY_TIME_S; + mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_17, val); + + /* Enable Random Update Mechanism */ + val = mt753x_tr_read(gsw, i, PMA_CH, PMA_NOD, PMA_18); + val |= ENABLE_RANDOM_UPDATE_TRIGGER; + mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_18, val); + + /* PHY link down power saving enable */ + val = gsw->mii_read(gsw, i, PHY_EXT_REG_17); + val |= PHY_LINKDOWN_POWER_SAVING_EN; + gsw->mii_write(gsw, i, PHY_EXT_REG_17, val); + + val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6); + val &= ~PHY_POWER_SAVING_M; + val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S; + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_0C6, val); + + /* Timing Recovery for GbE slave mode */ + mt753x_tr_write(gsw, i, PMA_CH, PMA_NOD, PMA_01, 0x6fb90a); + mt753x_tr_write(gsw, i, DSP_CH, DSP_NOD, DSP_06, 0x2ebaef); + val = gsw->mmd_read(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234); + val |= TR_OPEN_LOOP_EN; + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_234, val); + + /* Enable Asymmetric Pause Capability */ + val = gsw->mii_read(gsw, i, MII_ADVERTISE); + val |= ADVERTISE_PAUSE_ASYM; + gsw->mii_write(gsw, i, MII_ADVERTISE, val); + } +} + +static void mt7531_adjust_line_driving(struct gsw_mt753x *gsw, u32 port) +{ + /* For ADC timing margin window for LDO calibration */ + 
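+ /* Per-port analog tuning: mt7531_sw_post_init() invokes this once for
+  * each internal PHY; all writes below use MMD access to that PHY.
+  */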
gsw->mmd_write(gsw, port, PHY_DEV1E, RXADC_LDO_CONTROL_2, 0x2222); + + /* Adjust AD sample timing */ + gsw->mmd_write(gsw, port, PHY_DEV1E, RXADC_CONTROL_3, 0x4444); + + /* Adjust Line driver current for different mode */ + gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_271, 0x2ca5); + + /* Adjust Line driver current for different mode */ + gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_272, 0xc6b); + + /* Adjust Line driver gain for 10BT from 1000BT calibration result */ + gsw->mmd_write(gsw, port, PHY_DEV1F, TXVLD_DA_273, 0x3000); + + /* Adjust RX Echo path filter */ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_0FE, 0x2); + + /* Adjust RX HVGA bias current */ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_41, 0x3333); + + /* Adjust TX class AB driver 1 */ + gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_268, 0x384); + + /* Adjust TX class AB driver 2 */ + gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_269, 0x1114); + + /* Adjust DAC delay for TX Pairs */ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_13, 0x404); + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_14, 0x404); + + /* Adjust DAC digital delay for TX Delay */ + gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_44, 0xc0); + + /* Adjust Line driver compensation cap for stability concern due to + * increase current. + */ + gsw->mmd_write(gsw, port, PHY_DEV1F, PHY_DEV1F_REG_26A, 0x3333); +} + +static void mt7531_eee_setting(struct gsw_mt753x *gsw, u32 port) +{ + u32 val; + + /* Disable EEE */ + gsw->mmd_write(gsw, port, PHY_DEV07, PHY_DEV07_REG_03C, 0); + + /* Disable generate signal to clear the scramble_lock when lpi mode */ + val = gsw->mmd_read(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189); + val &= ~DESCRAMBLER_CLEAR_EN; + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_189, val); + + /* Roll back EEE Slave Mode */ + gsw->mmd_write(gsw, port, 0x1e, 0x2d1, 0); + mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_08, 0x1b); + mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_0f, 0); + mt753x_tr_write(gsw, port, DSP_CH, DSP_NOD, DSP_10, 0x5000); + + /* Adjust 100_mse_threshold */ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff); + + /* Disable mcc */ + gsw->mmd_write(gsw, port, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300); +} + +static void mt7531_afifo_reset(struct gsw_mt753x *gsw, int enable) +{ + int p; + u32 val; + + if (enable) { + for (p = 0; p < MT753X_NUM_PORTS; p++) { + val = mt753x_reg_read(gsw, DBG_CNT(p)); + val &= ~DIS_CLR; + mt753x_reg_write(gsw, DBG_CNT(p), val); + } + } else { + for (p = 0; p < MT753X_NUM_PORTS; p++) { + val = mt753x_reg_read(gsw, DBG_CNT(p)); + val |= DIS_CLR; + mt753x_reg_write(gsw, DBG_CNT(p), val); + } + } +} + +static int mt7531_sw_init(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + gsw->phy_base = (gsw->smi_addr + 1) & MT753X_SMI_ADDR_MASK; + + gsw->mii_read = mt753x_mii_read; + gsw->mii_write = mt753x_mii_write; + gsw->mmd_read = mt753x_mmd_read; + gsw->mmd_write = mt753x_mmd_write; + + gsw->hw_phy_cal = of_property_read_bool(gsw->dev->of_node, "mediatek,hw_phy_cal"); + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + val = gsw->mii_read(gsw, i, MII_BMCR); + val |= BMCR_ISOLATE; + gsw->mii_write(gsw, i, MII_BMCR, val); + } + + /* Force MAC link down before reset */ + mt753x_reg_write(gsw, PMCR(5), FORCE_MODE_LNK); + mt753x_reg_write(gsw, PMCR(6), FORCE_MODE_LNK); + + /* Switch soft reset */ + mt753x_reg_write(gsw, SYS_CTRL, SW_SYS_RST | SW_REG_RST); + usleep_range(10, 20); + + /* Enable MDC input Schmitt Trigger */ + val = mt753x_reg_read(gsw, SMT0_IOLB); + 
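+ /* Read-modify-write: only the SMI MDC Schmitt trigger enable bit is
+  * set here; the remaining SMT0_IOLB bits keep their existing values.
+  */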
mt753x_reg_write(gsw, SMT0_IOLB, val | SMT_IOLB_5_SMI_MDC_EN); + + /* Set 7531 gpio pinmux */ + mt7531_set_gpio_pinmux(gsw); + + mt7531_core_pll_setup(gsw); + mt7531_mac_port_setup(gsw, 5, &gsw->port5_cfg); + mt7531_mac_port_setup(gsw, 6, &gsw->port6_cfg); + + /* Global mac control settings */ + mt753x_reg_write(gsw, GMACCR, + (15 << MTCC_LMT_S) | (15 << MAX_RX_JUMBO_S) | + RX_PKT_LEN_MAX_JUMBO); + + /* Enable Collision Poll */ + val = mt753x_reg_read(gsw, CPGC_CTRL); + val |= COL_CLK_EN; + mt753x_reg_write(gsw, CPGC_CTRL, val); + val |= COL_RST_N; + mt753x_reg_write(gsw, CPGC_CTRL, val); + val |= COL_EN; + mt753x_reg_write(gsw, CPGC_CTRL, val); + + /* Disable AFIFO reset for extra short IPG */ + mt7531_afifo_reset(gsw, 0); + + return 0; +} + +static int mt7531_sw_post_init(struct gsw_mt753x *gsw) +{ + int i; + u32 val; + + /* Let internal PHYs only Tx constant data in configure stage. */ + for (i = 0; i < MT753X_NUM_PHYS; i++) + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x200); + + /* Internal PHYs might be enabled by HW Bootstrapping, or bootloader. + * Turn off PHYs before setup PHY PLL. + */ + val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403); + val |= PHY_EN_BYPASS_MODE; + val |= POWER_ON_OFF; + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val); + + mt7531_phy_pll_setup(gsw); + + /* Enable Internal PHYs before phy setting */ + val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403); + val |= PHY_EN_BYPASS_MODE; + val &= ~POWER_ON_OFF; + gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val); + + mt7531_phy_setting(gsw); + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + val = gsw->mii_read(gsw, i, MII_BMCR); + val &= ~BMCR_ISOLATE; + gsw->mii_write(gsw, i, MII_BMCR, val); + } + + for (i = 0; i < MT753X_NUM_PHYS; i++) { + mt7531_adjust_line_driving(gsw, i); + mt7531_eee_setting(gsw, i); + } + + /* Restore internal PHYs normal Tx function after configure stage. */ + for (i = 0; i < MT753X_NUM_PHYS; i++) + gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_141, 0x0); + + mt7531_internal_phy_calibration(gsw); + + return 0; +} + +struct mt753x_sw_id mt7531_id = { + .model = MT7531, + .detect = mt7531_sw_detect, + .init = mt7531_sw_init, + .post_init = mt7531_sw_post_init +}; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhanguo Ju "); +MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch"); diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.h new file mode 100644 index 000000000..52c8a49fd --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt7531.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018 MediaTek Inc. + */ + +#ifndef _MT7531_H_ +#define _MT7531_H_ + +#include "mt753x.h" + +extern struct mt753x_sw_id mt7531_id; + +#endif /* _MT7531_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x.h new file mode 100644 index 000000000..732bda1d3 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Weijie Gao + */ + +#ifndef _MT753X_H_ +#define _MT753X_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SWCONFIG +#include +#endif + +#include "mt753x_vlan.h" + +#define MT753X_DFL_CPU_PORT 6 +#define MT753X_NUM_PHYS 5 + +#define MT753X_DFL_SMI_ADDR 0x1f +#define MT753X_SMI_ADDR_MASK 0x1f + +struct gsw_mt753x; + +enum mt753x_model { + MT7530 = 0x7530, + MT7531 = 0x7531 +}; + +struct mt753x_port_cfg { + struct device_node *np; + int phy_mode; + u32 enabled: 1; + u32 force_link: 1; + u32 speed: 2; + u32 duplex: 1; + bool ssc_on; + bool stag_on; +}; + +struct mt753x_phy { + struct gsw_mt753x *gsw; + struct net_device netdev; + struct phy_device *phydev; +}; + +struct gsw_mt753x { + u32 id; + + struct device *dev; + struct mii_bus *host_bus; + struct mii_bus *gphy_bus; + struct mutex mii_lock; /* MII access lock */ + u32 smi_addr; + u32 phy_base; + int direct_phy_access; + + enum mt753x_model model; + const char *name; + + struct mt753x_port_cfg port5_cfg; + struct mt753x_port_cfg port6_cfg; + + bool hw_phy_cal; + bool phy_status_poll; + struct mt753x_phy phys[MT753X_NUM_PHYS]; +// int phy_irqs[PHY_MAX_ADDR]; //FIXME + + int phy_link_sts; + + int irq; + int reset_pin; + struct work_struct irq_worker; + +#ifdef CONFIG_SWCONFIG + struct switch_dev swdev; + u32 cpu_port; +#endif + + int global_vlan_enable; + struct mt753x_vlan_entry vlan_entries[MT753X_NUM_VLANS]; + struct mt753x_port_entry port_entries[MT753X_NUM_PORTS]; + + int (*mii_read)(struct gsw_mt753x *gsw, int phy, int reg); + void (*mii_write)(struct gsw_mt753x *gsw, int phy, int reg, u16 val); + + int (*mmd_read)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg); + void (*mmd_write)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg, + u16 val); + + struct list_head list; +}; + +struct chip_rev { + const char *name; + u32 rev; +}; + +struct mt753x_sw_id { + enum mt753x_model model; + int (*detect)(struct gsw_mt753x *gsw, struct chip_rev *crev); + int (*init)(struct gsw_mt753x *gsw); + int (*post_init)(struct gsw_mt753x *gsw); +}; + +extern struct list_head mt753x_devs; + +struct gsw_mt753x *mt753x_get_gsw(u32 id); +struct gsw_mt753x *mt753x_get_first_gsw(void); +void mt753x_put_gsw(void); +void mt753x_lock_gsw(void); + +u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg); +void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val); + +int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg); +void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val); + +int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg); +void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg, + u16 val); + +int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg); +void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg, + u16 val); + +int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr); +void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr, + u32 data); + +void mt753x_irq_worker(struct work_struct *work); +void mt753x_irq_enable(struct gsw_mt753x *gsw); + +int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr); +int extphy_init(struct gsw_mt753x *gsw, int addr); + +/* MDIO Indirect Access Registers */ +#define MII_MMD_ACC_CTL_REG 0x0d +#define MMD_CMD_S 14 +#define MMD_CMD_M 0xc000 +#define MMD_DEVAD_S 0 +#define MMD_DEVAD_M 0x1f + +/* MMD_CMD: MMD commands */ +#define MMD_ADDR 0 +#define MMD_DATA 1 + +#define MII_MMD_ADDR_DATA_REG 0x0e + +/* 
Procedure of MT753x Internal Register Access + * + * 1. Internal Register Address + * + * The MT753x has a 16-bit register address and each register is 32-bit. + * This means the lowest two bits are not used as the register address is + * 4-byte aligned. + * + * Rest of the valid bits are divided into two parts: + * Bit 15..6 is the Page address + * Bit 5..2 is the low address + * + * ------------------------------------------------------------------- + * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 | + * |----------------------------------------|---------------|--------| + * | Page Address | Address | Unused | + * ------------------------------------------------------------------- + * + * 2. MDIO access timing + * + * The MT753x uses the following MDIO timing for a single register read + * + * Phase 1: Write Page Address + * ------------------------------------------------------------------- + * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | RSVD | PAGE_ADDR | + * ------------------------------------------------------------------- + * | 01 | 01 | 11111 | 1 | 1111 | xx | 00000 | REG_ADDR[15..6] | + * ------------------------------------------------------------------- + * + * Phase 2: Write low Address & Read low word + * ------------------------------------------------------------------- + * | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA | + * ------------------------------------------------------------------- + * | 01 | 10 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] | + * ------------------------------------------------------------------- + * + * Phase 3: Read high word + * ------------------------------------------------------------------- + * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA | + * ------------------------------------------------------------------- + * | 01 | 10 | 11111 | 1 | 0000 | xx | DATA[31..16] | + * ------------------------------------------------------------------- + * + * The MT753x uses the following MDIO timing for a single register write + * + * Phase 1: Write Page Address (The same as read) + * + * Phase 2: Write low Address and low word + * ------------------------------------------------------------------- + * | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA | + * ------------------------------------------------------------------- + * | 01 | 01 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] | + * ------------------------------------------------------------------- + * + * Phase 3: write high word + * ------------------------------------------------------------------- + * | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA | + * ------------------------------------------------------------------- + * | 01 | 01 | 11111 | 1 | 0000 | xx | DATA[31..16] | + * ------------------------------------------------------------------- + * + */ + +/* Internal Register Address fields */ +#define MT753X_REG_PAGE_ADDR_S 6 +#define MT753X_REG_PAGE_ADDR_M 0xffc0 +#define MT753X_REG_ADDR_S 2 +#define MT753X_REG_ADDR_M 0x3c +#endif /* _MT753X_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_common.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_common.c new file mode 100644 index 000000000..4015ddf12 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_common.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Weijie Gao + */ + +#include +#include + +#include "mt753x.h" +#include "mt753x_regs.h" + +void mt753x_irq_enable(struct gsw_mt753x *gsw) +{ + u32 val; + int i; + + /* Record initial PHY link status */ + for (i = 0; i < MT753X_NUM_PHYS; i++) { + val = gsw->mii_read(gsw, i, MII_BMSR); + if (val & BMSR_LSTATUS) + gsw->phy_link_sts |= BIT(i); + } + + val = BIT(MT753X_NUM_PHYS) - 1; + + mt753x_reg_write(gsw, SYS_INT_EN, val); +} + +static void display_port_link_status(struct gsw_mt753x *gsw, u32 port) +{ + u32 pmsr, speed_bits; + const char *speed; + + pmsr = mt753x_reg_read(gsw, PMSR(port)); + + speed_bits = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S; + + switch (speed_bits) { + case MAC_SPD_10: + speed = "10Mbps"; + break; + case MAC_SPD_100: + speed = "100Mbps"; + break; + case MAC_SPD_1000: + speed = "1Gbps"; + break; + case MAC_SPD_2500: + speed = "2.5Gbps"; + break; + } + + if (pmsr & MAC_LNK_STS) { + dev_info(gsw->dev, "Port %d Link is Up - %s/%s\n", + port, speed, (pmsr & MAC_DPX_STS) ? "Full" : "Half"); + } else { + dev_info(gsw->dev, "Port %d Link is Down\n", port); + } +} + +void mt753x_irq_worker(struct work_struct *work) +{ + struct gsw_mt753x *gsw; + u32 sts, physts, laststs; + int i; + + gsw = container_of(work, struct gsw_mt753x, irq_worker); + + sts = mt753x_reg_read(gsw, SYS_INT_STS); + + /* Check for changed PHY link status */ + for (i = 0; i < MT753X_NUM_PHYS; i++) { + if (!(sts & PHY_LC_INT(i))) + continue; + + laststs = gsw->phy_link_sts & BIT(i); + physts = !!(gsw->mii_read(gsw, i, MII_BMSR) & BMSR_LSTATUS); + physts <<= i; + + if (physts ^ laststs) { + gsw->phy_link_sts ^= BIT(i); + display_port_link_status(gsw, i); + } + } + + mt753x_reg_write(gsw, SYS_INT_STS, sts); + + enable_irq(gsw->irq); +} diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_mdio.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_mdio.c new file mode 100644 index 000000000..06a1114b8 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_mdio.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Weijie Gao + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mt753x.h" +#include "mt753x_swconfig.h" +#include "mt753x_regs.h" +#include "mt753x_nl.h" +#include "mt7530.h" +#include "mt7531.h" + +static u32 mt753x_id; +struct list_head mt753x_devs; +static DEFINE_MUTEX(mt753x_devs_lock); + +static struct mt753x_sw_id *mt753x_sw_ids[] = { + &mt7530_id, + &mt7531_id, +}; + +u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg) +{ + u32 high, low; + + mutex_lock(&gsw->host_bus->mdio_lock); + + gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f, + (reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S); + + low = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr, + (reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S); + + high = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr, 0x10); + + mutex_unlock(&gsw->host_bus->mdio_lock); + + return (high << 16) | (low & 0xffff); +} + +void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val) +{ + mutex_lock(&gsw->host_bus->mdio_lock); + + gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f, + (reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S); + + gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, + (reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S, val & 0xffff); + + gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x10, val >> 16); + + mutex_unlock(&gsw->host_bus->mdio_lock); +} + +/* Indirect MDIO clause 22/45 access */ +static int mt753x_mii_rw(struct gsw_mt753x *gsw, int phy, int reg, u16 data, + u32 cmd, u32 st) +{ + ktime_t timeout; + u32 val, timeout_us; + int ret = 0; + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = mt753x_reg_read(gsw, PHY_IAC); + + if ((val & PHY_ACS_ST) == 0) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + val = (st << MDIO_ST_S) | + ((cmd << MDIO_CMD_S) & MDIO_CMD_M) | + ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) | + ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M); + + if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR) + val |= data & MDIO_RW_DATA_M; + + mt753x_reg_write(gsw, PHY_IAC, val | PHY_ACS_ST); + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = mt753x_reg_read(gsw, PHY_IAC); + + if ((val & PHY_ACS_ST) == 0) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + + if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) { + val = mt753x_reg_read(gsw, PHY_IAC); + ret = val & MDIO_RW_DATA_M; + } + + return ret; +} + +int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg) +{ + int val; + + if (phy < MT753X_NUM_PHYS) + phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + val = mt753x_mii_rw(gsw, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22); + mutex_unlock(&gsw->mii_lock); + + return val; +} + +void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val) +{ + if (phy < MT753X_NUM_PHYS) + phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + mt753x_mii_rw(gsw, phy, reg, val, MDIO_CMD_WRITE, MDIO_ST_C22); + mutex_unlock(&gsw->mii_lock); +} + +int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg) +{ + int val; + + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45); + val = mt753x_mii_rw(gsw, addr, 
devad, 0, MDIO_CMD_READ_C45, + MDIO_ST_C45); + mutex_unlock(&gsw->mii_lock); + + return val; +} + +void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg, + u16 val) +{ + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45); + mt753x_mii_rw(gsw, addr, devad, val, MDIO_CMD_WRITE, MDIO_ST_C45); + mutex_unlock(&gsw->mii_lock); +} + +int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg) +{ + u16 val; + + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + + mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG, + (MMD_ADDR << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M), + MDIO_CMD_WRITE, MDIO_ST_C22); + + mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg, + MDIO_CMD_WRITE, MDIO_ST_C22); + + mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG, + (MMD_DATA << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M), + MDIO_CMD_WRITE, MDIO_ST_C22); + + val = mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, 0, + MDIO_CMD_READ, MDIO_ST_C22); + + mutex_unlock(&gsw->mii_lock); + + return val; +} + +void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg, + u16 val) +{ + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + mutex_lock(&gsw->mii_lock); + + mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG, + (MMD_ADDR << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M), + MDIO_CMD_WRITE, MDIO_ST_C22); + + mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg, + MDIO_CMD_WRITE, MDIO_ST_C22); + + mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG, + (MMD_DATA << MMD_CMD_S) | + ((devad << MMD_DEVAD_S) & MMD_DEVAD_M), + MDIO_CMD_WRITE, MDIO_ST_C22); + + mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, val, + MDIO_CMD_WRITE, MDIO_ST_C22); + + mutex_unlock(&gsw->mii_lock); +} + +static inline int mt753x_get_duplex(const struct device_node *np) +{ + return of_property_read_bool(np, "full-duplex"); +} + +static void mt753x_load_port_cfg(struct gsw_mt753x *gsw) +{ + struct device_node *port_np; + struct device_node *fixed_link_node; + struct mt753x_port_cfg *port_cfg; + u32 port; + + for_each_child_of_node(gsw->dev->of_node, port_np) { + if (!of_device_is_compatible(port_np, "mediatek,mt753x-port")) + continue; + + if (!of_device_is_available(port_np)) + continue; + + if (of_property_read_u32(port_np, "reg", &port)) + continue; + + switch (port) { + case 5: + port_cfg = &gsw->port5_cfg; + break; + case 6: + port_cfg = &gsw->port6_cfg; + break; + default: + continue; + } + + if (port_cfg->enabled) { + dev_info(gsw->dev, "duplicated node for port%d\n", + port_cfg->phy_mode); + continue; + } + + port_cfg->np = port_np; + + port_cfg->phy_mode = of_get_phy_mode(port_np); + if (port_cfg->phy_mode < 0) { + dev_info(gsw->dev, "incorrect phy-mode %d\n", port); + continue; + } + + fixed_link_node = of_get_child_by_name(port_np, "fixed-link"); + if (fixed_link_node) { + u32 speed; + + port_cfg->force_link = 1; + port_cfg->duplex = mt753x_get_duplex(fixed_link_node); + + if (of_property_read_u32(fixed_link_node, "speed", + &speed)) { + speed = 0; + continue; + } + + of_node_put(fixed_link_node); + + switch (speed) { + case 10: + port_cfg->speed = MAC_SPD_10; + break; + case 100: + port_cfg->speed = MAC_SPD_100; + break; + case 1000: + port_cfg->speed = MAC_SPD_1000; + break; + case 2500: + port_cfg->speed = MAC_SPD_2500; + break; + default: + 
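+ /* Unsupported fixed-link speed: log it and skip this port node, so
+  * port_cfg->enabled is never set for it.
+  */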
dev_info(gsw->dev, "incorrect speed %d\n", + speed); + continue; + } + } + + port_cfg->ssc_on = of_property_read_bool(port_cfg->np, + "mediatek,ssc-on"); + port_cfg->stag_on = of_property_read_bool(port_cfg->np, + "mediatek,stag-on"); + port_cfg->enabled = 1; + } +} + +void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr, + u32 data) +{ + ktime_t timeout; + u32 timeout_us; + u32 val; + + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE); + + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + if (!!(val & PHY_TR_PKT_XMT_STA)) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + goto out; + } + + gsw->mii_write(gsw, addr, PHY_TR_LOW_DATA, PHY_TR_LOW_VAL(data)); + gsw->mii_write(gsw, addr, PHY_TR_HIGH_DATA, PHY_TR_HIGH_VAL(data)); + val = PHY_TR_PKT_XMT_STA | (PHY_TR_WRITE << PHY_TR_WR_S) | + (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) | + (daddr << PHY_TR_DATA_ADDR_S); + gsw->mii_write(gsw, addr, PHY_TR_CTRL, val); + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + if (!!(val & PHY_TR_PKT_XMT_STA)) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) + goto out; + } +out: + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0); +} + +int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr) +{ + ktime_t timeout; + u32 timeout_us; + u32 val; + u8 val_h; + + if (addr < MT753X_NUM_PHYS) + addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK; + + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE); + + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + if (!!(val & PHY_TR_PKT_XMT_STA)) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) { + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0); + return -ETIMEDOUT; + } + } + + val = PHY_TR_PKT_XMT_STA | (PHY_TR_READ << PHY_TR_WR_S) | + (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) | + (daddr << PHY_TR_DATA_ADDR_S); + gsw->mii_write(gsw, addr, PHY_TR_CTRL, val); + + timeout_us = 100000; + timeout = ktime_add_us(ktime_get(), timeout_us); + while (1) { + val = gsw->mii_read(gsw, addr, PHY_TR_CTRL); + + if (!!(val & PHY_TR_PKT_XMT_STA)) + break; + + if (ktime_compare(ktime_get(), timeout) > 0) { + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0); + return -ETIMEDOUT; + } + } + + val = gsw->mii_read(gsw, addr, PHY_TR_LOW_DATA); + val_h = gsw->mii_read(gsw, addr, PHY_TR_HIGH_DATA); + val |= (val_h << 16); + + gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0); + + return val; +} + +static void mt753x_add_gsw(struct gsw_mt753x *gsw) +{ + mutex_lock(&mt753x_devs_lock); + gsw->id = mt753x_id++; + INIT_LIST_HEAD(&gsw->list); + list_add_tail(&gsw->list, &mt753x_devs); + mutex_unlock(&mt753x_devs_lock); +} + +static void mt753x_remove_gsw(struct gsw_mt753x *gsw) +{ + mutex_lock(&mt753x_devs_lock); + list_del(&gsw->list); + mutex_unlock(&mt753x_devs_lock); +} + + +struct gsw_mt753x *mt753x_get_gsw(u32 id) +{ + struct gsw_mt753x *dev; + + mutex_lock(&mt753x_devs_lock); + + list_for_each_entry(dev, &mt753x_devs, list) { + if (dev->id == id) + return dev; + } + + mutex_unlock(&mt753x_devs_lock); + + return NULL; +} + +struct 
gsw_mt753x *mt753x_get_first_gsw(void) +{ + struct gsw_mt753x *dev; + + mutex_lock(&mt753x_devs_lock); + + list_for_each_entry(dev, &mt753x_devs, list) + return dev; + + mutex_unlock(&mt753x_devs_lock); + + return NULL; +} + +void mt753x_put_gsw(void) +{ + mutex_unlock(&mt753x_devs_lock); +} + +void mt753x_lock_gsw(void) +{ + mutex_lock(&mt753x_devs_lock); +} + +static int mt753x_hw_reset(struct gsw_mt753x *gsw) +{ + struct device_node *np = gsw->dev->of_node; + struct reset_control *rstc; + int mcm; + int ret = -EINVAL; + + mcm = of_property_read_bool(np, "mediatek,mcm"); + if (mcm) { + rstc = devm_reset_control_get(gsw->dev, "mcm"); + ret = IS_ERR(rstc); + if (IS_ERR(rstc)) { + dev_err(gsw->dev, "Missing reset ctrl of switch\n"); + return ret; + } + + reset_control_assert(rstc); + msleep(30); + reset_control_deassert(rstc); + + gsw->reset_pin = -1; + return 0; + } + + gsw->reset_pin = of_get_named_gpio(np, "reset-gpios", 0); + if (gsw->reset_pin < 0) { + dev_err(gsw->dev, "Missing reset pin of switch\n"); + return ret; + } + + ret = devm_gpio_request(gsw->dev, gsw->reset_pin, "mt753x-reset"); + if (ret) { + dev_info(gsw->dev, "Failed to request gpio %d\n", + gsw->reset_pin); + return ret; + } + + gpio_direction_output(gsw->reset_pin, 0); + msleep(30); + gpio_set_value(gsw->reset_pin, 1); + msleep(500); + + return 0; +} +#if 1 //XDXDXDXD +static int mt753x_mdio_read(struct mii_bus *bus, int addr, int reg) +{ + struct gsw_mt753x *gsw = bus->priv; + + return gsw->mii_read(gsw, addr, reg); +} + +static int mt753x_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct gsw_mt753x *gsw = bus->priv; + + gsw->mii_write(gsw, addr, reg, val); + + return 0; +} + +static const struct net_device_ops mt753x_dummy_netdev_ops = { +}; + +static void mt753x_phy_link_handler(struct net_device *dev) +{ + struct mt753x_phy *phy = container_of(dev, struct mt753x_phy, netdev); + struct phy_device *phydev = phy->phydev; + struct gsw_mt753x *gsw = phy->gsw; + u32 port = phy - gsw->phys; + + if (phydev->link) { + dev_info(gsw->dev, + "Port %d Link is Up - %s/%s - flow control %s\n", + port, phy_speed_to_str(phydev->speed), + (phydev->duplex == DUPLEX_FULL) ? "Full" : "Half", + phydev->pause ? 
"rx/tx" : "off"); + } else { + dev_info(gsw->dev, "Port %d Link is Down\n", port); + } +} + +static void mt753x_connect_internal_phys(struct gsw_mt753x *gsw, + struct device_node *mii_np) +{ + struct device_node *phy_np; + struct mt753x_phy *phy; + int phy_mode; + u32 phyad; + + if (!mii_np) + return; + + for_each_child_of_node(mii_np, phy_np) { + if (of_property_read_u32(phy_np, "reg", &phyad)) + continue; + + if (phyad >= MT753X_NUM_PHYS) + continue; + + phy_mode = of_get_phy_mode(phy_np); + if (phy_mode < 0) { + dev_info(gsw->dev, "incorrect phy-mode %d for PHY %d\n", + phy_mode, phyad); + continue; + } + + phy = &gsw->phys[phyad]; + phy->gsw = gsw; + + init_dummy_netdev(&phy->netdev); + phy->netdev.netdev_ops = &mt753x_dummy_netdev_ops; + + phy->phydev = of_phy_connect(&phy->netdev, phy_np, + mt753x_phy_link_handler, 0, phy_mode); + if (!phy->phydev) { + dev_info(gsw->dev, "could not connect to PHY %d\n", + phyad); + continue; + } + + phy_start(phy->phydev); + } +} + +static void mt753x_disconnect_internal_phys(struct gsw_mt753x *gsw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(gsw->phys); i++) { + if (gsw->phys[i].phydev) { + phy_stop(gsw->phys[i].phydev); + phy_disconnect(gsw->phys[i].phydev); + gsw->phys[i].phydev = NULL; + } + } +} + +static int mt753x_mdio_register(struct gsw_mt753x *gsw) +{ + struct device_node *mii_np; + int i, ret; + + mii_np = of_get_child_by_name(gsw->dev->of_node, "mdio-bus"); + if (mii_np && !of_device_is_available(mii_np)) { + ret = -ENODEV; + goto err_put_node; + } + + gsw->gphy_bus = devm_mdiobus_alloc(gsw->dev); + if (!gsw->gphy_bus) { + ret = -ENOMEM; + goto err_put_node; + } + + gsw->gphy_bus->name = "mt753x_mdio"; + gsw->gphy_bus->read = mt753x_mdio_read; + gsw->gphy_bus->write = mt753x_mdio_write; + gsw->gphy_bus->priv = gsw; + gsw->gphy_bus->parent = gsw->dev; + gsw->gphy_bus->phy_mask = BIT(MT753X_NUM_PHYS) - 1; +// gsw->gphy_bus->irq = gsw->phy_irqs; + + for (i = 0; i < PHY_MAX_ADDR; i++) + gsw->gphy_bus->irq[i] = PHY_POLL; + + if (mii_np) + snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "%s@%s", + mii_np->name, gsw->dev->of_node->name); + else + snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "mdio@%s", + gsw->dev->of_node->name); + + ret = of_mdiobus_register(gsw->gphy_bus, mii_np); + + if (ret) { + devm_mdiobus_free(gsw->dev, gsw->gphy_bus); + gsw->gphy_bus = NULL; + } else { + if (gsw->phy_status_poll) + mt753x_connect_internal_phys(gsw, mii_np); + } + +err_put_node: + if (mii_np) + of_node_put(mii_np); + + return ret; +} +#endif + +static irqreturn_t mt753x_irq_handler(int irq, void *dev) +{ + struct gsw_mt753x *gsw = dev; + + disable_irq_nosync(gsw->irq); + + schedule_work(&gsw->irq_worker); + + return IRQ_HANDLED; +} + +static int mt753x_probe(struct platform_device *pdev) +{ + struct gsw_mt753x *gsw; + struct mt753x_sw_id *sw; + struct device_node *np = pdev->dev.of_node; + struct device_node *mdio; + struct mii_bus *mdio_bus; + int ret = -EINVAL; + struct chip_rev rev; + struct mt753x_mapping *map; + int i; + + mdio = of_parse_phandle(np, "mediatek,mdio", 0); + if (!mdio) + return -EINVAL; + + mdio_bus = of_mdio_find_bus(mdio); + if (!mdio_bus) + return -EPROBE_DEFER; + + gsw = devm_kzalloc(&pdev->dev, sizeof(struct gsw_mt753x), GFP_KERNEL); + if (!gsw) + return -ENOMEM; + + gsw->host_bus = mdio_bus; + gsw->dev = &pdev->dev; + mutex_init(&gsw->mii_lock); + + /* Switch hard reset */ + if (mt753x_hw_reset(gsw)) + goto fail; + + /* Fetch the SMI address dirst */ + if (of_property_read_u32(np, "mediatek,smi-addr", &gsw->smi_addr)) + gsw->smi_addr 
= MT753X_DFL_SMI_ADDR; + + /* Get LAN/WAN port mapping */ + map = mt753x_find_mapping(np); + if (map) { + mt753x_apply_mapping(gsw, map); + gsw->global_vlan_enable = 1; + dev_info(gsw->dev, "LAN/WAN VLAN setting=%s\n", map->name); + } + + /* Load MAC port configurations */ + mt753x_load_port_cfg(gsw); + + /* Check for valid switch and then initialize */ + for (i = 0; i < ARRAY_SIZE(mt753x_sw_ids); i++) { + if (!mt753x_sw_ids[i]->detect(gsw, &rev)) { + sw = mt753x_sw_ids[i]; + + gsw->name = rev.name; + gsw->model = sw->model; + + dev_info(gsw->dev, "Switch is MediaTek %s rev %d", + gsw->name, rev.rev); + + /* Initialize the switch */ + ret = sw->init(gsw); + if (ret) + goto fail; + + break; + } + } + + if (i >= ARRAY_SIZE(mt753x_sw_ids)) { + dev_err(gsw->dev, "No mt753x switch found\n"); + goto fail; + } + + gsw->irq = platform_get_irq(pdev, 0); + if (gsw->irq >= 0) { + ret = devm_request_irq(gsw->dev, gsw->irq, mt753x_irq_handler, + 0, dev_name(gsw->dev), gsw); + if (ret) { + dev_err(gsw->dev, "Failed to request irq %d\n", + gsw->irq); + goto fail; + } + + INIT_WORK(&gsw->irq_worker, mt753x_irq_worker); + } + + platform_set_drvdata(pdev, gsw); + + gsw->phy_status_poll = of_property_read_bool(gsw->dev->of_node, + "mediatek,phy-poll"); + + mt753x_add_gsw(gsw); +#if 1 //XDXD + mt753x_mdio_register(gsw); +#endif + + mt753x_swconfig_init(gsw); + + if (sw->post_init) + sw->post_init(gsw); + + if (gsw->irq >= 0) + mt753x_irq_enable(gsw); + + return 0; + +fail: + devm_kfree(&pdev->dev, gsw); + + return ret; +} + +static int mt753x_remove(struct platform_device *pdev) +{ + struct gsw_mt753x *gsw = platform_get_drvdata(pdev); + + if (gsw->irq >= 0) + cancel_work_sync(&gsw->irq_worker); + + if (gsw->reset_pin >= 0) + devm_gpio_free(&pdev->dev, gsw->reset_pin); + +#ifdef CONFIG_SWCONFIG + mt753x_swconfig_destroy(gsw); +#endif + +#if 1 //XDXD + mt753x_disconnect_internal_phys(gsw); + + mdiobus_unregister(gsw->gphy_bus); +#endif + + mt753x_remove_gsw(gsw); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id mt753x_ids[] = { + { .compatible = "mediatek,mt753x" }, + { }, +}; + +MODULE_DEVICE_TABLE(of, mt753x_ids); + +static struct platform_driver mt753x_driver = { + .probe = mt753x_probe, + .remove = mt753x_remove, + .driver = { + .name = "mt753x", + .of_match_table = mt753x_ids, + }, +}; + +static int __init mt753x_init(void) +{ + int ret; + + INIT_LIST_HEAD(&mt753x_devs); + ret = platform_driver_register(&mt753x_driver); + + mt753x_nl_init(); + + return ret; +} +module_init(mt753x_init); + +static void __exit mt753x_exit(void) +{ + mt753x_nl_exit(); + + platform_driver_unregister(&mt753x_driver); +} +module_exit(mt753x_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Weijie Gao "); +MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch"); diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.c new file mode 100644 index 000000000..a04c701fd --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Sirui Zhao + */ + +#include +#include +#include +#include +#include + +#include "mt753x.h" +#include "mt753x_nl.h" + +struct mt753x_nl_cmd_item { + enum mt753x_cmd cmd; + bool require_dev; + int (*process)(struct genl_info *info, struct gsw_mt753x *gsw); + u32 nr_required_attrs; + const enum mt753x_attr *required_attrs; +}; + +static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info); + +static const struct nla_policy mt753x_nl_cmd_policy[] = { + [MT753X_ATTR_TYPE_MESG] = { .type = NLA_STRING }, + [MT753X_ATTR_TYPE_PHY] = { .type = NLA_S32 }, + [MT753X_ATTR_TYPE_REG] = { .type = NLA_S32 }, + [MT753X_ATTR_TYPE_VAL] = { .type = NLA_S32 }, + [MT753X_ATTR_TYPE_DEV_NAME] = { .type = NLA_S32 }, + [MT753X_ATTR_TYPE_DEV_ID] = { .type = NLA_S32 }, + [MT753X_ATTR_TYPE_DEVAD] = { .type = NLA_S32 }, +}; + +static const struct genl_ops mt753x_nl_ops[] = { + { + .cmd = MT753X_CMD_REQUEST, + .doit = mt753x_nl_response, +// .policy = mt753x_nl_cmd_policy, + .flags = GENL_ADMIN_PERM, + }, { + .cmd = MT753X_CMD_READ, + .doit = mt753x_nl_response, +// .policy = mt753x_nl_cmd_policy, + .flags = GENL_ADMIN_PERM, + }, { + .cmd = MT753X_CMD_WRITE, + .doit = mt753x_nl_response, +// .policy = mt753x_nl_cmd_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static struct genl_family mt753x_nl_family = { + .name = MT753X_GENL_NAME, + .version = MT753X_GENL_VERSION, + .maxattr = MT753X_NR_ATTR_TYPE, + .ops = mt753x_nl_ops, + .n_ops = ARRAY_SIZE(mt753x_nl_ops), + .policy = mt753x_nl_cmd_policy, +}; + +static int mt753x_nl_list_devs(char *buff, int size) +{ + struct gsw_mt753x *gsw; + int len, total = 0; + char buf[80]; + + memset(buff, 0, size); + + mt753x_lock_gsw(); + + list_for_each_entry(gsw, &mt753x_devs, list) { + len = snprintf(buf, sizeof(buf), + "id: %d, model: %s, node: %s\n", + gsw->id, gsw->name, gsw->dev->of_node->name); + strncat(buff, buf, size - total); + total += len; + } + + mt753x_put_gsw(); + + return total; +} + +static int mt753x_nl_prepare_reply(struct genl_info *info, u8 cmd, + struct sk_buff **skbp) +{ + struct sk_buff *msg; + void *reply; + + if (!info) + return -EINVAL; + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + /* Construct send-back message header */ + reply = genlmsg_put(msg, info->snd_portid, info->snd_seq, + &mt753x_nl_family, 0, cmd); + if (!reply) { + nlmsg_free(msg); + return -EINVAL; + } + + *skbp = msg; + return 0; +} + +static int mt753x_nl_send_reply(struct sk_buff *skb, struct genl_info *info) +{ + struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); + void *reply = genlmsg_data(genlhdr); + + /* Finalize a generic netlink message (update message header) */ + genlmsg_end(skb, reply); + + /* reply to a request */ + return genlmsg_reply(skb, info); +} + +static s32 mt753x_nl_get_s32(struct genl_info *info, enum mt753x_attr attr, + s32 defval) +{ + struct nlattr *na; + + na = info->attrs[attr]; + if (na) + return nla_get_s32(na); + + return defval; +} + +static int mt753x_nl_get_u32(struct genl_info *info, enum mt753x_attr attr, + u32 *val) +{ + struct nlattr *na; + + na = info->attrs[attr]; + if (na) { + *val = nla_get_u32(na); + return 0; + } + + return -1; +} + +static struct gsw_mt753x *mt753x_nl_parse_find_gsw(struct genl_info *info) +{ + struct gsw_mt753x *gsw; + struct nlattr *na; + int gsw_id; + + na = info->attrs[MT753X_ATTR_TYPE_DEV_ID]; + if (na) { + gsw_id = nla_get_s32(na); + if (gsw_id >= 0) + gsw = mt753x_get_gsw(gsw_id); + else + gsw = mt753x_get_first_gsw(); + } else { + gsw = 
mt753x_get_first_gsw(); + } + + return gsw; +} + +static int mt753x_nl_get_swdevs(struct genl_info *info, struct gsw_mt753x *gsw) +{ + struct sk_buff *rep_skb = NULL; + char dev_info[512]; + int ret; + + ret = mt753x_nl_list_devs(dev_info, sizeof(dev_info)); + if (!ret) { + pr_info("No switch registered\n"); + return -EINVAL; + } + + ret = mt753x_nl_prepare_reply(info, MT753X_CMD_REPLY, &rep_skb); + if (ret < 0) + goto err; + + ret = nla_put_string(rep_skb, MT753X_ATTR_TYPE_MESG, dev_info); + if (ret < 0) + goto err; + + return mt753x_nl_send_reply(rep_skb, info); + +err: + if (rep_skb) + nlmsg_free(rep_skb); + + return ret; +} + +static int mt753x_nl_reply_read(struct genl_info *info, struct gsw_mt753x *gsw) +{ + struct sk_buff *rep_skb = NULL; + s32 phy, devad, reg; + int value; + int ret = 0; + + phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1); + devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1); + reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1); + + if (reg < 0) + goto err; + + ret = mt753x_nl_prepare_reply(info, MT753X_CMD_READ, &rep_skb); + if (ret < 0) + goto err; + + if (phy >= 0) { + if (devad < 0) + value = gsw->mii_read(gsw, phy, reg); + else + value = gsw->mmd_read(gsw, phy, devad, reg); + } else { + value = mt753x_reg_read(gsw, reg); + } + + ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg); + if (ret < 0) + goto err; + + ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value); + if (ret < 0) + goto err; + + return mt753x_nl_send_reply(rep_skb, info); + +err: + if (rep_skb) + nlmsg_free(rep_skb); + + return ret; +} + +static int mt753x_nl_reply_write(struct genl_info *info, struct gsw_mt753x *gsw) +{ + struct sk_buff *rep_skb = NULL; + s32 phy, devad, reg; + u32 value; + int ret = 0; + + phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1); + devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1); + reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1); + + if (mt753x_nl_get_u32(info, MT753X_ATTR_TYPE_VAL, &value)) + goto err; + + if (reg < 0) + goto err; + + ret = mt753x_nl_prepare_reply(info, MT753X_CMD_WRITE, &rep_skb); + if (ret < 0) + goto err; + + if (phy >= 0) { + if (devad < 0) + gsw->mii_write(gsw, phy, reg, value); + else + gsw->mmd_write(gsw, phy, devad, reg, value); + } else { + mt753x_reg_write(gsw, reg, value); + } + + ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg); + if (ret < 0) + goto err; + + ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value); + if (ret < 0) + goto err; + + return mt753x_nl_send_reply(rep_skb, info); + +err: + if (rep_skb) + nlmsg_free(rep_skb); + + return ret; +} + +static const enum mt753x_attr mt753x_nl_cmd_read_attrs[] = { + MT753X_ATTR_TYPE_REG +}; + +static const enum mt753x_attr mt753x_nl_cmd_write_attrs[] = { + MT753X_ATTR_TYPE_REG, + MT753X_ATTR_TYPE_VAL +}; + +static const struct mt753x_nl_cmd_item mt753x_nl_cmds[] = { + { + .cmd = MT753X_CMD_REQUEST, + .require_dev = false, + .process = mt753x_nl_get_swdevs + }, { + .cmd = MT753X_CMD_READ, + .require_dev = true, + .process = mt753x_nl_reply_read, + .required_attrs = mt753x_nl_cmd_read_attrs, + .nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_read_attrs), + }, { + .cmd = MT753X_CMD_WRITE, + .require_dev = true, + .process = mt753x_nl_reply_write, + .required_attrs = mt753x_nl_cmd_write_attrs, + .nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_write_attrs), + } +}; + +static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info) +{ + struct genlmsghdr *hdr = nlmsg_data(info->nlhdr); + const struct mt753x_nl_cmd_item *cmditem 
= NULL; + struct gsw_mt753x *gsw = NULL; + u32 sat_req_attrs = 0; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(mt753x_nl_cmds); i++) { + if (hdr->cmd == mt753x_nl_cmds[i].cmd) { + cmditem = &mt753x_nl_cmds[i]; + break; + } + } + + if (!cmditem) { + pr_info("mt753x-nl: unknown cmd %u\n", hdr->cmd); + return -EINVAL; + } + + for (i = 0; i < cmditem->nr_required_attrs; i++) { + if (info->attrs[cmditem->required_attrs[i]]) + sat_req_attrs++; + } + + if (sat_req_attrs != cmditem->nr_required_attrs) { + pr_info("mt753x-nl: missing required attr(s) for cmd %u\n", + hdr->cmd); + return -EINVAL; + } + + if (cmditem->require_dev) { + gsw = mt753x_nl_parse_find_gsw(info); + if (!gsw) { + pr_info("mt753x-nl: failed to find switch dev\n"); + return -EINVAL; + } + } + + ret = cmditem->process(info, gsw); + + mt753x_put_gsw(); + + return ret; +} + +int __init mt753x_nl_init(void) +{ + int ret; + + ret = genl_register_family(&mt753x_nl_family); + if (ret) { + pr_info("mt753x-nl: genl_register_family_with_ops failed\n"); + return ret; + } + + return 0; +} + +void __exit mt753x_nl_exit(void) +{ + genl_unregister_family(&mt753x_nl_family); +} diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.h new file mode 100644 index 000000000..85dc9e791 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_nl.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018 MediaTek Inc. + * Author: Sirui Zhao + */ + +#ifndef _MT753X_NL_H_ +#define _MT753X_NL_H_ + +#define MT753X_GENL_NAME "mt753x" +#define MT753X_GENL_VERSION 0x1 + +enum mt753x_cmd { + MT753X_CMD_UNSPEC = 0, + MT753X_CMD_REQUEST, + MT753X_CMD_REPLY, + MT753X_CMD_READ, + MT753X_CMD_WRITE, + + __MT753X_CMD_MAX, +}; + +enum mt753x_attr { + MT753X_ATTR_TYPE_UNSPEC = 0, + MT753X_ATTR_TYPE_MESG, + MT753X_ATTR_TYPE_PHY, + MT753X_ATTR_TYPE_DEVAD, + MT753X_ATTR_TYPE_REG, + MT753X_ATTR_TYPE_VAL, + MT753X_ATTR_TYPE_DEV_NAME, + MT753X_ATTR_TYPE_DEV_ID, + + __MT753X_ATTR_TYPE_MAX, +}; + +#define MT753X_NR_ATTR_TYPE (__MT753X_ATTR_TYPE_MAX - 1) + +#ifdef __KERNEL__ +int __init mt753x_nl_init(void); +void __exit mt753x_nl_exit(void); +#endif /* __KERNEL__ */ + +#endif /* _MT753X_NL_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_regs.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_regs.h new file mode 100644 index 000000000..65c48aff6 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_regs.h @@ -0,0 +1,345 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Weijie Gao + */ + +#ifndef _MT753X_REGS_H_ +#define _MT753X_REGS_H_ + +#include + +/* Values of Egress TAG Control */ +#define ETAG_CTRL_UNTAG 0 +#define ETAG_CTRL_TAG 2 +#define ETAG_CTRL_SWAP 1 +#define ETAG_CTRL_STACK 3 + +#define VTCR 0x90 +#define VAWD1 0x94 +#define VAWD2 0x98 + +/* Fields of VTCR */ +#define VTCR_BUSY BIT(31) +#define IDX_INVLD BIT(16) +#define VTCR_FUNC_S 12 +#define VTCR_FUNC_M 0xf000 +#define VTCR_VID_S 0 +#define VTCR_VID_M 0xfff + +/* Values of VTCR_FUNC */ +#define VTCR_READ_VLAN_ENTRY 0 +#define VTCR_WRITE_VLAN_ENTRY 1 +#define VTCR_INVD_VLAN_ENTRY 2 +#define VTCR_ENABLE_VLAN_ENTRY 3 +#define VTCR_READ_ACL_ENTRY 4 +#define VTCR_WRITE_ACL_ENTRY 5 +#define VTCR_READ_TRTCM_TABLE 6 +#define VTCR_WRITE_TRTCM_TABLE 7 +#define VTCR_READ_ACL_MASK_ENTRY 8 +#define VTCR_WRITE_ACL_MASK_ENTRY 9 +#define VTCR_READ_ACL_RULE_ENTRY 10 +#define VTCR_WRITE_ACL_RULE_ENTRY 11 +#define VTCR_READ_ACL_RATE_ENTRY 12 +#define VTCR_WRITE_ACL_RATE_ENTRY 13 + +/* VLAN entry fields */ +/* VAWD1 */ +#define PORT_STAG BIT(31) +#define IVL_MAC BIT(30) +#define EG_CON BIT(29) +#define VTAG_EN BIT(28) +#define COPY_PRI BIT(27) +#define USER_PRI_S 24 +#define USER_PRI_M 0x7000000 +#define PORT_MEM_S 16 +#define PORT_MEM_M 0xff0000 +#define S_TAG1_S 4 +#define S_TAG1_M 0xfff0 +#define FID_S 1 +#define FID_M 0x0e +#define VENTRY_VALID BIT(0) + +/* VAWD2 */ +#define S_TAG2_S 16 +#define S_TAG2_M 0xffff0000 +#define PORT_ETAG_S(p) ((p) * 2) +#define PORT_ETAG_M 0x03 + +#define PORT_CTRL_BASE 0x2000 +#define PORT_CTRL_PORT_OFFSET 0x100 +#define PORT_CTRL_REG(p, r) (PORT_CTRL_BASE + \ + (p) * PORT_CTRL_PORT_OFFSET + (r)) +#define CKGCR(p) PORT_CTRL_REG(p, 0x00) +#define PCR(p) PORT_CTRL_REG(p, 0x04) +#define PIC(p) PORT_CTRL_REG(p, 0x08) +#define PSC(p) PORT_CTRL_REG(p, 0x0c) +#define PVC(p) PORT_CTRL_REG(p, 0x10) +#define PPBV1(p) PORT_CTRL_REG(p, 0x14) +#define PPBV2(p) PORT_CTRL_REG(p, 0x18) +#define BSR(p) PORT_CTRL_REG(p, 0x1c) +#define STAG01 PORT_CTRL_REG(p, 0x20) +#define STAG23 PORT_CTRL_REG(p, 0x24) +#define STAG45 PORT_CTRL_REG(p, 0x28) +#define STAG67 PORT_CTRL_REG(p, 0x2c) + +#define PPBV(p, g) (PPBV1(p) + ((g) / 2) * 4) + +/* Fields of PCR */ +#define MLDV2_EN BIT(30) +#define EG_TAG_S 28 +#define EG_TAG_M 0x30000000 +#define PORT_PRI_S 24 +#define PORT_PRI_M 0x7000000 +#define PORT_MATRIX_S 16 +#define PORT_MATRIX_M 0xff0000 +#define UP2DSCP_EN BIT(12) +#define UP2TAG_EN BIT(11) +#define ACL_EN BIT(10) +#define PORT_TX_MIR BIT(9) +#define PORT_RX_MIR BIT(8) +#define ACL_MIR BIT(7) +#define MIS_PORT_FW_S 4 +#define MIS_PORT_FW_M 0x70 +#define VLAN_MIS BIT(2) +#define PORT_VLAN_S 0 +#define PORT_VLAN_M 0x03 + +/* Values of PORT_VLAN */ +#define PORT_MATRIX_MODE 0 +#define FALLBACK_MODE 1 +#define CHECK_MODE 2 +#define SECURITY_MODE 3 + +/* Fields of PVC */ +#define STAG_VPID_S 16 +#define STAG_VPID_M 0xffff0000 +#define DIS_PVID BIT(15) +#define FORCE_PVID BIT(14) +#define PT_VPM BIT(12) +#define PT_OPTION BIT(11) +#define PVC_EG_TAG_S 8 +#define PVC_EG_TAG_M 0x700 +#define VLAN_ATTR_S 6 +#define VLAN_ATTR_M 0xc0 +#define PVC_PORT_STAG BIT(5) +#define BC_LKYV_EN BIT(4) +#define MC_LKYV_EN BIT(3) +#define UC_LKYV_EN BIT(2) +#define ACC_FRM_S 0 +#define ACC_FRM_M 0x03 + +/* Values of VLAN_ATTR */ +#define VA_USER_PORT 0 +#define VA_STACK_PORT 1 +#define VA_TRANSLATION_PORT 2 +#define VA_TRANSPARENT_PORT 3 + +/* Fields of PPBV */ +#define GRP_PORT_PRI_S(g) (((g) % 2) * 16 + 13) +#define GRP_PORT_PRI_M 0x07 +#define GRP_PORT_VID_S(g) (((g) % 2) * 16) +#define GRP_PORT_VID_M 
0xfff + +#define PORT_MAC_CTRL_BASE 0x3000 +#define PORT_MAC_CTRL_PORT_OFFSET 0x100 +#define PORT_MAC_CTRL_REG(p, r) (PORT_MAC_CTRL_BASE + \ + (p) * PORT_MAC_CTRL_PORT_OFFSET + (r)) +#define PMCR(p) PORT_MAC_CTRL_REG(p, 0x00) +#define PMEEECR(p) PORT_MAC_CTRL_REG(p, 0x04) +#define PMSR(p) PORT_MAC_CTRL_REG(p, 0x08) +#define PINT_EN(p) PORT_MAC_CTRL_REG(p, 0x10) +#define PINT_STS(p) PORT_MAC_CTRL_REG(p, 0x14) + +#define GMACCR (PORT_MAC_CTRL_BASE + 0xe0) +#define TXCRC_EN BIT(19) +#define RXCRC_EN BIT(18) +#define PRMBL_LMT_EN BIT(17) +#define MTCC_LMT_S 9 +#define MTCC_LMT_M 0x1e00 +#define MAX_RX_JUMBO_S 2 +#define MAX_RX_JUMBO_M 0x3c +#define MAX_RX_PKT_LEN_S 0 +#define MAX_RX_PKT_LEN_M 0x3 + +/* Values of MAX_RX_PKT_LEN */ +#define RX_PKT_LEN_1518 0 +#define RX_PKT_LEN_1536 1 +#define RX_PKT_LEN_1522 2 +#define RX_PKT_LEN_MAX_JUMBO 3 + +/* Fields of PMCR */ +#define IPG_CFG_S 18 +#define IPG_CFG_M 0xc0000 +#define EXT_PHY BIT(17) +#define MAC_MODE BIT(16) +#define MAC_TX_EN BIT(14) +#define MAC_RX_EN BIT(13) +#define MAC_PRE BIT(11) +#define BKOFF_EN BIT(9) +#define BACKPR_EN BIT(8) +#define FORCE_EEE1G BIT(7) +#define FORCE_EEE1000 BIT(6) +#define FORCE_RX_FC BIT(5) +#define FORCE_TX_FC BIT(4) +#define FORCE_SPD_S 2 +#define FORCE_SPD_M 0x0c +#define FORCE_DPX BIT(1) +#define FORCE_LINK BIT(0) + +/* Fields of PMSR */ +#define EEE1G_STS BIT(7) +#define EEE100_STS BIT(6) +#define RX_FC_STS BIT(5) +#define TX_FC_STS BIT(4) +#define MAC_SPD_STS_S 2 +#define MAC_SPD_STS_M 0x0c +#define MAC_DPX_STS BIT(1) +#define MAC_LNK_STS BIT(0) + +/* Values of MAC_SPD_STS */ +#define MAC_SPD_10 0 +#define MAC_SPD_100 1 +#define MAC_SPD_1000 2 +#define MAC_SPD_2500 3 + +/* Values of IPG_CFG */ +#define IPG_96BIT 0 +#define IPG_96BIT_WITH_SHORT_IPG 1 +#define IPG_64BIT 2 + +#define MIB_COUNTER_BASE 0x4000 +#define MIB_COUNTER_PORT_OFFSET 0x100 +#define MIB_COUNTER_REG(p, r) (MIB_COUNTER_BASE + \ + (p) * MIB_COUNTER_PORT_OFFSET + (r)) + +#define STATS_TDPC 0x00 +#define STATS_TCRC 0x04 +#define STATS_TUPC 0x08 +#define STATS_TMPC 0x0C +#define STATS_TBPC 0x10 +#define STATS_TCEC 0x14 +#define STATS_TSCEC 0x18 +#define STATS_TMCEC 0x1C +#define STATS_TDEC 0x20 +#define STATS_TLCEC 0x24 +#define STATS_TXCEC 0x28 +#define STATS_TPPC 0x2C +#define STATS_TL64PC 0x30 +#define STATS_TL65PC 0x34 +#define STATS_TL128PC 0x38 +#define STATS_TL256PC 0x3C +#define STATS_TL512PC 0x40 +#define STATS_TL1024PC 0x44 +#define STATS_TOC 0x48 +#define STATS_RDPC 0x60 +#define STATS_RFPC 0x64 +#define STATS_RUPC 0x68 +#define STATS_RMPC 0x6C +#define STATS_RBPC 0x70 +#define STATS_RAEPC 0x74 +#define STATS_RCEPC 0x78 +#define STATS_RUSPC 0x7C +#define STATS_RFEPC 0x80 +#define STATS_ROSPC 0x84 +#define STATS_RJEPC 0x88 +#define STATS_RPPC 0x8C +#define STATS_RL64PC 0x90 +#define STATS_RL65PC 0x94 +#define STATS_RL128PC 0x98 +#define STATS_RL256PC 0x9C +#define STATS_RL512PC 0xA0 +#define STATS_RL1024PC 0xA4 +#define STATS_ROC 0xA8 +#define STATS_RDPC_CTRL 0xB0 +#define STATS_RDPC_ING 0xB4 +#define STATS_RDPC_ARL 0xB8 + +#define SYS_CTRL 0x7000 +#define SW_PHY_RST BIT(2) +#define SW_SYS_RST BIT(1) +#define SW_REG_RST BIT(0) + +#define SYS_INT_EN 0x7008 +#define SYS_INT_STS 0x700c +#define MAC_PC_INT BIT(16) +#define PHY_INT(p) BIT((p) + 8) +#define PHY_LC_INT(p) BIT(p) + +#define PHY_IAC 0x701c +#define PHY_ACS_ST BIT(31) +#define MDIO_REG_ADDR_S 25 +#define MDIO_REG_ADDR_M 0x3e000000 +#define MDIO_PHY_ADDR_S 20 +#define MDIO_PHY_ADDR_M 0x1f00000 +#define MDIO_CMD_S 18 +#define MDIO_CMD_M 0xc0000 +#define MDIO_ST_S 16 +#define 
MDIO_ST_M 0x30000 +#define MDIO_RW_DATA_S 0 +#define MDIO_RW_DATA_M 0xffff + +/* MDIO_CMD: MDIO commands */ +#define MDIO_CMD_ADDR 0 +#define MDIO_CMD_WRITE 1 +#define MDIO_CMD_READ 2 +#define MDIO_CMD_READ_C45 3 + +/* MDIO_ST: MDIO start field */ +#define MDIO_ST_C45 0 +#define MDIO_ST_C22 1 + +#define HWSTRAP 0x7800 +#define MHWSTRAP 0x7804 + +/* Internal GPHY Page Control Register */ +#define PHY_CL22_PAGE_CTRL 0x1f +#define PHY_TR_PAGE 0x52b5 + +/* Internal GPHY Token Ring Access Registers */ +#define PHY_TR_CTRL 0x10 +#define PHY_TR_LOW_DATA 0x11 +#define PHY_TR_HIGH_DATA 0x12 + +/* Fields of PHY_TR_CTRL */ +#define PHY_TR_PKT_XMT_STA BIT(15) +#define PHY_TR_WR_S 13 +#define PHY_TR_CH_ADDR_S 11 +#define PHY_TR_NODE_ADDR_S 7 +#define PHY_TR_DATA_ADDR_S 1 + +enum phy_tr_wr { + PHY_TR_WRITE = 0, + PHY_TR_READ = 1, +}; + +/* Helper macro for GPHY Token Ring Access */ +#define PHY_TR_LOW_VAL(x) ((x) & 0xffff) +#define PHY_TR_HIGH_VAL(x) (((x) & 0xff0000) >> 16) + +/* Token Ring Channels */ +#define PMA_CH 0x1 +#define DSP_CH 0x2 + +/* Token Ring Nodes */ +#define PMA_NOD 0xf +#define DSP_NOD 0xd + +/* Token Ring register range */ +enum tr_pma_reg_addr { + PMA_MIN = 0x0, + PMA_01 = 0x1, + PMA_17 = 0x17, + PMA_18 = 0x18, + PMA_MAX = 0x3d, +}; + +enum tr_dsp_reg_addr { + DSP_MIN = 0x0, + DSP_06 = 0x6, + DSP_08 = 0x8, + DSP_0f = 0xf, + DSP_10 = 0x10, + DSP_MAX = 0x3e, +}; +#endif /* _MT753X_REGS_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c new file mode 100644 index 000000000..7a0595251 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Weijie Gao + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mt753x.h" +#include "mt753x_swconfig.h" +#include "mt753x_regs.h" + +#define MT753X_PORT_MIB_TXB_ID 18 /* TxByte */ +#define MT753X_PORT_MIB_RXB_ID 37 /* RxByte */ + +#define MIB_DESC(_s, _o, _n) \ + { \ + .size = (_s), \ + .offset = (_o), \ + .name = (_n), \ + } + +struct mt753x_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +static const struct mt753x_mib_desc mt753x_mibs[] = { + MIB_DESC(1, STATS_TDPC, "TxDrop"), + MIB_DESC(1, STATS_TCRC, "TxCRC"), + MIB_DESC(1, STATS_TUPC, "TxUni"), + MIB_DESC(1, STATS_TMPC, "TxMulti"), + MIB_DESC(1, STATS_TBPC, "TxBroad"), + MIB_DESC(1, STATS_TCEC, "TxCollision"), + MIB_DESC(1, STATS_TSCEC, "TxSingleCol"), + MIB_DESC(1, STATS_TMCEC, "TxMultiCol"), + MIB_DESC(1, STATS_TDEC, "TxDefer"), + MIB_DESC(1, STATS_TLCEC, "TxLateCol"), + MIB_DESC(1, STATS_TXCEC, "TxExcCol"), + MIB_DESC(1, STATS_TPPC, "TxPause"), + MIB_DESC(1, STATS_TL64PC, "Tx64Byte"), + MIB_DESC(1, STATS_TL65PC, "Tx65Byte"), + MIB_DESC(1, STATS_TL128PC, "Tx128Byte"), + MIB_DESC(1, STATS_TL256PC, "Tx256Byte"), + MIB_DESC(1, STATS_TL512PC, "Tx512Byte"), + MIB_DESC(1, STATS_TL1024PC, "Tx1024Byte"), + MIB_DESC(2, STATS_TOC, "TxByte"), + MIB_DESC(1, STATS_RDPC, "RxDrop"), + MIB_DESC(1, STATS_RFPC, "RxFiltered"), + MIB_DESC(1, STATS_RUPC, "RxUni"), + MIB_DESC(1, STATS_RMPC, "RxMulti"), + MIB_DESC(1, STATS_RBPC, "RxBroad"), + MIB_DESC(1, STATS_RAEPC, "RxAlignErr"), + MIB_DESC(1, STATS_RCEPC, "RxCRC"), + MIB_DESC(1, STATS_RUSPC, "RxUnderSize"), + MIB_DESC(1, STATS_RFEPC, "RxFragment"), + MIB_DESC(1, STATS_ROSPC, "RxOverSize"), + MIB_DESC(1, STATS_RJEPC, "RxJabber"), + MIB_DESC(1, STATS_RPPC, "RxPause"), + MIB_DESC(1, STATS_RL64PC, "Rx64Byte"), + MIB_DESC(1, STATS_RL65PC, "Rx65Byte"), + MIB_DESC(1, STATS_RL128PC, "Rx128Byte"), + MIB_DESC(1, STATS_RL256PC, "Rx256Byte"), + MIB_DESC(1, STATS_RL512PC, "Rx512Byte"), + MIB_DESC(1, STATS_RL1024PC, "Rx1024Byte"), + MIB_DESC(2, STATS_ROC, "RxByte"), + MIB_DESC(1, STATS_RDPC_CTRL, "RxCtrlDrop"), + MIB_DESC(1, STATS_RDPC_ING, "RxIngDrop"), + MIB_DESC(1, STATS_RDPC_ARL, "RxARLDrop") +}; + +enum { + /* Global attributes. 
*/ + MT753X_ATTR_ENABLE_VLAN, +}; + +static int mt753x_get_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + val->value.i = gsw->global_vlan_enable; + + return 0; +} + +static int mt753x_set_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + gsw->global_vlan_enable = val->value.i != 0; + + return 0; +} + +static int mt753x_get_port_pvid(struct switch_dev *dev, int port, int *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + if (port >= MT753X_NUM_PORTS) + return -EINVAL; + + *val = mt753x_reg_read(gsw, PPBV1(port)); + *val &= GRP_PORT_VID_M; + + return 0; +} + +static int mt753x_set_port_pvid(struct switch_dev *dev, int port, int pvid) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + if (port >= MT753X_NUM_PORTS) + return -EINVAL; + + if (pvid < MT753X_MIN_VID || pvid > MT753X_MAX_VID) + return -EINVAL; + + gsw->port_entries[port].pvid = pvid; + + return 0; +} + +static int mt753x_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + u32 member; + u32 etags; + int i; + + val->len = 0; + + if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS) + return -EINVAL; + + mt753x_vlan_ctrl(gsw, VTCR_READ_VLAN_ENTRY, val->port_vlan); + + member = mt753x_reg_read(gsw, VAWD1); + member &= PORT_MEM_M; + member >>= PORT_MEM_S; + + etags = mt753x_reg_read(gsw, VAWD2); + + for (i = 0; i < MT753X_NUM_PORTS; i++) { + struct switch_port *p; + int etag; + + if (!(member & BIT(i))) + continue; + + p = &val->value.ports[val->len++]; + p->id = i; + + etag = (etags >> PORT_ETAG_S(i)) & PORT_ETAG_M; + + if (etag == ETAG_CTRL_TAG) + p->flags |= BIT(SWITCH_PORT_FLAG_TAGGED); + else if (etag != ETAG_CTRL_UNTAG) + dev_info(gsw->dev, + "vlan egress tag control neither untag nor tag.\n"); + } + + return 0; +} + +static int mt753x_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + u8 member = 0; + u8 etags = 0; + int i; + + if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS || + val->len > MT753X_NUM_PORTS) + return -EINVAL; + + for (i = 0; i < val->len; i++) { + struct switch_port *p = &val->value.ports[i]; + + if (p->id >= MT753X_NUM_PORTS) + return -EINVAL; + + member |= BIT(p->id); + + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) + etags |= BIT(p->id); + } + + gsw->vlan_entries[val->port_vlan].member = member; + gsw->vlan_entries[val->port_vlan].etags = etags; + + return 0; +} + +static int mt753x_set_vid(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + int vlan; + u16 vid; + + vlan = val->port_vlan; + vid = (u16)val->value.i; + + if (vlan < 0 || vlan >= MT753X_NUM_VLANS) + return -EINVAL; + + if (vid < MT753X_MIN_VID || vid > MT753X_MAX_VID) + return -EINVAL; + + gsw->vlan_entries[vlan].vid = vid; + return 0; +} + +static int mt753x_get_vid(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + val->value.i = val->port_vlan; + return 0; +} + +static int mt753x_get_port_link(struct switch_dev *dev, int port, + struct switch_port_link *link) +{ + struct gsw_mt753x *gsw = 
container_of(dev, struct gsw_mt753x, swdev); + u32 speed, pmsr; + + if (port < 0 || port >= MT753X_NUM_PORTS) + return -EINVAL; + + pmsr = mt753x_reg_read(gsw, PMSR(port)); + + link->link = pmsr & MAC_LNK_STS; + link->duplex = pmsr & MAC_DPX_STS; + speed = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S; + + switch (speed) { + case MAC_SPD_10: + link->speed = SWITCH_PORT_SPEED_10; + break; + case MAC_SPD_100: + link->speed = SWITCH_PORT_SPEED_100; + break; + case MAC_SPD_1000: + link->speed = SWITCH_PORT_SPEED_1000; + break; + case MAC_SPD_2500: + /* TODO: swconfig has no support for 2500 now */ + link->speed = SWITCH_PORT_SPEED_UNKNOWN; + break; + } + + return 0; +} + +static int mt753x_set_port_link(struct switch_dev *dev, int port, + struct switch_port_link *link) +{ +#ifndef MODULE + if (port >= MT753X_NUM_PHYS) + return -EINVAL; + + return switch_generic_set_link(dev, port, link); +#else + return -ENOTSUPP; +#endif +} + +static u64 get_mib_counter(struct gsw_mt753x *gsw, int i, int port) +{ + unsigned int offset; + u64 lo, hi, hi2; + + offset = mt753x_mibs[i].offset; + + if (mt753x_mibs[i].size == 1) + return mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset)); + + do { + hi = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4)); + lo = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset)); + hi2 = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4)); + } while (hi2 != hi); + + return (hi << 32) | lo; +} + +static int mt753x_get_port_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + static char buf[4096]; + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + int i, len = 0; + + if (val->port_vlan >= MT753X_NUM_PORTS) + return -EINVAL; + + len += snprintf(buf + len, sizeof(buf) - len, + "Port %d MIB counters\n", val->port_vlan); + + for (i = 0; i < ARRAY_SIZE(mt753x_mibs); ++i) { + u64 counter; + + len += snprintf(buf + len, sizeof(buf) - len, + "%-11s: ", mt753x_mibs[i].name); + counter = get_mib_counter(gsw, i, val->port_vlan); + len += snprintf(buf + len, sizeof(buf) - len, "%llu\n", + counter); + } + + val->value.s = buf; + val->len = len; + return 0; +} + +static int mt753x_get_port_stats(struct switch_dev *dev, int port, + struct switch_port_stats *stats) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + if (port < 0 || port >= MT753X_NUM_PORTS) + return -EINVAL; + + stats->tx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_TXB_ID, port); + stats->rx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_RXB_ID, port); + + return 0; +} + +static void mt753x_port_isolation(struct gsw_mt753x *gsw) +{ + int i; + + for (i = 0; i < MT753X_NUM_PORTS; i++) + mt753x_reg_write(gsw, PCR(i), + BIT(gsw->cpu_port) << PORT_MATRIX_S); + + mt753x_reg_write(gsw, PCR(gsw->cpu_port), PORT_MATRIX_M); + + for (i = 0; i < MT753X_NUM_PORTS; i++) { + u32 pvc_mode = 0x8100 << STAG_VPID_S; + + if ((gsw->port5_cfg.stag_on && i == 5) || + (gsw->port6_cfg.stag_on && i == 6)) + pvc_mode |= PVC_PORT_STAG; + else + pvc_mode |= (VA_TRANSPARENT_PORT << VLAN_ATTR_S); + + mt753x_reg_write(gsw, PVC(i), pvc_mode); + } +} + +static int mt753x_apply_config(struct switch_dev *dev) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + if (!gsw->global_vlan_enable) { + mt753x_port_isolation(gsw); + return 0; + } + + mt753x_apply_vlan_config(gsw); + + return 0; +} + +static int mt753x_reset_switch(struct switch_dev *dev) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + int i; + + 
memset(gsw->port_entries, 0, sizeof(gsw->port_entries)); + memset(gsw->vlan_entries, 0, sizeof(gsw->vlan_entries)); + + /* set the default vid of each vlan to its own index, so the vid + * won't need to be set explicitly. + */ + for (i = 0; i < MT753X_NUM_VLANS; i++) + gsw->vlan_entries[i].vid = i; + + return 0; +} + +static int mt753x_phy_read16(struct switch_dev *dev, int addr, u8 reg, + u16 *value) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + *value = gsw->mii_read(gsw, addr, reg); + + return 0; +} + +static int mt753x_phy_write16(struct switch_dev *dev, int addr, u8 reg, + u16 value) +{ + struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev); + + gsw->mii_write(gsw, addr, reg, value); + + return 0; +} + +static const struct switch_attr mt753x_global[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_vlan", + .description = "VLAN mode (1:enabled)", + .max = 1, + .id = MT753X_ATTR_ENABLE_VLAN, + .get = mt753x_get_vlan_enable, + .set = mt753x_set_vlan_enable, + } +}; + +static const struct switch_attr mt753x_port[] = { + { + .type = SWITCH_TYPE_STRING, + .name = "mib", + .description = "Get MIB counters for port", + .get = mt753x_get_port_mib, + .set = NULL, + }, +}; + +static const struct switch_attr mt753x_vlan[] = { + { + .type = SWITCH_TYPE_INT, + .name = "vid", + .description = "VLAN ID (0-4094)", + .set = mt753x_set_vid, + .get = mt753x_get_vid, + .max = 4094, + }, +}; + +static const struct switch_dev_ops mt753x_swdev_ops = { + .attr_global = { + .attr = mt753x_global, + .n_attr = ARRAY_SIZE(mt753x_global), + }, + .attr_port = { + .attr = mt753x_port, + .n_attr = ARRAY_SIZE(mt753x_port), + }, + .attr_vlan = { + .attr = mt753x_vlan, + .n_attr = ARRAY_SIZE(mt753x_vlan), + }, + .get_vlan_ports = mt753x_get_vlan_ports, + .set_vlan_ports = mt753x_set_vlan_ports, + .get_port_pvid = mt753x_get_port_pvid, + .set_port_pvid = mt753x_set_port_pvid, + .get_port_link = mt753x_get_port_link, + .set_port_link = mt753x_set_port_link, + .get_port_stats = mt753x_get_port_stats, + .apply_config = mt753x_apply_config, + .reset_switch = mt753x_reset_switch, + .phy_read16 = mt753x_phy_read16, + .phy_write16 = mt753x_phy_write16, +}; + +int mt753x_swconfig_init(struct gsw_mt753x *gsw) +{ + struct device_node *np = gsw->dev->of_node; + struct switch_dev *swdev; + int ret; + + if (of_property_read_u32(np, "mediatek,cpuport", &gsw->cpu_port)) + gsw->cpu_port = MT753X_DFL_CPU_PORT; + + swdev = &gsw->swdev; + + swdev->name = gsw->name; + swdev->alias = gsw->name; + swdev->cpu_port = gsw->cpu_port; + swdev->ports = MT753X_NUM_PORTS; + swdev->vlans = MT753X_NUM_VLANS; + swdev->ops = &mt753x_swdev_ops; + + ret = register_switch(swdev, NULL); + if (ret) { + dev_notice(gsw->dev, "Failed to register switch %s\n", + swdev->name); + return ret; + } + + mt753x_apply_config(swdev); + + return 0; +} + +void mt753x_swconfig_destroy(struct gsw_mt753x *gsw) +{ + unregister_switch(&gsw->swdev); +} diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.h new file mode 100644 index 000000000..f000364ee --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_swconfig.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weijie Gao + */ + +#ifndef _MT753X_SWCONFIG_H_ +#define _MT753X_SWCONFIG_H_ + +#ifdef CONFIG_SWCONFIG +#include +#include "mt753x.h" + +int mt753x_swconfig_init(struct gsw_mt753x *gsw); +void mt753x_swconfig_destroy(struct gsw_mt753x *gsw); +#else +static inline int mt753x_swconfig_init(struct gsw_mt753x *gsw) +{ + mt753x_apply_vlan_config(gsw); + + return 0; +} + +static inline void mt753x_swconfig_destroy(struct gsw_mt753x *gsw) +{ +} +#endif + +#endif /* _MT753X_SWCONFIG_H_ */ diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.c b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.c new file mode 100644 index 000000000..966709757 --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. + */ + +#include "mt753x.h" +#include "mt753x_regs.h" + +struct mt753x_mapping mt753x_def_mapping[] = { + { + .name = "llllw", + .pvids = { 1, 1, 1, 1, 2, 2, 1 }, + .members = { 0, 0x4f, 0x30 }, + .etags = { 0, 0, 0 }, + .vids = { 0, 1, 2 }, + }, { + .name = "wllll", + .pvids = { 2, 1, 1, 1, 1, 2, 1 }, + .members = { 0, 0x5e, 0x21 }, + .etags = { 0, 0, 0 }, + .vids = { 0, 1, 2 }, + }, { + .name = "lwlll", + .pvids = { 1, 2, 1, 1, 1, 2, 1 }, + .members = { 0, 0x5d, 0x22 }, + .etags = { 0, 0, 0 }, + .vids = { 0, 1, 2 }, + }, { + .name = "lllll", + .pvids = { 1, 1, 1, 1, 1, 1, 1 }, + .members = { 0, 0x7f }, + .etags = { 0, 0 }, + .vids = { 0, 1 }, + }, +}; + +void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val) +{ + int i; + + mt753x_reg_write(gsw, VTCR, + VTCR_BUSY | ((cmd << VTCR_FUNC_S) & VTCR_FUNC_M) | + (val & VTCR_VID_M)); + + for (i = 0; i < 300; i++) { + u32 val = mt753x_reg_read(gsw, VTCR); + + if ((val & VTCR_BUSY) == 0) + break; + + usleep_range(1000, 1100); + } + + if (i == 300) + dev_info(gsw->dev, "vtcr timeout\n"); +} + +static void mt753x_write_vlan_entry(struct gsw_mt753x *gsw, int vlan, u16 vid, + u8 ports, u8 etags) +{ + int port; + u32 val; + + /* vlan port membership */ + if (ports) + mt753x_reg_write(gsw, VAWD1, + IVL_MAC | VTAG_EN | VENTRY_VALID | + ((ports << PORT_MEM_S) & PORT_MEM_M)); + else + mt753x_reg_write(gsw, VAWD1, 0); + + /* egress mode */ + val = 0; + for (port = 0; port < MT753X_NUM_PORTS; port++) { + if (etags & BIT(port)) + val |= ETAG_CTRL_TAG << PORT_ETAG_S(port); + else + val |= ETAG_CTRL_UNTAG << PORT_ETAG_S(port); + } + mt753x_reg_write(gsw, VAWD2, val); + + /* write to vlan table */ + mt753x_vlan_ctrl(gsw, VTCR_WRITE_VLAN_ENTRY, vid); +} + +void mt753x_apply_vlan_config(struct gsw_mt753x *gsw) +{ + int i, j; + u8 tag_ports; + u8 untag_ports; + + /* set all ports as security mode */ + for (i = 0; i < MT753X_NUM_PORTS; i++) + mt753x_reg_write(gsw, PCR(i), + PORT_MATRIX_M | SECURITY_MODE); + + /* check if a port is used in tag/untag vlan egress mode */ + tag_ports = 0; + untag_ports = 0; + + for (i = 0; i < MT753X_NUM_VLANS; i++) { + u8 member = gsw->vlan_entries[i].member; + u8 etags = gsw->vlan_entries[i].etags; + + if (!member) + continue; + + for (j = 0; j < MT753X_NUM_PORTS; j++) { + if (!(member & BIT(j))) + continue; + + if (etags & BIT(j)) + tag_ports |= 1u << j; + else + untag_ports |= 1u << j; + } + } + + /* set all untag-only ports as transparent and the rest as user port */ + for (i = 0; i < MT753X_NUM_PORTS; i++) { + u32 pvc_mode = 0x8100 << STAG_VPID_S; + + if (untag_ports & BIT(i) && !(tag_ports & BIT(i))) + pvc_mode = (0x8100 << STAG_VPID_S) | + (VA_TRANSPARENT_PORT 
<< VLAN_ATTR_S); + + if ((gsw->port5_cfg.stag_on && i == 5) || + (gsw->port6_cfg.stag_on && i == 6)) + pvc_mode = (0x8100 << STAG_VPID_S) | PVC_PORT_STAG; + + mt753x_reg_write(gsw, PVC(i), pvc_mode); + } + + /* first clear the switch vlan table */ + for (i = 0; i < MT753X_NUM_VLANS; i++) + mt753x_write_vlan_entry(gsw, i, i, 0, 0); + + /* now program only vlans with members to avoid + * clobbering remapped entries in later iterations + */ + for (i = 0; i < MT753X_NUM_VLANS; i++) { + u16 vid = gsw->vlan_entries[i].vid; + u8 member = gsw->vlan_entries[i].member; + u8 etags = gsw->vlan_entries[i].etags; + + if (member) + mt753x_write_vlan_entry(gsw, i, vid, member, etags); + } + + /* Port Default PVID */ + for (i = 0; i < MT753X_NUM_PORTS; i++) { + int vlan = gsw->port_entries[i].pvid; + u16 pvid = 0; + u32 val; + + if (vlan < MT753X_NUM_VLANS && gsw->vlan_entries[vlan].member) + pvid = gsw->vlan_entries[vlan].vid; + + val = mt753x_reg_read(gsw, PPBV1(i)); + val &= ~GRP_PORT_VID_M; + val |= pvid; + mt753x_reg_write(gsw, PPBV1(i), val); + } +} + +struct mt753x_mapping *mt753x_find_mapping(struct device_node *np) +{ + const char *map; + int i; + + if (of_property_read_string(np, "mediatek,portmap", &map)) + return NULL; + + for (i = 0; i < ARRAY_SIZE(mt753x_def_mapping); i++) + if (!strcmp(map, mt753x_def_mapping[i].name)) + return &mt753x_def_mapping[i]; + + return NULL; +} + +void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map) +{ + int i = 0; + + for (i = 0; i < MT753X_NUM_PORTS; i++) + gsw->port_entries[i].pvid = map->pvids[i]; + + for (i = 0; i < MT753X_NUM_VLANS; i++) { + gsw->vlan_entries[i].member = map->members[i]; + gsw->vlan_entries[i].etags = map->etags[i]; + gsw->vlan_entries[i].vid = map->vids[i]; + } +} diff --git a/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.h b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.h new file mode 100644 index 000000000..c726b8eac --- /dev/null +++ b/target/linux/ramips/files/drivers/net/phy/mtk/mt753x/mt753x_vlan.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2018 MediaTek Inc. + */ + +#ifndef _MT753X_VLAN_H_ +#define _MT753X_VLAN_H_ + +#define MT753X_NUM_PORTS 7 +#define MT753X_NUM_VLANS 4095 +#define MT753X_MAX_VID 4095 +#define MT753X_MIN_VID 0 + +struct gsw_mt753x; + +struct mt753x_port_entry { + u16 pvid; +}; + +struct mt753x_vlan_entry { + u16 vid; + u8 member; + u8 etags; +}; + +struct mt753x_mapping { + char *name; + u16 pvids[MT753X_NUM_PORTS]; + u8 members[MT753X_NUM_VLANS]; + u8 etags[MT753X_NUM_VLANS]; + u16 vids[MT753X_NUM_VLANS]; +}; + +extern struct mt753x_mapping mt753x_def_mapping[]; + +void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val); +void mt753x_apply_vlan_config(struct gsw_mt753x *gsw); +struct mt753x_mapping *mt753x_find_mapping(struct device_node *np); +void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map); +#endif /* _MT753X_VLAN_H_ */ diff --git a/target/linux/ramips/files/include/net/ra_nat.h b/target/linux/ramips/files/include/net/ra_nat.h new file mode 100644 index 000000000..91e67eb76 --- /dev/null +++ b/target/linux/ramips/files/include/net/ra_nat.h @@ -0,0 +1,558 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2019 MediaTek Inc.
+ * Author: Harry Huang + */ + +#ifndef _RA_NAT_WANTED +#define _RA_NAT_WANTED + +#include +#include + +#ifndef NEXTHDR_IPIP +#define NEXTHDR_IPIP 4 +#endif + +#define hwnat_vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define hwnat_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) + +#if defined(CONFIG_HW_NAT) +extern void hwnat_magic_tag_set_zero(struct sk_buff *skb); +extern void hwnat_check_magic_tag(struct sk_buff *skb); +extern void hwnat_set_headroom_zero(struct sk_buff *skb); +extern void hwnat_set_tailroom_zero(struct sk_buff *skb); +extern void hwnat_copy_headroom(u8 *data, struct sk_buff *skb); +extern void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb); +extern void hwnat_setup_dma_ops(struct device *dev, bool coherent); +#else + +static inline void hwnat_magic_tag_set_zero(struct sk_buff *skb) +{ +} + +static inline void hwnat_check_magic_tag(struct sk_buff *skb) +{ +} + +static inline void hwnat_set_headroom_zero(struct sk_buff *skb) +{ +} + +static inline void hwnat_set_tailroom_zero(struct sk_buff *skb) +{ +} + +static inline void hwnat_copy_headroom(u8 *data, struct sk_buff *skb) +{ +} + +static inline void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb) +{ +} + +#endif + +enum foe_cpu_reason { + TTL_0 = 0x02, /* IPv4(IPv6) TTL(hop limit) = 0 */ + /* IPv4(IPv6) has option(extension) header */ + HAS_OPTION_HEADER = 0x03, + NO_FLOW_IS_ASSIGNED = 0x07, /* No flow is assigned */ + /* IPv4 HNAT doesn't support IPv4 /w fragment */ + IPV4_WITH_FRAGMENT = 0x08, + /* IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment */ + IPV4_HNAPT_DSLITE_WITH_FRAGMENT = 0x09, + /* IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport */ + IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP = 0x0A, + /* IPv6 5T-route/6RD can't find TCP/UDP sport/dport */ + IPV6_5T_6RD_WITHOUT_TCP_UDP = 0x0B, + /* Ingress packet is TCP fin/syn/rst */ + /*(for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */ + TCP_FIN_SYN_RST = 0x0C, + UN_HIT = 0x0D, /* FOE Un-hit */ + HIT_UNBIND = 0x0E, /* FOE Hit unbind */ + /* FOE Hit unbind & rate reach */ + HIT_UNBIND_RATE_REACH = 0x0F, + HIT_BIND_TCP_FIN = 0x10, /* Hit bind PPE TCP FIN entry */ + /* Hit bind PPE entry and TTL(hop limit) = 1 */ + /* and TTL(hop limit) - 1 */ + HIT_BIND_TTL_1 = 0x11, + /* Hit bind and VLAN replacement violation */ + /*(Ingress 1(0) VLAN layers and egress 4(3 or 4) VLAN layers) */ + HIT_BIND_WITH_VLAN_VIOLATION = 0x12, + /* Hit bind and keep alive with unicast old-header packet */ + HIT_BIND_KEEPALIVE_UC_OLD_HDR = 0x13, + /* Hit bind and keep alive with multicast new-header packet */ + HIT_BIND_KEEPALIVE_MC_NEW_HDR = 0x14, + /* Hit bind and keep alive with duplicate old-header packet */ + HIT_BIND_KEEPALIVE_DUP_OLD_HDR = 0x15, + /* FOE Hit bind & force to CPU */ + HIT_BIND_FORCE_TO_CPU = 0x16, + /* Hit bind and remove tunnel IP header, */ + /* but inner IP has option/next header */ + HIT_BIND_WITH_OPTION_HEADER = 0x17, + /* Hit bind and exceed MTU */ + HIT_BIND_EXCEED_MTU = 0x1C, + HIT_BIND_PACKET_SAMPLING = 0x1B, /* PS packet */ + /* Switch clone multicast packet to CPU */ + HIT_BIND_MULTICAST_TO_CPU = 0x18, + /* Switch clone multicast packet to GMAC1 & CPU */ + HIT_BIND_MULTICAST_TO_GMAC_CPU = 0x19, + HIT_PRE_BIND = 0x1A /* Pre-bind */ +}; + +#define MAX_IF_NUM 64 + +struct dmad_rx_descinfo4 { + uint32_t foe_entry_num:15; + uint32_t rsv0:3; + uint32_t CRSN:5; + uint32_t rsv1:3; + uint32_t SPORT:4; + uint32_t ppe:1; + uint32_t ALG:1; + uint32_t IF:8; + uint32_t WDMAID:2; + uint32_t RXID:2; + uint32_t WCID:10; +
uint32_t BSSID:6; + uint32_t rsv3:4; + uint16_t minfo:1; + uint16_t ntype:3; + uint16_t chid:8; + uint16_t rsv4:4; + u16 MAGIC_TAG_PROTECT; +} __packed; + +struct pdma_rx_desc_info4 { + u16 MAGIC_TAG_PROTECT; + uint32_t foe_entry_num:14; + uint32_t CRSN:5; + uint32_t SPORT:4; + uint32_t rsv:6; + uint32_t foe_entry_num_1:1; + uint32_t ppe:1; + uint32_t ALG:1; + uint32_t IF:8; + uint32_t WDMAID:2; + uint32_t RXID:2; + uint32_t WCID:10; + uint32_t BSSID:6; + uint32_t rsv2:4; + uint16_t minfo:1; + uint16_t ntype:3; + uint16_t chid:8; + uint16_t rsv3:4; +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + u16 SOURCE; + u16 DEST; +#endif +} __packed; + +#if defined(CONFIG_MEDIATEK_NETSYS_V2) +struct head_rx_descinfo4 { + uint32_t foe_entry_num:14; + uint32_t CRSN:5; + uint32_t SPORT:4; + uint32_t rsv:6; + uint32_t foe_entry_num_1:1; + uint32_t ppe:1; + uint32_t ALG:1; + uint32_t IF:8; + uint32_t WDMAID:2; + uint32_t RXID:2; + uint32_t WCID:10; + uint32_t BSSID:6; + uint32_t rsv2:4; + uint16_t minfo:1; + uint16_t ntype:3; + uint16_t chid:8; + uint16_t rsv3:4; +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + u16 SOURCE; + u16 DEST; +#endif + u16 MAGIC_TAG_PROTECT; +} __packed; +#else +struct head_rx_descinfo4 { + uint32_t foe_entry_num:14; + uint32_t CRSN:5; + uint32_t SPORT:3; + uint32_t rsv:1; + uint32_t ALG:1; + uint32_t IF:4; + uint32_t rsv2:4; + uint32_t MAGIC_TAG_PROTECT: 16; + uint32_t WDMAID:2; + uint32_t RXID:2; + uint32_t WCID:10; + uint32_t BSSID:6; +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + u16 SOURCE; + u16 DEST; +#endif +} __packed; +#endif + +struct cb_rx_desc_info4 { + u16 MAGIC_TAG_PROTECT0; + uint32_t foe_entry_num:15; + uint32_t CRSN:5; + uint32_t SPORT:4; + uint32_t ALG:1; + uint32_t rsv:7; + uint16_t IF:8; + uint16_t WDMAID:2; + uint16_t RXID:2; + uint16_t WCID:10; + uint16_t BSSID:6; + uint16_t rsv1:4; + uint16_t minfo:1; + uint16_t ntype:3; + uint16_t chid:8; + uint16_t rsv2:4; +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + u16 SOURCE; + u16 DEST; +#endif + u16 MAGIC_TAG_PROTECT1; +} __packed; + + + +#define FOE_INFO_LEN 12 +#define WIFI_INFO_LEN 6 + + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_INFO_LEN (6 + 4 + WIFI_INFO_LEN) +#define FOE_MAGIC_FASTPATH 0x77 +#define FOE_MAGIC_L2TPPATH 0x78 +#endif + +#define FOE_MAGIC_PCI 0x73 +#define FOE_MAGIC_WLAN 0x74 +#define FOE_MAGIC_GE 0x75 +#define FOE_MAGIC_PPE 0x76 +#define FOE_MAGIC_WED0 0x78 +#define FOE_MAGIC_WED1 0x79 +#define FOE_MAGIC_MED 0x80 +#define FOE_MAGIC_EDMA0 0x81 +#define FOE_MAGIC_EDMA1 0x82 +#define TAG_PROTECT 0x6789 +#define USE_HEAD_ROOM 0 +#define USE_TAIL_ROOM 1 +#define USE_CB 2 +#define ALL_INFO_ERROR 3 + +/**************************DMAD FORMAT********************************/ +#define FOE_TAG_PROTECT(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT) + +#define FOE_ENTRY_NUM(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->foe_entry_num) +#define FOE_ALG(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->ALG) +#define FOE_AI(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->CRSN) +#define FOE_SP(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->SPORT) +#define FOE_MAGIC_TAG(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->IF) +#define FOE_WDMA_ID(skb) \ + (((struct dmad_rx_descinfo4 *)((skb)->head))->WDMAID) +#define FOE_RX_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->RXID) +#define FOE_WC_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->WCID) +#define FOE_BSS_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->BSSID) +#define FOE_PPE(skb) 
(((struct dmad_rx_descinfo4 *)((skb)->head))->ppe) + +/***********************HEAD FORMAT*************************************/ + +#define FOE_TAG_PROTECT_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT) +#define FOE_ENTRY_NUM_LSB_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num) +#define FOE_ENTRY_NUM_MSB_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1) + +#define FOE_ENTRY_NUM_HEAD(skb) \ + (((FOE_ENTRY_NUM_MSB_HEAD(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_HEAD(skb)) + + +#define FOE_ALG_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->ALG) +#define FOE_AI_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->CRSN) +#define FOE_SP_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->SPORT) +#define FOE_MAGIC_TAG_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->IF) + + +#define FOE_WDMA_ID_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->WDMAID) +#define FOE_RX_ID_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->RXID) +#define FOE_WC_ID_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->WCID) +#define FOE_BSS_ID_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->BSSID) +#define FOE_PPE_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->PPE) + +/****************************TAIL FORMAT***************************************/ +#define FOE_TAG_PROTECT_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->MAGIC_TAG_PROTECT) +#define FOE_ENTRY_NUM_LSB_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num) + +#define FOE_ENTRY_NUM_MSB_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num_1) +#define FOE_ENTRY_NUM_TAIL(skb) \ + (((FOE_ENTRY_NUM_MSB_TAIL(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_TAIL(skb)) +#define FOE_ALG_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ALG) +#define FOE_AI_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->CRSN) +#define FOE_SP_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SPORT) +#define FOE_MAGIC_TAG_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF) + +#define FOE_WDMA_ID_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WDMAID) +#define FOE_RX_ID_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->RXID) +#define FOE_WC_ID_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WCID) +#define FOE_BSS_ID_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->BSSID) + +#define FOE_PPE_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ppe) +/*********************************************************************/ +#define FOE_WDMA_ID_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->head))->WDMAID) +#define FOE_RX_ID_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->head))->RXID) +#define FOE_WC_ID_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->head))->WCID) +#define FOE_BSS_ID_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->head))->BSSID) + +#define FOE_MINFO(skb) (((struct head_rx_descinfo4 *)((skb)->head))->minfo) +#define FOE_MINFO_NTYPE(skb) (((struct 
head_rx_descinfo4 *)((skb)->head))->ntype) +#define FOE_MINFO_CHID(skb) (((struct head_rx_descinfo4 *)((skb)->head))->chid) +#define FOE_MINFO_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->minfo) +#define FOE_MINFO_NTYPE_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->ntype) +#define FOE_MINFO_CHID_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->chid) + +#define FOE_MINFO_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->minfo) +#define FOE_MINFO_NTYPE_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ntype) +#define FOE_MINFO_CHID_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->chid) + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_SOURCE(skb) (((struct head_rx_descinfo4 *)((skb)->head))->SOURCE) +#define FOE_DEST(skb) (((struct head_rx_descinfo4 *)((skb)->head))->DEST) +#endif + +#define IS_SPACE_AVAILABLE_HEAD(skb) \ + ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0))) +#define IS_SPACE_AVAILABLE_HEAD(skb) \ + ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0))) +#define FOE_INFO_START_ADDR_HEAD(skb) (skb->head) + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_SOURCE_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->SOURCE) +#define FOE_DEST_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->DEST) +#endif + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_SOURCE_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->SOURCE) +#define FOE_DEST_HEAD(skb) \ + (((struct head_rx_descinfo4 *)((skb)->head))->DEST) +#endif +#define IS_SPACE_AVAILABLE_TAIL(skb) \ + (((skb_tailroom(skb) >= FOE_INFO_LEN) ? 1 : 0)) +#define IS_SPACE_AVAILABLE_TAIL(skb) \ + (((skb_tailroom(skb) >= FOE_INFO_LEN) ? 
1 : 0)) +#define FOE_INFO_START_ADDR_TAIL(skb) \ + ((unsigned char *)(long)(skb_end_pointer(skb) - FOE_INFO_LEN)) + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_SOURCE_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SOURCE) +#define FOE_DEST_TAIL(skb) \ + (((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->DEST) +#endif + +/* change the position of skb_CB if necessary */ +#define CB_OFFSET 40 +#define IS_SPACE_AVAILABLE_CB(skb) 1 +#define FOE_INFO_START_ADDR_CB(skb) (skb->cb + CB_OFFSET) +#define FOE_TAG_PROTECT_CB0(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT0) +#define FOE_TAG_PROTECT_CB1(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT1) +#define FOE_ENTRY_NUM_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->foe_entry_num) +#define FOE_ALG_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->ALG) +#define FOE_AI_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->CRSN) +#define FOE_SP_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SPORT) +#define FOE_MAGIC_TAG_CB(skb) \ + (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->IF) + +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) +#define FOE_SOURCE_CB(skb) (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SOURCE) +#define FOE_DEST_CB(skb) (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->DEST) +#endif + +#define IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb) \ + (FOE_TAG_PROTECT_HEAD(skb) == TAG_PROTECT) +#define IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb) \ + (FOE_TAG_PROTECT_TAIL(skb) == TAG_PROTECT) +#define IS_MAGIC_TAG_PROTECT_VALID_CB(skb) \ + ((FOE_TAG_PROTECT_CB0(skb) == TAG_PROTECT) && \ + (FOE_TAG_PROTECT_CB0(skb) == FOE_TAG_PROTECT_CB1(skb))) + +#define IS_IF_PCIE_WLAN_HEAD(skb) \ + ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \ + (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \ + (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) + +#define IS_IF_PCIE_WLAN_TAIL(skb) \ + ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \ + (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN)) + +#define IS_IF_PCIE_WLAN_CB(skb) \ + ((FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_PCI) || \ + (FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_WLAN)) + +/* macros */ +#define magic_tag_set_zero(skb) \ +{ \ + if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \ + (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \ + (FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) { \ + if (IS_SPACE_AVAILABLE_HEAD(skb)) \ + FOE_MAGIC_TAG_HEAD(skb) = 0; \ + } \ + if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \ + (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \ + (FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) { \ + if (IS_SPACE_AVAILABLE_TAIL(skb)) \ + FOE_MAGIC_TAG_TAIL(skb) = 0; \ + } \ +} + +static inline void hwnat_set_l2tp_unhit(struct iphdr *iph, struct sk_buff *skb) +{ +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + /* only clear headroom for TCP or non-L2TP packets */ + if ((iph->protocol == 0x6) || (ntohs(udp_hdr(skb)->dest) != 1701)) { + if (IS_SPACE_AVAILABLE_HEAD(skb)) { + FOE_MAGIC_TAG(skb) = 0; + FOE_AI(skb) = UN_HIT; + } + } +#endif +} + +static inline void hwnat_set_l2tp_fast_path(u32 l2tp_fast_path, u32 pptp_fast_path) +{ +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + l2tp_fast_path = 1; + pptp_fast_path = 0; +#endif +} + +static inline void hwnat_clear_l2tp_fast_path(u32 l2tp_fast_path) +{ +#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP) + l2tp_fast_path = 0; +#endif +} + +/* #define CONFIG_HW_NAT_IPI */ +#if
defined(CONFIG_HW_NAT_IPI) +extern int debug_level; +int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, + struct rps_dev_flow **rflowp); +uint32_t ppe_extif_rx_handler(struct sk_buff *skb); +int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry); +extern unsigned int ipidbg[num_possible_cpus()][10]; +extern unsigned int ipidbg2[num_possible_cpus()][10]; +/* #define HNAT_IPI_RXQUEUE 1 */ +#define HNAT_IPI_DQ 1 +#define HNAT_IPI_HASH_NORMAL 0 +#define HNAT_IPI_HASH_VTAG 1 +#define HNAT_IPI_HASH_FROM_EXTIF 2 +#define HNAT_IPI_HASH_FROM_GMAC 4 + +struct hnat_ipi_s { +#if defined(HNAT_IPI_DQ) + struct sk_buff_head skb_input_queue; + struct sk_buff_head skb_process_queue; +#elif defined(HNAT_IPI_RXQUEUE) + atomic_t rx_queue_num; + unsigned int rx_queue_ridx; + unsigned int rx_queue_widx; + struct sk_buff **rx_queue; +#else + /* unsigned int dummy0[0]; */ + struct sk_buff_head skb_ipi_queue; + /* unsigned int dummy1[8]; */ +#endif + unsigned long time_rec, recv_time; + unsigned int ipi_accum; + /*hwnat ipi use*/ + spinlock_t ipilock; + struct tasklet_struct smp_func_call_tsk; +} ____cacheline_aligned_in_smp; + +struct hnat_ipi_stat { + unsigned long drop_pkt_num_from_extif; + unsigned long drop_pkt_num_from_ppehit; + unsigned int smp_call_cnt_from_extif; + unsigned int smp_call_cnt_from_ppehit; + atomic_t cpu_status; + /* atomic_t cpu_status_from_extif; */ + /* atomic_t cpu_status_from_ppehit; */ + + /* atomic_t hook_status_from_extif; */ + /* atomic_t hook_status_from_ppehit; */ +} ____cacheline_aligned_in_smp; + +#define cpu_status_from_extif cpu_status +#define cpu_status_from_ppehit cpu_status + +struct hnat_ipi_cfg { + unsigned int enable_from_extif; + unsigned int enable_from_ppehit; + unsigned int queue_thresh_from_extif; + unsigned int queue_thresh_from_ppehit; + unsigned int drop_pkt_from_extif; + unsigned int drop_pkt_from_ppehit; + unsigned int ipi_cnt_mod_from_extif; + unsigned int ipi_cnt_mod_from_ppehit; +} ____cacheline_aligned_in_smp; + +int hnat_ipi_init(void); +int hnat_ipi_de_init(void); +#endif + +#define QDMA_RX 5 +#define PDMA_RX 0 + + +#endif diff --git a/target/linux/ramips/patches-5.4/901-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch b/target/linux/ramips/patches-5.4/901-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch new file mode 100644 index 000000000..72719c828 --- /dev/null +++ b/target/linux/ramips/patches-5.4/901-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch @@ -0,0 +1,22 @@ +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 5ba1c72f..f4239459 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -69,6 +69,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1666,6 +1667,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, + skb_shinfo(skb), + offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); + ++ /*headroom copy*/ ++ memcpy(data, skb->head, FOE_INFO_LEN); ++ + /* + * if shinfo is shared we must drop the old head gracefully, but if it + * is not we can just drop the old head and let the existing refcount diff --git a/target/linux/ramips/patches-5.4/902-mtkhnat-add-support-for-virtual-interface-acceleration.patch b/target/linux/ramips/patches-5.4/902-mtkhnat-add-support-for-virtual-interface-acceleration.patch new file mode 100644 index 000000000..150087a56 --- /dev/null +++ b/target/linux/ramips/patches-5.4/902-mtkhnat-add-support-for-virtual-interface-acceleration.patch @@ -0,0 +1,127 @@ +diff --git a/include/net/netfilter/nf_flow_table.h 
b/include/net/netfilter/nf_flow_table.h +index 3d73c0c..960ade1 100644 +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -92,9 +92,12 @@ struct flow_offload { + #define FLOW_OFFLOAD_PATH_VLAN BIT(1) + #define FLOW_OFFLOAD_PATH_PPPOE BIT(2) + #define FLOW_OFFLOAD_PATH_DSA BIT(3) ++#define FLOW_OFFLOAD_PATH_DSLITE BIT(4) ++#define FLOW_OFFLOAD_PATH_6RD BIT(5) + + struct flow_offload_hw_path { + struct net_device *dev; ++ struct net_device *virt_dev; + u32 flags; + + u8 eth_src[ETH_ALEN]; +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c +index be6801524..c51af70f6 100644 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -761,6 +761,7 @@ static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path) + path->flags |= FLOW_OFFLOAD_PATH_VLAN; + path->vlan_proto = vlan->vlan_proto; + path->vlan_id = vlan->vlan_id; ++ path->virt_dev = dev; + path->dev = vlan->real_dev; + + if (vlan->real_dev->netdev_ops->ndo_flow_offload_check) +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 1b7e3141c..da4e34f74 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -57,6 +57,11 @@ + #include + #include + ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++#include ++#include ++#endif ++ + MODULE_AUTHOR("Ville Nuorvala"); + MODULE_DESCRIPTION("IPv6 tunneling device"); + MODULE_LICENSE("GPL"); +@@ -1880,6 +1885,22 @@ int ip6_tnl_get_iflink(const struct net_device *dev) + } + EXPORT_SYMBOL(ip6_tnl_get_iflink); + ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path) ++{ ++ struct net_device *dev = path->dev; ++ struct ip6_tnl *tnl = netdev_priv(dev); ++ ++ if (path->flags & FLOW_OFFLOAD_PATH_DSLITE) ++ return -EEXIST; ++ ++ path->flags |= FLOW_OFFLOAD_PATH_DSLITE; ++ path->dev = tnl->dev; ++ ++ return 0; ++} ++#endif /* CONFIG_NF_FLOW_TABLE */ ++ + int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops, + unsigned int num) + { +@@ -1941,6 +1962,9 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { + .ndo_change_mtu = ip6_tnl_change_mtu, + .ndo_get_stats = ip6_get_stats, + .ndo_get_iflink = ip6_tnl_get_iflink, ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ .ndo_flow_offload_check = ipip6_dev_flow_offload_check, ++#endif + }; + + #define IPXIPX_FEATURES (NETIF_F_SG | \ +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index 98954830c..42b6e8c4c 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -52,6 +52,11 @@ + #include + #include + ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++#include ++#include ++#endif ++ + /* + This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c + +@@ -1345,6 +1350,22 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + return err; + } + ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path) ++{ ++ struct net_device *dev = path->dev; ++ struct ip_tunnel *tnl = netdev_priv(dev); ++ ++ if (path->flags & FLOW_OFFLOAD_PATH_6RD) ++ return -EEXIST; ++ ++ path->flags |= FLOW_OFFLOAD_PATH_6RD; ++ path->dev = tnl->dev; ++ ++ return 0; ++} ++#endif /* CONFIG_NF_FLOW_TABLE */ ++ + static const struct net_device_ops ipip6_netdev_ops = { + .ndo_init = ipip6_tunnel_init, + .ndo_uninit = ipip6_tunnel_uninit, +@@ -1352,6 +1373,9 @@ static const struct net_device_ops ipip6_netdev_ops = { + .ndo_do_ioctl = ipip6_tunnel_ioctl, + .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_iflink = ip_tunnel_get_iflink, ++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) ++ 
.ndo_flow_offload_check = ipip6_dev_flow_offload_check, ++#endif + }; + + static void ipip6_dev_free(struct net_device *dev) diff --git a/target/linux/ramips/patches-5.4/903-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch b/target/linux/ramips/patches-5.4/903-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch new file mode 100644 index 000000000..a9774b189 --- /dev/null +++ b/target/linux/ramips/patches-5.4/903-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch @@ -0,0 +1,176 @@ +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1354,9 +1354,21 @@ static int mtk_poll_rx(struct napi_struc + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && +- (trxd.rxd2 & RX_DMA_VTAG)) +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), +- RX_DMA_VID(trxd.rxd3)); ++ (trxd.rxd2 & RX_DMA_VTAG)) { ++ __vlan_hwaccel_put_tag(skb, ++ htons(RX_DMA_VPID(trxd.rxd3)), ++ RX_DMA_TCI(trxd.rxd3)); ++ ++ /* If netdev is attached to dsa switch, the special ++ * tag inserted in VLAN field by switch hardware can ++ * be offload by RX HW VLAN offload. Clears the VLAN ++ * information from @skb to avoid unexpected 8021d ++ * handler before packet enter dsa framework. ++ */ ++ if (netdev_uses_dsa(netdev)) ++ __vlan_hwaccel_clear_tag(skb); ++ } ++ + if (mtk_offload_check_rx(eth, skb, trxd.rxd4) == 0) { + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); +@@ -2050,19 +2062,32 @@ static netdev_features_t mtk_fix_feature + } + } + ++ if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) { ++ netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n"); ++ ++ features &= ~NETIF_F_HW_VLAN_CTAG_TX; ++ } ++ + return features; + } + + static int mtk_set_features(struct net_device *dev, netdev_features_t features) + { ++ struct mtk_mac *mac = netdev_priv(dev); ++ struct mtk_eth *eth = mac->hw; + int err = 0; + +- if (!((dev->features ^ features) & NETIF_F_LRO)) ++ if (!((dev->features ^ features) & MTK_SET_FEATURES)) + return 0; + + if (!(features & NETIF_F_LRO)) + mtk_hwlro_netdev_disable(dev); + ++ if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) ++ mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); ++ else ++ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); ++ + return err; + } + +@@ -2326,6 +2351,15 @@ static int mtk_open(struct net_device *d + + mtk_gdm_config(eth, gdm_config); + ++ /* Indicates CDM to parse the MTK special tag from CPU */ ++ if (netdev_uses_dsa(dev)) { ++ u32 val; ++ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); ++ mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); ++ val = mtk_r32(eth, MTK_CDMP_IG_CTRL); ++ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); ++ } ++ + napi_enable(ð->tx_napi); + napi_enable(ð->rx_napi); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); +@@ -2500,7 +2534,7 @@ static void mtk_dim_tx(struct work_struc + + static int mtk_hw_init(struct mtk_eth *eth) + { +- int i, val, ret; ++ int i, ret; + + if (test_and_set_bit(MTK_HW_INIT, ð->state)) + return 0; +@@ -2555,12 +2589,6 @@ static int mtk_hw_init(struct mtk_eth *e + for (i = 0; i < MTK_MAC_COUNT; i++) + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); + +- /* Indicates CDM to parse the MTK special tag from CPU +- * which also is working out for untag packets. 
+- */ +- val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); +- mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); +- + /* Enable RX VLan Offloading */ + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -42,6 +42,8 @@ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_IPV6_CSUM) ++#define MTK_SET_FEATURES (NETIF_F_LRO | \ ++ NETIF_F_HW_VLAN_CTAG_RX) + #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) + #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) + +@@ -78,6 +80,10 @@ + #define MTK_CDMQ_IG_CTRL 0x1400 + #define MTK_CDMQ_STAG_EN BIT(0) + ++/* CDMP Ingress Control Register */ ++#define MTK_CDMP_IG_CTRL 0x400 ++#define MTK_CDMP_STAG_EN BIT(0) ++ + /* CDMP Exgress Control Register */ + #define MTK_CDMP_EG_CTRL 0x404 + +@@ -307,7 +313,9 @@ + #define RX_DMA_VTAG BIT(15) + + /* QDMA descriptor rxd3 */ +-#define RX_DMA_VID(_x) ((_x) & 0xfff) ++#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK) ++#define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) ++#define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff) + + /* QDMA descriptor rxd4 */ + #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) +--- a/net/dsa/tag_mtk.c ++++ b/net/dsa/tag_mtk.c +@@ -73,22 +73,28 @@ static struct sk_buff *mtk_tag_rcv(struc + bool is_multicast_skb = is_multicast_ether_addr(dest) && + !is_broadcast_ether_addr(dest); + +- if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) +- return NULL; ++ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) { ++ hdr = ntohs(skb->vlan_proto); ++ skb->vlan_proto = 0; ++ skb->vlan_tci = 0; ++ } else { ++ if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) ++ return NULL; + +- /* The MTK header is added by the switch between src addr +- * and ethertype at this point, skb->data points to 2 bytes +- * after src addr so header should be 2 bytes right before. +- */ +- phdr = (__be16 *)(skb->data - 2); +- hdr = ntohs(*phdr); ++ /* The MTK header is added by the switch between src addr ++ * and ethertype at this point, skb->data points to 2 bytes ++ * after src addr so header should be 2 bytes right before. ++ */ ++ phdr = (__be16 *)(skb->data - 2); ++ hdr = ntohs(*phdr); + +- /* Remove MTK tag and recalculate checksum. */ +- skb_pull_rcsum(skb, MTK_HDR_LEN); ++ /* Remove MTK tag and recalculate checksum. 
*/ ++ skb_pull_rcsum(skb, MTK_HDR_LEN); + +- memmove(skb->data - ETH_HLEN, +- skb->data - ETH_HLEN - MTK_HDR_LEN, +- 2 * ETH_ALEN); ++ memmove(skb->data - ETH_HLEN, ++ skb->data - ETH_HLEN - MTK_HDR_LEN, ++ 2 * ETH_ALEN); ++ } + + /* Get source port information */ + port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK); diff --git a/target/linux/ramips/patches-5.4/980-add-mtk-vendor-ethernet-drivers.patch b/target/linux/ramips/patches-5.4/980-add-mtk-vendor-ethernet-drivers.patch new file mode 100644 index 000000000..972f70756 --- /dev/null +++ b/target/linux/ramips/patches-5.4/980-add-mtk-vendor-ethernet-drivers.patch @@ -0,0 +1,41 @@ +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -125,6 +125,7 @@ + source "drivers/net/ethernet/microchip/Kconfig" + source "drivers/net/ethernet/moxa/Kconfig" + source "drivers/net/ethernet/mscc/Kconfig" ++source "drivers/net/ethernet/mtk/Kconfig" + source "drivers/net/ethernet/myricom/Kconfig" + + config FEALNX +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -73,6 +73,7 @@ + obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ + obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ + obj-$(CONFIG_NET_VENDOR_RALINK) += ralink/ ++obj-$(CONFIG_NET_VENDOR_RAW_MEDIATEK) += mtk/ + obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ + obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/ + obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -577,6 +577,8 @@ + the Reduced Gigabit Media Independent Interface(RGMII) between + Ethernet physical media devices and the Gigabit Ethernet controller. + ++source "drivers/net/phy/mtk/mt753x/Kconfig" ++ + endif # PHYLIB + + config MICREL_KS8995MA +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -99,6 +99,7 @@ + obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o + obj-$(CONFIG_MICROSEMI_PHY) += mscc.o + obj-$(CONFIG_NATIONAL_PHY) += national.o ++obj-$(CONFIG_MT753X_GSW) += mtk/mt753x/ + obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o + obj-$(CONFIG_QSEMI_PHY) += qsemi.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o
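The include/net/ra_nat.h header added by this series describes the FOE descriptor that the PPE leaves in the skb head/tail room, and the FOE_*_HEAD/_TAIL macros are how other code reads it. As an illustration only, not part of the patch series, here is a minimal sketch of how a receive-side hook might consult that descriptor, assuming the DMA info has been copied into the skb headroom; the helper name skb_hnat_is_hit_unbind is hypothetical:

#include <linux/skbuff.h>
#include <net/ra_nat.h>

static bool skb_hnat_is_hit_unbind(struct sk_buff *skb)
{
	/* The FOE info is only meaningful if the magic marker survived. */
	if (!IS_SPACE_AVAILABLE_HEAD(skb) ||
	    !IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb))
		return false;

	/* CRSN (read via FOE_AI_HEAD) records why the PPE handed the
	 * packet to the CPU; HIT_UNBIND / HIT_UNBIND_RATE_REACH mean the
	 * flow was looked up but is not yet bound in hardware.
	 */
	return FOE_AI_HEAD(skb) == HIT_UNBIND ||
	       FOE_AI_HEAD(skb) == HIT_UNBIND_RATE_REACH;
}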