ramips: add MediaTek Vendor Ethernet driver

AmadeusGhost 2022-05-25 17:18:08 +08:00 committed by AmadeusGhost
parent f6f103b1cb
commit 177dd50bc6
40 changed files with 20670 additions and 0 deletions


@@ -0,0 +1,30 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_RAW_MEDIATEK
bool "MediaTek ethernet driver"
depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
	help
	  If you have a MediaTek SoC with ethernet, say Y.
if NET_VENDOR_RAW_MEDIATEK
config NET_MEDIATEK_SOC_GE
tristate "MediaTek SoC Gigabit Ethernet support"
select PHYLINK
	help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
config MEDIATEK_NETSYS_V2
tristate "MediaTek Ethernet NETSYS V2 support"
depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC_GE
	help
	  This option enables MediaTek Ethernet NETSYS V2 support.
config NET_MEDIATEK_HNAT
tristate "MediaTek HW NAT support"
depends on NET_MEDIATEK_SOC_GE && NF_CONNTRACK && IP_NF_NAT
	help
	  This driver supports the hardware Network Address Translation
	  in the MediaTek MT2701/MT7622/MT7629/MT7621 chipset family.
endif #NET_VENDOR_RAW_MEDIATEK
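
For reference, a minimal configuration fragment that enables this stack might look like the following (illustrative only; MEDIATEK_NETSYS_V2 additionally depends on ARCH_MEDIATEK):

CONFIG_NET_VENDOR_RAW_MEDIATEK=y
CONFIG_NET_MEDIATEK_SOC_GE=y
CONFIG_NET_MEDIATEK_HNAT=m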


@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the MediaTek SoCs' built-in ethernet MACs
#
obj-$(CONFIG_NET_MEDIATEK_SOC_GE) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/

File diff suppressed because it is too large.


@@ -0,0 +1,287 @@
/*
* Copyright (C) 2018 MediaTek Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
* Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
*/
#ifndef MTK_ETH_DBG_H
#define MTK_ETH_DBG_H
/* Debug Purpose Register */
#define MTK_PSE_FQFC_CFG 0x100
#define MTK_FE_CDM1_FSM 0x220
#define MTK_FE_CDM2_FSM 0x224
#define MTK_FE_CDM3_FSM 0x238
#define MTK_FE_CDM4_FSM 0x298
#define MTK_FE_GDM1_FSM 0x228
#define MTK_FE_GDM2_FSM 0x22C
#define MTK_FE_PSE_FREE 0x240
#define MTK_FE_DROP_FQ 0x244
#define MTK_FE_DROP_FC 0x248
#define MTK_FE_DROP_PPE 0x24C
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
#define MTK_SGMII_FALSE_CARRIER_CNT(x) (0x10060028 + ((x) * 0x10000))
#define MTK_SGMII_EFUSE 0x11D008C8
#define MTK_WED_RTQM_GLO_CFG 0x15010B00
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_PSE_IQ_STA(x) (0x180 + (x) * 0x4)
#define MTK_PSE_OQ_STA(x) (0x1A0 + (x) * 0x4)
#else
#define MTK_PSE_IQ_STA(x) (0x110 + (x) * 0x4)
#define MTK_PSE_OQ_STA(x) (0x118 + (x) * 0x4)
#endif
#define MTKETH_MII_READ 0x89F3
#define MTKETH_MII_WRITE 0x89F4
#define MTKETH_ESW_REG_READ 0x89F1
#define MTKETH_ESW_REG_WRITE 0x89F2
#define MTKETH_MII_READ_CL45 0x89FC
#define MTKETH_MII_WRITE_CL45 0x89FD
#define REG_ESW_MAX 0xFC
#define PROCREG_ESW_CNT "esw_cnt"
#define PROCREG_TXRING "tx_ring"
#define PROCREG_HWTXRING "hwtx_ring"
#define PROCREG_RXRING "rx_ring"
#define PROCREG_DIR "mtketh"
#define PROCREG_DBG_REGS "dbg_regs"
#define PROCREG_HW_LRO_STATS "hw_lro_stats"
#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
#define PROCREG_RESET_EVENT "reset_event"
/* HW LRO flush reason */
#define MTK_HW_LRO_AGG_FLUSH (1)
#define MTK_HW_LRO_AGE_FLUSH (2)
#define MTK_HW_LRO_NOT_IN_SEQ_FLUSH (3)
#define MTK_HW_LRO_TIMESTAMP_FLUSH (4)
#define MTK_HW_LRO_NON_RULE_FLUSH (5)
#define SET_PDMA_RXRING_MAX_AGG_CNT(eth, x, y) \
{ \
u32 reg_val1 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \
u32 reg_val2 = mtk_r32(eth, MTK_LRO_CTRL_DW3_CFG(x)); \
reg_val1 &= ~MTK_LRO_RING_AGG_CNT_L_MASK; \
reg_val2 &= ~MTK_LRO_RING_AGG_CNT_H_MASK; \
reg_val1 |= ((y) & 0x3f) << MTK_LRO_RING_AGG_CNT_L_OFFSET; \
reg_val2 |= (((y) >> 6) & 0x03) << \
MTK_LRO_RING_AGG_CNT_H_OFFSET; \
mtk_w32(eth, reg_val1, MTK_LRO_CTRL_DW2_CFG(x)); \
mtk_w32(eth, reg_val2, MTK_LRO_CTRL_DW3_CFG(x)); \
}
#define SET_PDMA_RXRING_AGG_TIME(eth, x, y) \
{ \
u32 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \
reg_val &= ~MTK_LRO_RING_AGG_TIME_MASK; \
reg_val |= ((y) & 0xffff) << MTK_LRO_RING_AGG_TIME_OFFSET; \
mtk_w32(eth, reg_val, MTK_LRO_CTRL_DW2_CFG(x)); \
}
#define SET_PDMA_RXRING_AGE_TIME(eth, x, y) \
{ \
u32 reg_val1 = mtk_r32(eth, MTK_LRO_CTRL_DW1_CFG(x)); \
u32 reg_val2 = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \
reg_val1 &= ~MTK_LRO_RING_AGE_TIME_L_MASK; \
reg_val2 &= ~MTK_LRO_RING_AGE_TIME_H_MASK; \
reg_val1 |= ((y) & 0x3ff) << MTK_LRO_RING_AGE_TIME_L_OFFSET; \
reg_val2 |= (((y) >> 10) & 0x03f) << \
MTK_LRO_RING_AGE_TIME_H_OFFSET; \
mtk_w32(eth, reg_val1, MTK_LRO_CTRL_DW1_CFG(x)); \
mtk_w32(eth, reg_val2, MTK_LRO_CTRL_DW2_CFG(x)); \
}
#define SET_PDMA_LRO_BW_THRESHOLD(eth, x) \
{ \
u32 reg_val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW2); \
reg_val = (x); \
mtk_w32(eth, reg_val, MTK_PDMA_LRO_CTRL_DW2); \
}
#define SET_PDMA_RXRING_VALID(eth, x, y) \
{ \
u32 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(x)); \
reg_val &= ~(0x1 << MTK_RX_PORT_VALID_OFFSET); \
reg_val |= ((y) & 0x1) << MTK_RX_PORT_VALID_OFFSET; \
mtk_w32(eth, reg_val, MTK_LRO_CTRL_DW2_CFG(x)); \
}
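/* Hypothetical usage sketch (ring index and thresholds illustrative only,
 * not values taken from this commit):
 *
 *	SET_PDMA_RXRING_MAX_AGG_CNT(eth, 1, 64);
 *	SET_PDMA_RXRING_AGE_TIME(eth, 1, 200);
 *	SET_PDMA_RXRING_VALID(eth, 1, 1);
 */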
struct mtk_lro_alt_v1_info0 {
u32 dtp : 16;
u32 stp : 16;
};
struct mtk_lro_alt_v1_info1 {
u32 sip0 : 32;
};
struct mtk_lro_alt_v1_info2 {
u32 sip1 : 32;
};
struct mtk_lro_alt_v1_info3 {
u32 sip2 : 32;
};
struct mtk_lro_alt_v1_info4 {
u32 sip3 : 32;
};
struct mtk_lro_alt_v1_info5 {
u32 vlan_vid0 : 32;
};
struct mtk_lro_alt_v1_info6 {
u32 vlan_vid1 : 16;
u32 vlan_vid_vld : 4;
u32 cnt : 12;
};
struct mtk_lro_alt_v1_info7 {
u32 dw_len : 32;
};
struct mtk_lro_alt_v1_info8 {
u32 dip_id : 2;
u32 ipv6 : 1;
u32 ipv4 : 1;
u32 resv : 27;
u32 valid : 1;
};
struct mtk_lro_alt_v1 {
struct mtk_lro_alt_v1_info0 alt_info0;
struct mtk_lro_alt_v1_info1 alt_info1;
struct mtk_lro_alt_v1_info2 alt_info2;
struct mtk_lro_alt_v1_info3 alt_info3;
struct mtk_lro_alt_v1_info4 alt_info4;
struct mtk_lro_alt_v1_info5 alt_info5;
struct mtk_lro_alt_v1_info6 alt_info6;
struct mtk_lro_alt_v1_info7 alt_info7;
struct mtk_lro_alt_v1_info8 alt_info8;
};
struct mtk_lro_alt_v2_info0 {
u32 v2_id_h:3;
u32 v1_id:12;
u32 v0_id:12;
u32 v3_valid:1;
u32 v2_valid:1;
u32 v1_valid:1;
u32 v0_valid:1;
u32 valid:1;
};
struct mtk_lro_alt_v2_info1 {
u32 sip3_h:9;
u32 v6_valid:1;
u32 v4_valid:1;
u32 v3_id:12;
u32 v2_id_l:9;
};
struct mtk_lro_alt_v2_info2 {
u32 sip2_h:9;
u32 sip3_l:23;
};
struct mtk_lro_alt_v2_info3 {
u32 sip1_h:9;
u32 sip2_l:23;
};
struct mtk_lro_alt_v2_info4 {
u32 sip0_h:9;
u32 sip1_l:23;
};
struct mtk_lro_alt_v2_info5 {
u32 dip3_h:9;
u32 sip0_l:23;
};
struct mtk_lro_alt_v2_info6 {
u32 dip2_h:9;
u32 dip3_l:23;
};
struct mtk_lro_alt_v2_info7 {
u32 dip1_h:9;
u32 dip2_l:23;
};
struct mtk_lro_alt_v2_info8 {
u32 dip0_h:9;
u32 dip1_l:23;
};
struct mtk_lro_alt_v2_info9 {
u32 sp_h:9;
u32 dip0_l:23;
};
struct mtk_lro_alt_v2_info10 {
u32 resv:9;
u32 dp:16;
u32 sp_l:7;
};
struct mtk_lro_alt_v2 {
struct mtk_lro_alt_v2_info0 alt_info0;
struct mtk_lro_alt_v2_info1 alt_info1;
struct mtk_lro_alt_v2_info2 alt_info2;
struct mtk_lro_alt_v2_info3 alt_info3;
struct mtk_lro_alt_v2_info4 alt_info4;
struct mtk_lro_alt_v2_info5 alt_info5;
struct mtk_lro_alt_v2_info6 alt_info6;
struct mtk_lro_alt_v2_info7 alt_info7;
struct mtk_lro_alt_v2_info8 alt_info8;
struct mtk_lro_alt_v2_info9 alt_info9;
struct mtk_lro_alt_v2_info10 alt_info10;
};
struct mtk_esw_reg {
unsigned int off;
unsigned int val;
};
struct mtk_mii_ioctl_data {
u16 phy_id;
u16 reg_num;
unsigned int val_in;
unsigned int val_out;
};
#if defined(CONFIG_NET_DSA_MT7530) || defined(CONFIG_MT753X_GSW)
static inline bool mt7530_exist(struct mtk_eth *eth)
{
return true;
}
#else
static inline bool mt7530_exist(struct mtk_eth *eth)
{
return false;
}
#endif
extern u32 _mtk_mdio_read(struct mtk_eth *eth, u16 phy_addr, u16 phy_reg);
extern u32 _mtk_mdio_write(struct mtk_eth *eth, u16 phy_addr,
u16 phy_register, u16 write_data);
extern u32 mtk_cl45_ind_read(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data);
extern u32 mtk_cl45_ind_write(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data);
extern atomic_t force;
int debug_proc_init(struct mtk_eth *eth);
void debug_proc_exit(void);
int mtketh_debugfs_init(struct mtk_eth *eth);
void mtketh_debugfs_exit(struct mtk_eth *eth);
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd);
void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd);
#endif /* MTK_ETH_DBG_H */
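
The MTKETH_* ioctl numbers above fall in the SIOCDEVPRIVATE range, so user space reaches mtk_do_priv_ioctl() through a plain socket ioctl on the interface. A minimal user-space sketch, hypothetical and not part of this commit (the interface name "eth0" and register choice are illustrative):

#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#define MTKETH_MII_READ 0x89F3	/* must match the driver's define above */

/* user-space mirror of the driver's struct mtk_mii_ioctl_data */
struct mtk_mii_ioctl_data {
	unsigned short phy_id;
	unsigned short reg_num;
	unsigned int val_in;
	unsigned int val_out;
};

int main(void)
{
	struct mtk_mii_ioctl_data mii = { .phy_id = 0, .reg_num = 2 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&mii;
	if (ioctl(fd, MTKETH_MII_READ, &ifr) == 0)
		printf("phy 0, reg 2 (PHYID1) = 0x%x\n", mii.val_out);
	close(fd);
	return 0;
}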


@@ -0,0 +1,304 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2019 MediaTek Inc.
/* A library for configuring the path from GMAC/GDM to the target PHY
*
* Author: Sean Wang <sean.wang@mediatek.com>
*
*/
#include <linux/phy.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
struct mtk_eth_muxc {
const char *name;
int cap_bit;
int (*set_path)(struct mtk_eth *eth, int path);
};
static const char *mtk_eth_path_name(int path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
return "gmac1_rgmii";
case MTK_ETH_PATH_GMAC1_TRGMII:
return "gmac1_trgmii";
case MTK_ETH_PATH_GMAC1_SGMII:
return "gmac1_sgmii";
case MTK_ETH_PATH_GMAC2_RGMII:
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
return "gdm1_esw";
default:
return "unknown path";
}
}
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
{
bool updated = true;
u32 val, mask, set;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
mask = ~(u32)MTK_MUX_TO_ESW;
set = 0;
break;
case MTK_ETH_PATH_GDM1_ESW:
mask = ~(u32)MTK_MUX_TO_ESW;
set = MTK_MUX_TO_ESW;
break;
default:
updated = false;
break;
	}
if (updated) {
val = mtk_r32(eth, MTK_MAC_MISC);
val = (val & mask) | set;
mtk_w32(eth, val, MTK_MAC_MISC);
}
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_GEPHY:
val = ~(u32)GEPHY_MAC_SEL;
break;
default:
updated = false;
break;
}
if (updated)
regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
{
	unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_SGMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) {
reg = USB_PHY_SWITCH_REG;
val = SGMII_QPHY_SEL;
mask = QPHY_SEL_MASK;
} else {
reg = INFRA_MISC2;
val = CO_QPHY_SEL;
mask = val;
}
break;
default:
updated = false;
break;
}
if (updated)
regmap_update_bits(eth->infra, reg, mask, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
spin_lock(&eth->syscfg0_lock);
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
val = SYSCFG0_SGMII_GMAC1;
break;
case MTK_ETH_PATH_GMAC2_SGMII:
val = SYSCFG0_SGMII_GMAC2;
break;
case MTK_ETH_PATH_GMAC1_RGMII:
case MTK_ETH_PATH_GMAC2_RGMII:
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= SYSCFG0_SGMII_MASK;
		if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
		    (path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
val = 0;
else
updated = false;
break;
default:
updated = false;
break;
	}
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK, val);
spin_unlock(&eth->syscfg0_lock);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
spin_lock(&eth->syscfg0_lock);
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
val |= SYSCFG0_SGMII_GMAC1_V2;
break;
case MTK_ETH_PATH_GMAC2_GEPHY:
val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
break;
case MTK_ETH_PATH_GMAC2_SGMII:
val |= SYSCFG0_SGMII_GMAC2_V2;
break;
default:
updated = false;
	}
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK, val);
spin_unlock(&eth->syscfg0_lock);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static const struct mtk_eth_muxc mtk_eth_muxc[] = {
{
.name = "mux_gdm1_to_gmac1_esw",
.cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
.set_path = set_mux_gdm1_to_gmac1_esw,
}, {
.name = "mux_gmac2_gmac0_to_gephy",
.cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
.set_path = set_mux_gmac2_gmac0_to_gephy,
}, {
.name = "mux_u3_gmac2_to_qphy",
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
}, {
.name = "mux_gmac12_to_gephy_sgmii",
.cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
.set_path = set_mux_gmac12_to_gephy_sgmii,
},
};
static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
{
int i, err = 0;
if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
		dev_err(eth->dev, "path %s isn't supported on the SoC\n",
mtk_eth_path_name(path));
return -EINVAL;
}
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
return 0;
/* Setup MUX in path fabric */
for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
err = mtk_eth_muxc[i].set_path(eth, path);
if (err)
goto out;
} else {
dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
mtk_eth_muxc[i].name);
}
}
out:
return err;
}
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
/* Setup proper MUXes along the path */
err = mtk_eth_mux_setup(eth, path);
if (err)
return err;
return 0;
}
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
if (!path)
return -EINVAL;
/* Setup proper MUXes along the path */
err = mtk_eth_mux_setup(eth, path);
if (err)
return err;
return 0;
}
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;
/* Setup proper MUXes along the path */
err = mtk_eth_mux_setup(eth, path);
if (err)
return err;
return 0;
}
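
A hypothetical caller sketch (not part of this commit) showing how MAC setup code might dispatch to these helpers based on the device-tree phy-mode:

static int example_setup_path(struct mtk_eth *eth, int mac_id,
			      phy_interface_t mode)
{
	/* illustrative mapping only; the real driver also handles
	 * TRGMII and SoC-specific cases */
	switch (mode) {
	case PHY_INTERFACE_MODE_SGMII:
		return mtk_gmac_sgmii_path_setup(eth, mac_id);
	case PHY_INTERFACE_MODE_RGMII:
		return mtk_gmac_rgmii_path_setup(eth, mac_id);
	case PHY_INTERFACE_MODE_GMII:
		return mtk_gmac_gephy_path_setup(eth, mac_id);
	default:
		return -EINVAL;
	}
}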


@@ -0,0 +1,405 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2022 MediaTek Inc.
* Author: Henry Yen <henry.yen@mediatek.com>
*/
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
#include "mtk_eth_reset.h"
char *mtk_reset_event_name[32] = {
[MTK_EVENT_FORCE] = "Force",
[MTK_EVENT_WARM_CNT] = "Warm",
[MTK_EVENT_COLD_CNT] = "Cold",
[MTK_EVENT_TOTAL_CNT] = "Total",
[MTK_EVENT_FQ_EMPTY] = "FQ Empty",
[MTK_EVENT_TSO_FAIL] = "TSO Fail",
[MTK_EVENT_TSO_ILLEGAL] = "TSO Illegal",
[MTK_EVENT_TSO_ALIGN] = "TSO Align",
[MTK_EVENT_RFIFO_OV] = "RFIFO OV",
[MTK_EVENT_RFIFO_UF] = "RFIFO UF",
};
void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
{
struct mtk_reset_event *reset_event = &eth->reset_event;
reset_event->count[id]++;
}
int mtk_eth_cold_reset(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0 | RSTCTRL_PPE1);
else
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
return 0;
}
int mtk_eth_warm_reset(struct mtk_eth *eth)
{
u32 reset_bits = 0, i = 0, done = 0;
u32 val1 = 0, val2 = 0, val3 = 0;
	/* settle delay before asserting the FE reset */
	mdelay(100);
reset_bits |= RSTCTRL_FE;
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
reset_bits, reset_bits);
	/* poll up to 1000 us for the FE reset bit to latch */
	while (i < 1000) {
regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val1);
if (val1 & RSTCTRL_FE)
break;
i++;
udelay(1);
}
if (i < 1000) {
reset_bits = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0 | RSTCTRL_PPE1;
else
reset_bits |= RSTCTRL_ETH | RSTCTRL_PPE0;
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
reset_bits, reset_bits);
udelay(1);
regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val2);
if (!(val2 & reset_bits))
pr_info("[%s] error val2=0x%x reset_bits=0x%x !\n",
__func__, val2, reset_bits);
reset_bits |= RSTCTRL_FE;
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
reset_bits, ~reset_bits);
udelay(1);
regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val3);
if (val3 & reset_bits)
pr_info("[%s] error val3=0x%x reset_bits=0x%x !\n",
__func__, val3, reset_bits);
done = 1;
mtk_reset_event_update(eth, MTK_EVENT_WARM_CNT);
}
pr_info("[%s] reset record val1=0x%x, val2=0x%x, val3=0x%x !\n",
__func__, val1, val2, val3);
if (!done)
mtk_eth_cold_reset(eth);
return 0;
}
u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status)
{
u32 ret = 0, val = 0;
if ((status & MTK_FE_INT_FQ_EMPTY) ||
(status & MTK_FE_INT_RFIFO_UF) ||
(status & MTK_FE_INT_RFIFO_OV) ||
(status & MTK_FE_INT_TSO_FAIL) ||
(status & MTK_FE_INT_TSO_ALIGN) ||
(status & MTK_FE_INT_TSO_ILLEGAL)) {
while (status) {
val = ffs((unsigned int)status) - 1;
mtk_reset_event_update(eth, val);
status &= ~(1 << val);
}
ret = 1;
}
if (atomic_read(&force)) {
mtk_reset_event_update(eth, MTK_EVENT_FORCE);
ret = 1;
}
if (ret) {
mtk_reset_event_update(eth, MTK_EVENT_TOTAL_CNT);
mtk_dump_netsys_info(eth);
}
return ret;
}
irqreturn_t mtk_handle_fe_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
u32 status = 0, val = 0;
status = mtk_r32(eth, MTK_FE_INT_STATUS);
pr_info("[%s] Trigger FE Misc ISR: 0x%x\n", __func__, status);
while (status) {
val = ffs((unsigned int)status) - 1;
status &= ~(1 << val);
if ((val == MTK_EVENT_FQ_EMPTY) ||
(val == MTK_EVENT_TSO_FAIL) ||
(val == MTK_EVENT_TSO_ILLEGAL) ||
(val == MTK_EVENT_TSO_ALIGN) ||
(val == MTK_EVENT_RFIFO_OV) ||
(val == MTK_EVENT_RFIFO_UF))
pr_info("[%s] Detect reset event: %s !\n", __func__,
mtk_reset_event_name[val]);
}
mtk_w32(eth, 0xFFFFFFFF, MTK_FE_INT_STATUS);
return IRQ_HANDLED;
}
static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
{
struct mtk_eth *eth = _eth;
u32 cur = offset;
pr_info("\n============ %s ============\n", name);
	while (cur < offset + range) {
pr_info("0x%x: %08x %08x %08x %08x\n",
cur, mtk_r32(eth, cur), mtk_r32(eth, cur + 0x4),
mtk_r32(eth, cur + 0x8), mtk_r32(eth, cur + 0xc));
cur += 0x10;
}
}
void mtk_dump_netsys_info(void *_eth)
{
struct mtk_eth *eth = _eth;
mtk_dump_reg(eth, "FE", 0x0, 0x500);
mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400);
mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
}
void mtk_dma_monitor(struct timer_list *t)
{
struct mtk_eth *eth = from_timer(eth, t, mtk_dma_monitor_timer);
static u32 timestamp = 0;
static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
static u32 prev_wdidx = 0;
u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
((mtk_r32(eth, MTK_PSE_OQ_STA(1)) & 0x000001FF) == 0) &&
((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
u32 is_cdm_full =
!(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0x3F0000) == 0) &&
((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);
if (cur_wdidx == prev_wdidx && is_wtx_busy &&
is_oq_free && is_cdm_full) {
err_cnt1++;
if (err_cnt1 == 3) {
pr_info("WDMA CDM Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt1 = %d", err_cnt1);
pr_info("prev_wdidx = 0x%x | cur_wdidx = 0x%x\n",
prev_wdidx, cur_wdidx);
pr_info("is_wtx_busy = %d | is_oq_free = %d | is_cdm_full = %d\n",
is_wtx_busy, is_oq_free, is_cdm_full);
pr_info("-- -- -- -- -- -- --\n");
pr_info("WDMA_CTX_PTR = 0x%x\n", mtk_r32(eth, 0x4808));
pr_info("WDMA_DTX_PTR = 0x%x\n",
mtk_r32(eth, MTK_WDMA_DTX_PTR(0)));
pr_info("WDMA_GLO_CFG = 0x%x\n",
mtk_r32(eth, MTK_WDMA_GLO_CFG(0)));
pr_info("WDMA_TX_DBG_MON0 = 0x%x\n",
mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)));
pr_info("PSE_OQ_STA1 = 0x%x\n",
mtk_r32(eth, MTK_PSE_OQ_STA(0)));
pr_info("PSE_OQ_STA2 = 0x%x\n",
mtk_r32(eth, MTK_PSE_OQ_STA(1)));
pr_info("PSE_OQ_STA5 = 0x%x\n",
mtk_r32(eth, MTK_PSE_OQ_STA(4)));
pr_info("==============================\n");
if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)) {
atomic_inc(&force);
schedule_work(&eth->pending_work);
}
}
} else if (is_qfsm_hang && is_qfwd_hang) {
err_cnt2++;
if (err_cnt2 == 3) {
pr_info("QDMA Tx Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt2 = %d", err_cnt2);
pr_info("is_qfsm_hang = %d\n", is_qfsm_hang);
pr_info("is_qfwd_hang = %d\n", is_qfwd_hang);
pr_info("is_qfq_hang = %d\n", is_qfq_hang);
pr_info("-- -- -- -- -- -- --\n");
pr_info("MTK_QDMA_FSM = 0x%x\n",
mtk_r32(eth, MTK_QDMA_FSM));
pr_info("MTK_QDMA_FWD_CNT = 0x%x\n",
mtk_r32(eth, MTK_QDMA_FWD_CNT));
pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
mtk_r32(eth, MTK_QDMA_FQ_CNT));
pr_info("==============================\n");
if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)) {
atomic_inc(&force);
schedule_work(&eth->pending_work);
}
}
} else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
err_cnt3++;
if (err_cnt3 == 3) {
pr_info("ADMA Rx Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt3 = %d", err_cnt3);
pr_info("is_oq0_stuck = %d\n", is_oq0_stuck);
pr_info("is_cdm1_busy = %d\n", is_cdm1_busy);
pr_info("is_adma_busy = %d\n", is_adma_busy);
pr_info("-- -- -- -- -- -- --\n");
pr_info("MTK_PSE_OQ_STA1 = 0x%x\n",
mtk_r32(eth, MTK_PSE_OQ_STA(0)));
pr_info("MTK_ADMA_RX_DBG0 = 0x%x\n",
mtk_r32(eth, MTK_ADMA_RX_DBG0));
pr_info("MTK_ADMA_RX_DBG1 = 0x%x\n",
mtk_r32(eth, MTK_ADMA_RX_DBG1));
pr_info("==============================\n");
if ((atomic_read(&reset_lock) == 0) &&
			    (atomic_read(&force) == 0)) {
atomic_inc(&force);
schedule_work(&eth->pending_work);
}
}
} else {
err_cnt1 = 0;
err_cnt2 = 0;
err_cnt3 = 0;
}
prev_wdidx = cur_wdidx;
mod_timer(&eth->mtk_dma_monitor_timer, jiffies + 1 * HZ);
}
void mtk_prepare_reset_fe(struct mtk_eth *eth)
{
u32 i = 0, val = 0;
/* Disable NETSYS Interrupt */
mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
mtk_w32(eth, 0, MTK_PDMA_INT_MASK);
mtk_w32(eth, 0, MTK_QDMA_INT_MASK);
/* Disable Linux netif Tx path */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
netif_tx_disable(eth->netdev[i]);
}
/* Disable QDMA Tx */
val = mtk_r32(eth, MTK_QDMA_GLO_CFG);
mtk_w32(eth, val & ~(MTK_TX_DMA_EN), MTK_QDMA_GLO_CFG);
/* Power down sgmii */
regmap_read(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val |= SGMII_PHYA_PWD;
regmap_write(eth->sgmii->regmap[0], SGMSYS_QPHY_PWR_STATE_CTRL, val);
regmap_read(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val |= SGMII_PHYA_PWD;
regmap_write(eth->sgmii->regmap[1], SGMSYS_QPHY_PWR_STATE_CTRL, val);
/* Force link down GMAC */
val = mtk_r32(eth, MTK_MAC_MCR(0));
mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(0));
val = mtk_r32(eth, MTK_MAC_MCR(1));
mtk_w32(eth, val & ~(MAC_MCR_FORCE_LINK), MTK_MAC_MCR(1));
/* Disable GMAC Rx */
val = mtk_r32(eth, MTK_MAC_MCR(0));
mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(0));
val = mtk_r32(eth, MTK_MAC_MCR(1));
mtk_w32(eth, val & ~(MAC_MCR_RX_EN), MTK_MAC_MCR(1));
/* Enable GDM drop */
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
/* Disable ADMA Rx */
val = mtk_r32(eth, MTK_PDMA_GLO_CFG);
mtk_w32(eth, val & ~(MTK_RX_DMA_EN), MTK_PDMA_GLO_CFG);
}
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id)
{
u32 i = 0, poll_time = 5000, val;
/* Disable KA */
mtk_m32(eth, MTK_PPE_KA_CFG_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
mtk_m32(eth, MTK_PPE_NTU_KA_MASK, 0, MTK_PPE_BIND_LMT_1(ppe_id));
mtk_w32(eth, 0, MTK_PPE_KA(ppe_id));
mdelay(10);
/* Set KA timer to maximum */
mtk_m32(eth, MTK_PPE_NTU_KA_MASK, (0xFF << 16), MTK_PPE_BIND_LMT_1(ppe_id));
mtk_w32(eth, 0xFFFFFFFF, MTK_PPE_KA(ppe_id));
/* Set KA tick select */
mtk_m32(eth, MTK_PPE_TICK_SEL_MASK, (0x1 << 24), MTK_PPE_TB_CFG(ppe_id));
mtk_m32(eth, MTK_PPE_KA_CFG_MASK, (0x3 << 12), MTK_PPE_TB_CFG(ppe_id));
mdelay(10);
/* Disable scan mode */
mtk_m32(eth, MTK_PPE_SCAN_MODE_MASK, 0, MTK_PPE_TB_CFG(ppe_id));
mdelay(10);
/* Check PPE idle */
while (i++ < poll_time) {
val = mtk_r32(eth, MTK_PPE_GLO_CFG(ppe_id));
if (!(val & MTK_PPE_BUSY))
break;
mdelay(1);
}
if (i >= poll_time) {
pr_info("[%s] PPE keeps busy !\n", __func__);
mtk_dump_reg(eth, "FE", 0x0, 0x500);
mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
}
}
static int mtk_eth_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
switch (event) {
case MTK_WIFI_RESET_DONE:
complete(&wait_ser_done);
break;
default:
break;
}
return NOTIFY_DONE;
}
struct notifier_block mtk_eth_netdevice_nb __read_mostly = {
.notifier_call = mtk_eth_netdevice_event,
};


@@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2022 MediaTek Inc.
* Author: Henry Yen <henry.yen@mediatek.com>
*/
#ifndef MTK_ETH_RESET_H
#define MTK_ETH_RESET_H
/* Frame Engine Reset FSM */
#define MTK_FE_START_RESET 0x2000
#define MTK_FE_RESET_DONE 0x2001
#define MTK_WIFI_RESET_DONE 0x2002
#define MTK_NAT_DISABLE 0x3000
#define MTK_FE_RESET_NAT_DONE 0x4001
/* ADMA Rx Debug Monitor */
#define MTK_ADMA_RX_DBG0 (PDMA_BASE + 0x238)
#define MTK_ADMA_RX_DBG1 (PDMA_BASE + 0x23C)
/* PPE Configurations */
#define MTK_PPE_GLO_CFG(x) (PPE_BASE(x) + 0x00)
#define MTK_PPE_TB_CFG(x) (PPE_BASE(x) + 0x1C)
#define MTK_PPE_BIND_LMT_1(x) (PPE_BASE(x) + 0x30)
#define MTK_PPE_KA(x) (PPE_BASE(x) + 0x34)
#define MTK_PPE_KA_CFG_MASK (0x3 << 12)
#define MTK_PPE_NTU_KA_MASK (0xFF << 16)
#define MTK_PPE_KA_T_MASK (0xFFFF << 0)
#define MTK_PPE_TCP_KA_MASK (0xFF << 16)
#define MTK_PPE_UDP_KA_MASK (0xFF << 24)
#define MTK_PPE_TICK_SEL_MASK (0x1 << 24)
#define MTK_PPE_SCAN_MODE_MASK (0x3 << 16)
#define MTK_PPE_BUSY BIT(31)
enum mtk_reset_type {
MTK_TYPE_COLD_RESET = 0,
MTK_TYPE_WARM_RESET,
};
enum mtk_reset_event_id {
MTK_EVENT_FORCE = 0,
MTK_EVENT_WARM_CNT = 1,
MTK_EVENT_COLD_CNT = 2,
MTK_EVENT_TOTAL_CNT = 3,
MTK_EVENT_FQ_EMPTY = 8,
MTK_EVENT_TSO_FAIL = 12,
MTK_EVENT_TSO_ILLEGAL = 13,
MTK_EVENT_TSO_ALIGN = 14,
MTK_EVENT_RFIFO_OV = 18,
MTK_EVENT_RFIFO_UF = 19,
};
extern struct notifier_block mtk_eth_netdevice_nb __read_mostly;
extern struct completion wait_ser_done;
extern char *mtk_reset_event_name[32];
extern atomic_t reset_lock;
irqreturn_t mtk_handle_fe_irq(int irq, void *_eth);
u32 mtk_check_reset_event(struct mtk_eth *eth, u32 status);
int mtk_eth_cold_reset(struct mtk_eth *eth);
int mtk_eth_warm_reset(struct mtk_eth *eth);
void mtk_reset_event_update(struct mtk_eth *eth, u32 id);
void mtk_dump_netsys_info(void *_eth);
void mtk_dma_monitor(struct timer_list *t);
void mtk_prepare_reset_fe(struct mtk_eth *eth);
void mtk_prepare_reset_ppe(struct mtk_eth *eth, u32 ppe_id);
#endif /* MTK_ETH_RESET_H */
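
A hypothetical probe-time wiring sketch (not part of this commit) showing how the pieces declared here fit together; fe_irq is assumed to come from platform_get_irq():

static int example_wire_reset(struct mtk_eth *eth, int fe_irq)
{
	int err;

	/* route the FE misc interrupt to the reset-event decoder */
	err = devm_request_irq(eth->dev, fe_irq, mtk_handle_fe_irq,
			       0, "mtk_fe_misc", eth);
	if (err)
		return err;

	/* start the one-second DMA hang monitor */
	timer_setup(&eth->mtk_dma_monitor_timer, mtk_dma_monitor, 0);
	mod_timer(&eth->mtk_dma_monitor_timer, jiffies + HZ);
	return 0;
}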

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,5 @@
ccflags-y := -Werror
obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtkhnat.o
mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o hnat_mcast.o
mtkhnat-$(CONFIG_NET_DSA_MT7530) += hnat_stag.o


@@ -0,0 +1,912 @@
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
* Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
*/
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include "nf_hnat_mtk.h"
#include "hnat.h"
struct mtk_hnat *hnat_priv;
static struct socket *_hnat_roam_sock;
static struct work_struct _hnat_roam_work;
int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
EXPORT_SYMBOL(ra_sw_nat_hook_rx);
int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
EXPORT_SYMBOL(ra_sw_nat_hook_tx);
int (*ppe_del_entry_by_mac)(unsigned char *mac) = NULL;
EXPORT_SYMBOL(ppe_del_entry_by_mac);
void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_register_hook);
void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_unregister_hook);
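/* These exported pointers form the contract with out-of-tree consumers
 * such as a WLAN driver. Hypothetical consumer-side sketch:
 *
 *	if (ra_sw_nat_hook_tx)
 *		ra_sw_nat_hook_tx(skb, gmac_no);
 */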
static void hnat_sma_build_entry(struct timer_list *t)
{
int i;
for (i = 0; i < CFG_PPE_NUM; i++)
cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
SMA, SMA_FWD_CPU_BUILD_ENTRY);
}
void hnat_cache_ebl(int enable)
{
int i;
for (i = 0; i < CFG_PPE_NUM; i++) {
cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 1);
cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 0);
cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
}
}
static void hnat_reset_timestamp(struct timer_list *t)
{
struct foe_entry *entry;
int hash_index;
hnat_cache_ebl(0);
cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 0);
cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 0);
writel(0, hnat_priv->fe_base + 0x0010);
for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
entry = hnat_priv->foe_table_cpu[0] + hash_index;
if (entry->bfib1.state == BIND)
entry->bfib1.time_stamp =
readl(hnat_priv->fe_base + 0x0010) & (0xFFFF);
}
cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 1);
cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 1);
hnat_cache_ebl(1);
mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
}
static void cr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
val |= bs;
writel(val, reg);
}
static void cr_clr_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
val &= ~bs;
writel(val, reg);
}
void cr_set_field(void __iomem *reg, u32 field, u32 val)
{
unsigned int tv = readl(reg);
tv &= ~field;
tv |= ((val) << (ffs((unsigned int)field) - 1));
writel(tv, reg);
}
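/* Worked example (hypothetical values): with field = SMA (0x3 << 4) and
 * val = SMA_FWD_CPU_BUILD_ENTRY, ffs(field) - 1 == 4, so both SMA bits
 * are cleared and val is written back at bit offset 4. */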
/* boundary entries can't be used to accelerate data flow */
static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
{
int entry_base = 0;
int bad_entry, i, j;
struct foe_entry *foe_entry;
	/* these entries are boundaries, repeating every 128 entries */
	int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115 };
if (!foe_table_cpu)
return;
for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) {
		/* set boundary entries as static */
for (j = 0; j < 8; j++) {
bad_entry = entry_base + boundary_entry_offset[j];
foe_entry = &foe_table_cpu[bad_entry];
foe_entry->udib1.sta = 1;
}
entry_base = (i + 1) * 128;
}
}
void set_gmac_ppe_fwd(int id, int enable)
{
void __iomem *reg;
u32 val;
reg = hnat_priv->fe_base + (id ? GDMA2_FWD_CFG : GDMA1_FWD_CFG);
if (enable) {
cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
return;
}
	/* disabled */
val = readl(reg);
if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
cr_set_field(reg, GDM_ALL_FRC_MASK,
BITS_GDM_ALL_FRC_P_CPU_PDMA);
}
static int entry_mac_cmp(struct foe_entry *entry, u8 *mac)
{
int ret = 0;
	if (IS_IPV4_GRP(entry)) {
		if (((swab32(entry->ipv4_hnapt.dmac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv4_hnapt.dmac_lo) == *(u16 *)&mac[4])) ||
		    ((swab32(entry->ipv4_hnapt.smac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv4_hnapt.smac_lo) == *(u16 *)&mac[4])))
			ret = 1;
	} else {
		if (((swab32(entry->ipv6_5t_route.dmac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv6_5t_route.dmac_lo) == *(u16 *)&mac[4])) ||
		    ((swab32(entry->ipv6_5t_route.smac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv6_5t_route.smac_lo) == *(u16 *)&mac[4])))
			ret = 1;
	}
if (ret && debug_level >= 2)
pr_info("mac=%pM\n", mac);
return ret;
}
int entry_delete_by_mac(u8 *mac)
{
struct foe_entry *entry = NULL;
int index, i, ret = 0;
for (i = 0; i < CFG_PPE_NUM; i++) {
entry = hnat_priv->foe_table_cpu[i];
for (index = 0; index < DEF_ETRY_NUM; entry++, index++) {
			if (entry->bfib1.state == BIND && entry_mac_cmp(entry, mac)) {
memset(entry, 0, sizeof(*entry));
hnat_cache_ebl(1);
if (debug_level >= 2)
pr_info("delete entry idx = %d\n", index);
ret++;
}
}
}
	if (!ret && debug_level >= 2)
pr_info("entry not found\n");
return ret;
}
static void hnat_roam_handler(struct work_struct *work)
{
struct kvec iov;
struct msghdr msg;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
struct nlattr *nla;
u8 rcv_buf[512];
int len;
if (!_hnat_roam_sock)
return;
iov.iov_base = rcv_buf;
iov.iov_len = sizeof(rcv_buf);
memset(&msg, 0, sizeof(msg));
msg.msg_namelen = sizeof(struct sockaddr_nl);
len = kernel_recvmsg(_hnat_roam_sock, &msg, &iov, 1, iov.iov_len, 0);
if (len <= 0)
goto out;
	nlh = (struct nlmsghdr *)rcv_buf;
if (!NLMSG_OK(nlh, len) || nlh->nlmsg_type != RTM_NEWNEIGH)
goto out;
len = nlh->nlmsg_len - NLMSG_HDRLEN;
ndm = (struct ndmsg *)NLMSG_DATA(nlh);
if (ndm->ndm_family != PF_BRIDGE)
goto out;
nla = (struct nlattr *)((u8 *)ndm + sizeof(struct ndmsg));
len -= NLMSG_LENGTH(sizeof(struct ndmsg));
while (nla_ok(nla, len)) {
if (nla_type(nla) == NDA_LLADDR) {
entry_delete_by_mac(nla_data(nla));
}
nla = nla_next(nla, &len);
}
out:
schedule_work(&_hnat_roam_work);
}
static int hnat_roaming_enable(void)
{
struct socket *sock = NULL;
struct sockaddr_nl addr;
int ret;
INIT_WORK(&_hnat_roam_work, hnat_roam_handler);
ret = sock_create_kern(&init_net, AF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
if (ret < 0)
goto out;
_hnat_roam_sock = sock;
addr.nl_family = AF_NETLINK;
addr.nl_pad = 0;
addr.nl_pid = 65534;
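	/* 1 << (RTNLGRP_NEIGH - 1) is the legacy RTMGRP_NEIGH bitmask, so
	 * this socket receives bridge FDB/neighbour updates for roaming. */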
addr.nl_groups = 1 << (RTNLGRP_NEIGH - 1);
ret = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
if (ret < 0)
goto out;
schedule_work(&_hnat_roam_work);
pr_info("hnat roaming work enable\n");
return 0;
out:
if (sock)
sock_release(sock);
return ret;
}
static void hnat_roaming_disable(void)
{
if (_hnat_roam_sock)
sock_release(_hnat_roam_sock);
_hnat_roam_sock = NULL;
pr_info("hnat roaming work disable\n");
}
static int hnat_hw_init(u32 ppe_id)
{
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
/* setup hashing */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ETRY_NUM, hnat_priv->etry_num_cfg);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
writel(HASH_SEED_KEY, hnat_priv->ppe_base[ppe_id] + PPE_HASH_SEED);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, XMODE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
/* set ip proto */
writel(0xFFFFFFFF, hnat_priv->ppe_base[ppe_id] + PPE_IP_PROT_CHK);
/* setup caching */
hnat_cache_ebl(1);
/* enable FOE */
cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK |
BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);
if (hnat_priv->data->version == MTK_HNAT_V4)
cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
/* setup FOE aging */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_MNP, 1000);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_DLTA, 3);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, UDP_DLTA, 12);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, NTU_DLTA, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, FIN_DLTA, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, TCP_DLTA, 7);
/* setup FOE ka */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 0);
mdelay(10);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 2);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 3);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TICK_SEL, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 1);
/* setup FOE rate limit */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, QURT_LMT, 16383);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, HALF_LMT, 16383);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, FULL_LMT, 16383);
/* setup binding threshold as 30 packets per second */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BNDR, BIND_RATE, 0x1E);
/* setup FOE cf gen */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 1);
writel(0, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); /* pdma */
/* writel(0x55555555, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); */ /* qdma */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);
if (hnat_priv->data->version == MTK_HNAT_V4) {
writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1);
writel(0x7f, hnat_priv->ppe_base[ppe_id] + PPE_SBW_CTRL);
}
/*enable ppe mib counter*/
if (hnat_priv->data->per_flow_accounting) {
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_EN, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_READ_CLEAR, 1);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1);
}
hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
dev_info(hnat_priv->dev, "PPE%d hwnat start\n", ppe_id);
return 0;
}
static int hnat_start(u32 ppe_id)
{
u32 foe_table_sz;
u32 foe_mib_tb_sz;
	int etry_num_cfg;	/* signed so the shrink loop below can terminate */
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
	/* map the FOE table, halving the entry count until the allocation fits */
	for (etry_num_cfg = DEF_ETRY_NUM_CFG; etry_num_cfg >= 0;
	     etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
hnat_priv->foe_table_cpu[ppe_id] = dma_alloc_coherent(
hnat_priv->dev, foe_table_sz,
&hnat_priv->foe_table_dev[ppe_id], GFP_KERNEL);
if (hnat_priv->foe_table_cpu[ppe_id])
break;
}
if (!hnat_priv->foe_table_cpu[ppe_id])
		return -ENOMEM;
dev_info(hnat_priv->dev, "PPE%d entry number = %d\n",
ppe_id, hnat_priv->foe_etry_num);
writel(hnat_priv->foe_table_dev[ppe_id], hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
if (hnat_priv->data->version == MTK_HNAT_V1)
exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
if (hnat_priv->data->per_flow_accounting) {
foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
hnat_priv->foe_mib_cpu[ppe_id] =
dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
&hnat_priv->foe_mib_dev[ppe_id], GFP_KERNEL);
if (!hnat_priv->foe_mib_cpu[ppe_id])
			return -ENOMEM;
writel(hnat_priv->foe_mib_dev[ppe_id],
hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
memset(hnat_priv->foe_mib_cpu[ppe_id], 0, foe_mib_tb_sz);
hnat_priv->acct[ppe_id] =
kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
GFP_KERNEL);
if (!hnat_priv->acct[ppe_id])
			return -ENOMEM;
}
hnat_priv->etry_num_cfg = etry_num_cfg;
hnat_hw_init(ppe_id);
return 0;
}
static int ppe_busy_wait(u32 ppe_id)
{
unsigned long t_start = jiffies;
u32 r = 0;
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
while (1) {
r = readl((hnat_priv->ppe_base[ppe_id] + 0x0));
if (!(r & BIT(31)))
return 0;
if (time_after(jiffies, t_start + HZ))
break;
mdelay(10);
}
dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);
	return -ETIMEDOUT;
}
static void hnat_stop(u32 ppe_id)
{
u32 foe_table_sz;
u32 foe_mib_tb_sz;
struct foe_entry *entry, *end;
if (ppe_id >= CFG_PPE_NUM)
return;
/* send all traffic back to the DMA engine */
set_gmac_ppe_fwd(0, 0);
set_gmac_ppe_fwd(1, 0);
dev_info(hnat_priv->dev, "hwnat stop\n");
if (hnat_priv->foe_table_cpu[ppe_id]) {
entry = hnat_priv->foe_table_cpu[ppe_id];
end = hnat_priv->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num;
while (entry < end) {
entry->bfib1.state = INVALID;
entry++;
}
}
/* disable caching */
hnat_cache_ebl(0);
/* flush cache has to be ahead of hnat disable --*/
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 0);
/* disable scan mode and keep-alive */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0);
ppe_busy_wait(ppe_id);
/* disable FOE */
cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN |
BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN |
BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);
if (hnat_priv->data->version == MTK_HNAT_V4)
cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
/* disable FOE aging */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 0);
/* free the FOE table */
foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
if (hnat_priv->foe_table_cpu[ppe_id])
dma_free_coherent(hnat_priv->dev, foe_table_sz,
hnat_priv->foe_table_cpu[ppe_id],
hnat_priv->foe_table_dev[ppe_id]);
hnat_priv->foe_table_cpu[ppe_id] = NULL;
writel(0, hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
if (hnat_priv->data->per_flow_accounting) {
foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
if (hnat_priv->foe_mib_cpu[ppe_id])
dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz,
hnat_priv->foe_mib_cpu[ppe_id],
hnat_priv->foe_mib_dev[ppe_id]);
writel(0, hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
kfree(hnat_priv->acct[ppe_id]);
}
}
static void hnat_release_netdev(void)
{
int i;
struct extdev_entry *ext_entry;
for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
ext_entry = hnat_priv->ext_if[i];
if (ext_entry->dev)
dev_put(ext_entry->dev);
ext_if_del(ext_entry);
kfree(ext_entry);
}
if (hnat_priv->g_ppdev)
dev_put(hnat_priv->g_ppdev);
if (hnat_priv->g_wandev)
dev_put(hnat_priv->g_wandev);
}
static struct notifier_block nf_hnat_netdevice_nb __read_mostly = {
.notifier_call = nf_hnat_netdevice_event,
};
static struct notifier_block nf_hnat_netevent_nb __read_mostly = {
.notifier_call = nf_hnat_netevent_handler,
};
int hnat_enable_hook(void)
{
	/* register hook functions used by the WHNAT module */
if (hnat_priv->data->whnat) {
ra_sw_nat_hook_rx =
(hnat_priv->data->version == MTK_HNAT_V4) ?
mtk_sw_nat_hook_rx : NULL;
ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
ppe_dev_register_hook = mtk_ppe_dev_register_hook;
ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook;
}
if (hnat_register_nf_hooks())
return -1;
ppe_del_entry_by_mac = entry_delete_by_mac;
hook_toggle = 1;
return 0;
}
int hnat_disable_hook(void)
{
int i, hash_index;
struct foe_entry *entry;
ra_sw_nat_hook_tx = NULL;
ra_sw_nat_hook_rx = NULL;
hnat_unregister_nf_hooks();
for (i = 0; i < CFG_PPE_NUM; i++) {
cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
SMA, SMA_ONLY_FWD_CPU);
for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
entry = hnat_priv->foe_table_cpu[i] + hash_index;
if (entry->bfib1.state == BIND) {
entry->ipv4_hnapt.udib1.state = INVALID;
entry->ipv4_hnapt.udib1.time_stamp =
readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
}
}
}
/* clear HWNAT cache */
hnat_cache_ebl(1);
mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
ppe_del_entry_by_mac = NULL;
hook_toggle = 0;
return 0;
}
int hnat_warm_init(void)
{
u32 foe_table_sz, foe_mib_tb_sz, ppe_id = 0;
unregister_netevent_notifier(&nf_hnat_netevent_nb);
for (ppe_id = 0; ppe_id < CFG_PPE_NUM; ppe_id++) {
foe_table_sz =
hnat_priv->foe_etry_num * sizeof(struct foe_entry);
writel(hnat_priv->foe_table_dev[ppe_id],
hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
if (hnat_priv->data->version == MTK_HNAT_V1)
exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
if (hnat_priv->data->per_flow_accounting) {
foe_mib_tb_sz =
hnat_priv->foe_etry_num * sizeof(struct mib_entry);
writel(hnat_priv->foe_mib_dev[ppe_id],
hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
memset(hnat_priv->foe_mib_cpu[ppe_id], 0,
foe_mib_tb_sz);
}
hnat_hw_init(ppe_id);
}
set_gmac_ppe_fwd(0, 1);
set_gmac_ppe_fwd(1, 1);
register_netevent_notifier(&nf_hnat_netevent_nb);
return 0;
}
static struct packet_type mtk_pack_type __read_mostly = {
.type = HQOS_MAGIC_TAG,
.func = mtk_hqos_ptype_cb,
};
static int hnat_probe(struct platform_device *pdev)
{
int i;
int err = 0;
int index = 0;
struct resource *res;
const char *name;
struct device_node *np;
unsigned int val;
struct property *prop;
struct extdev_entry *ext_entry;
const struct of_device_id *match;
hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL);
if (!hnat_priv)
return -ENOMEM;
hnat_priv->foe_etry_num = DEF_ETRY_NUM;
match = of_match_device(of_hnat_match, &pdev->dev);
if (unlikely(!match))
return -EINVAL;
hnat_priv->data = (struct mtk_hnat_data *)match->data;
hnat_priv->dev = &pdev->dev;
np = hnat_priv->dev->of_node;
err = of_property_read_string(np, "mtketh-wan", &name);
if (err < 0)
return -EINVAL;
strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ - 1);
dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan);
err = of_property_read_string(np, "mtketh-lan", &name);
if (err < 0)
strncpy(hnat_priv->lan, "eth0", IFNAMSIZ);
else
strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ - 1);
dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);
err = of_property_read_string(np, "mtketh-ppd", &name);
if (err < 0)
strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ);
else
strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ - 1);
dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd);
	/* get the total number of GMACs used by hnat */
err = of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val);
if (err < 0)
return -EINVAL;
hnat_priv->gmac_num = val;
dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num);
err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val);
if (err < 0) {
hnat_priv->wan_dsa_port = NONE_DSA_PORT;
} else {
hnat_priv->wan_dsa_port = val;
dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port);
}
err = of_property_read_u32_index(np, "mtketh-ppe-num", 0, &val);
if (err < 0)
hnat_priv->ppe_num = 1;
else
hnat_priv->ppe_num = val;
dev_info(&pdev->dev, "ppe num = %d\n", hnat_priv->ppe_num);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
	hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
						  resource_size(res));
if (!hnat_priv->fe_base)
return -EADDRNOTAVAIL;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0x2200;
if (CFG_PPE_NUM > 1)
hnat_priv->ppe_base[1] = hnat_priv->fe_base + 0x2600;
#else
hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0xe00;
#endif
err = hnat_init_debugfs(hnat_priv);
if (err)
return err;
prop = of_find_property(np, "ext-devices", NULL);
for (name = of_prop_next_string(prop, NULL); name;
name = of_prop_next_string(prop, name), index++) {
ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
if (!ext_entry) {
err = -ENOMEM;
goto err_out1;
}
strncpy(ext_entry->name, (char *)name, IFNAMSIZ - 1);
ext_if_add(ext_entry);
}
for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
ext_entry = hnat_priv->ext_if[i];
dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name);
}
hnat_priv->lvid = 1;
hnat_priv->wvid = 2;
for (i = 0; i < CFG_PPE_NUM; i++) {
err = hnat_start(i);
if (err)
goto err_out;
}
if (hnat_priv->data->whnat) {
err = whnat_adjust_nf_hooks();
if (err)
goto err_out;
}
err = hnat_enable_hook();
if (err)
goto err_out;
register_netdevice_notifier(&nf_hnat_netdevice_nb);
register_netevent_notifier(&nf_hnat_netevent_nb);
if (hnat_priv->data->mcast) {
for (i = 0; i < CFG_PPE_NUM; i++)
hnat_mcast_enable(i);
}
timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0);
if (hnat_priv->data->version == MTK_HNAT_V3) {
timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0);
hnat_priv->hnat_reset_timestamp_timer.expires = jiffies;
add_timer(&hnat_priv->hnat_reset_timestamp_timer);
}
if (IS_HQOS_MODE && IS_GMAC1_MODE)
dev_add_pack(&mtk_pack_type);
err = hnat_roaming_enable();
if (err)
pr_info("hnat roaming work fail\n");
return 0;
err_out:
for (i = 0; i < CFG_PPE_NUM; i++)
hnat_stop(i);
err_out1:
hnat_deinit_debugfs(hnat_priv);
for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
ext_entry = hnat_priv->ext_if[i];
ext_if_del(ext_entry);
kfree(ext_entry);
}
return err;
}
static int hnat_remove(struct platform_device *pdev)
{
int i;
hnat_roaming_disable();
unregister_netdevice_notifier(&nf_hnat_netdevice_nb);
unregister_netevent_notifier(&nf_hnat_netevent_nb);
hnat_disable_hook();
if (hnat_priv->data->mcast)
hnat_mcast_disable();
for (i = 0; i < CFG_PPE_NUM; i++)
hnat_stop(i);
hnat_deinit_debugfs(hnat_priv);
hnat_release_netdev();
del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer);
if (hnat_priv->data->version == MTK_HNAT_V3)
del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer);
if (IS_HQOS_MODE && IS_GMAC1_MODE)
dev_remove_pack(&mtk_pack_type);
return 0;
}
static const struct mtk_hnat_data hnat_data_v1 = {
.num_of_sch = 2,
.whnat = false,
.per_flow_accounting = false,
.mcast = false,
.version = MTK_HNAT_V1,
};
static const struct mtk_hnat_data hnat_data_v2 = {
.num_of_sch = 2,
.whnat = true,
.per_flow_accounting = true,
.mcast = false,
.version = MTK_HNAT_V2,
};
static const struct mtk_hnat_data hnat_data_v3 = {
.num_of_sch = 4,
.whnat = false,
.per_flow_accounting = false,
.mcast = false,
.version = MTK_HNAT_V3,
};
static const struct mtk_hnat_data hnat_data_v4 = {
.num_of_sch = 4,
.whnat = true,
.per_flow_accounting = true,
.mcast = false,
.version = MTK_HNAT_V4,
};
const struct of_device_id of_hnat_match[] = {
{ .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
{ .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
{ .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
{ .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
{ .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
{},
};
MODULE_DEVICE_TABLE(of, of_hnat_match);
static struct platform_driver hnat_driver = {
.probe = hnat_probe,
.remove = hnat_remove,
.driver = {
.name = "mediatek_soc_hnat",
.of_match_table = of_hnat_match,
},
};
module_platform_driver(hnat_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Mediatek Hardware NAT");


@@ -0,0 +1,975 @@
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
* Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
*/
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <net/netevent.h>
#include <linux/mod_devicetable.h>
#include "hnat_mcast.h"
/*--------------------------------------------------------------------------*/
/* Register Offset*/
/*--------------------------------------------------------------------------*/
#define PPE_GLO_CFG 0x00
#define PPE_FLOW_CFG 0x04
#define PPE_IP_PROT_CHK 0x08
#define PPE_IP_PROT_0 0x0C
#define PPE_IP_PROT_1 0x10
#define PPE_IP_PROT_2 0x14
#define PPE_IP_PROT_3 0x18
#define PPE_TB_CFG 0x1C
#define PPE_TB_BASE 0x20
#define PPE_TB_USED 0x24
#define PPE_BNDR 0x28
#define PPE_BIND_LMT_0 0x2C
#define PPE_BIND_LMT_1 0x30
#define PPE_KA 0x34
#define PPE_UNB_AGE 0x38
#define PPE_BND_AGE_0 0x3C
#define PPE_BND_AGE_1 0x40
#define PPE_HASH_SEED 0x44
#define PPE_DFT_CPORT 0x48
#define PPE_DFT_CPORT1 0x4C
#define PPE_MCAST_PPSE 0x84
#define PPE_MCAST_L_0 0x88
#define PPE_MCAST_H_0 0x8C
#define PPE_MCAST_L_1 0x90
#define PPE_MCAST_H_1 0x94
#define PPE_MCAST_L_2 0x98
#define PPE_MCAST_H_2 0x9C
#define PPE_MCAST_L_3 0xA0
#define PPE_MCAST_H_3 0xA4
#define PPE_MCAST_L_4 0xA8
#define PPE_MCAST_H_4 0xAC
#define PPE_MCAST_L_5 0xB0
#define PPE_MCAST_H_5 0xB4
#define PPE_MCAST_L_6 0xBC
#define PPE_MCAST_H_6 0xC0
#define PPE_MCAST_L_7 0xC4
#define PPE_MCAST_H_7 0xC8
#define PPE_MCAST_L_8 0xCC
#define PPE_MCAST_H_8 0xD0
#define PPE_MCAST_L_9 0xD4
#define PPE_MCAST_H_9 0xD8
#define PPE_MCAST_L_A 0xDC
#define PPE_MCAST_H_A 0xE0
#define PPE_MCAST_L_B 0xE4
#define PPE_MCAST_H_B 0xE8
#define PPE_MCAST_L_C 0xEC
#define PPE_MCAST_H_C 0xF0
#define PPE_MCAST_L_D 0xF4
#define PPE_MCAST_H_D 0xF8
#define PPE_MCAST_L_E 0xFC
#define PPE_MCAST_H_E 0xE0
#define PPE_MCAST_L_F 0x100
#define PPE_MCAST_H_F 0x104
#define PPE_MCAST_L_10 0xC00
#define PPE_MCAST_H_10 0xC04
#define PPE_MTU_DRP 0x108
#define PPE_MTU_VLYR_0 0x10C
#define PPE_MTU_VLYR_1 0x110
#define PPE_MTU_VLYR_2 0x114
#define PPE_VPM_TPID 0x118
#define PPE_CAH_CTRL 0x120
#define PPE_CAH_TAG_SRH 0x124
#define PPE_CAH_LINE_RW 0x128
#define PPE_CAH_WDATA 0x12C
#define PPE_CAH_RDATA 0x130
#define PPE_MIB_CFG 0x134
#define PPE_MIB_TB_BASE 0x138
#define PPE_MIB_SER_CR 0x13C
#define PPE_MIB_SER_R0 0x140
#define PPE_MIB_SER_R1 0x144
#define PPE_MIB_SER_R2 0x148
#define PPE_MIB_CAH_CTRL 0x150
#define PPE_MIB_CAH_TAG_SRH 0x154
#define PPE_MIB_CAH_LINE_RW 0x158
#define PPE_MIB_CAH_WDATA 0x15C
#define PPE_MIB_CAH_RDATA 0x160
#define PPE_SBW_CTRL 0x174
#define GDMA1_FWD_CFG 0x500
#define GDMA2_FWD_CFG 0x1500
/* QDMA Tx queue configuration */
#define QTX_CFG(x) (QDMA_BASE + ((x) * 0x10))
#define QTX_CFG_HW_RESV_CNT_OFFSET (8)
#define QTX_CFG_SW_RESV_CNT_OFFSET (0)
#define QTX_SCH(x) (QDMA_BASE + 0x4 + ((x) * 0x10))
#define QTX_SCH_MIN_RATE_EN BIT(27)
#define QTX_SCH_MAX_RATE_EN BIT(11)
#define QTX_SCH_MIN_RATE_MAN_OFFSET (20)
#define QTX_SCH_MIN_RATE_EXP_OFFSET (16)
#define QTX_SCH_MAX_RATE_WGHT_OFFSET (12)
#define QTX_SCH_MAX_RATE_MAN_OFFSET (4)
#define QTX_SCH_MAX_RATE_EXP_OFFSET (0)
/* QDMA Tx scheduler configuration */
#define QDMA_PAGE (QDMA_BASE + 0x1f0)
#define QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214)
#define QTX_MIB_IF (QDMA_BASE + 0x2bc)
#define QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
#define QDMA_TX_SCH_WFQ_EN BIT(15)
/*--------------------------------------------------------------------------*/
/* Register Mask*/
/*--------------------------------------------------------------------------*/
/* PPE_TB_CFG mask */
#define TB_ETRY_NUM (0x7 << 0) /* RW */
#define TB_ENTRY_SIZE (0x1 << 3) /* RW */
#define SMA (0x3 << 4) /* RW */
#define NTU_AGE (0x1 << 7) /* RW */
#define UNBD_AGE (0x1 << 8) /* RW */
#define TCP_AGE (0x1 << 9) /* RW */
#define UDP_AGE (0x1 << 10) /* RW */
#define FIN_AGE (0x1 << 11) /* RW */
#define KA_CFG (0x3 << 12)
#define HASH_MODE (0x3 << 14) /* RW */
#define SCAN_MODE (0x3 << 16) /* RW */
#define XMODE (0x3 << 18) /* RW */
#define TICK_SEL (0x1 << 24) /* RW */
/*PPE_CAH_CTRL mask*/
#define CAH_EN (0x1 << 0) /* RW */
#define CAH_X_MODE (0x1 << 9) /* RW */
/*PPE_UNB_AGE mask*/
#define UNB_DLTA (0xff << 0) /* RW */
#define UNB_MNP (0xffff << 16) /* RW */
/*PPE_BND_AGE_0 mask*/
#define UDP_DLTA (0xffff << 0) /* RW */
#define NTU_DLTA (0xffff << 16) /* RW */
/*PPE_BND_AGE_1 mask*/
#define TCP_DLTA (0xffff << 0) /* RW */
#define FIN_DLTA (0xffff << 16) /* RW */
/*PPE_KA mask*/
#define KA_T (0xffff << 0) /* RW */
#define TCP_KA (0xff << 16) /* RW */
#define UDP_KA (0xff << 24) /* RW */
/*PPE_BIND_LMT_0 mask*/
#define QURT_LMT (0x3ff << 0) /* RW */
#define HALF_LMT (0x3ff << 16) /* RW */
/*PPE_BIND_LMT_1 mask*/
#define FULL_LMT (0x3fff << 0) /* RW */
#define NTU_KA (0xff << 16) /* RW */
/*PPE_BNDR mask*/
#define BIND_RATE (0xffff << 0) /* RW */
#define PBND_RD_PRD (0xffff << 16) /* RW */
/*PPE_GLO_CFG mask*/
#define PPE_EN (0x1 << 0) /* RW */
#define TTL0_DRP (0x1 << 4) /* RW */
#define MCAST_TB_EN (0x1 << 7) /* RW */
#define MCAST_HASH (0x3 << 12) /* RW */
#define MC_P3_PPSE (0xf << 12) /* RW */
#define MC_P2_PPSE (0xf << 8) /* RW */
#define MC_P1_PPSE (0xf << 4) /* RW */
#define MC_P0_PPSE (0xf << 0) /* RW */
#define MIB_EN (0x1 << 0) /* RW */
#define MIB_READ_CLEAR (0x1 << 1) /* RW */
#define MIB_CAH_EN (0x1 << 0) /* RW */
/*GDMA_FWD_CFG mask */
#define GDM_UFRC_MASK (0x7 << 12) /* RW */
#define GDM_BFRC_MASK (0x7 << 8) /*RW*/
#define GDM_MFRC_MASK (0x7 << 4) /*RW*/
#define GDM_OFRC_MASK (0x7 << 0) /*RW*/
#define GDM_ALL_FRC_MASK \
(GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK)
/*QDMA_PAGE mask*/
#define QTX_CFG_PAGE (0xf << 0) /* RW */
/*QTX_MIB_IF mask*/
#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */
#define VQTX_MIB_EN (0x1 << 28) /* RW */
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_unbind_info_blk {
u32 time_stamp : 8;
u32 sp : 4;
u32 pcnt : 8;
u32 ilgf : 1;
u32 mc : 1;
u32 preb : 1;
u32 pkt_type : 5;
u32 state : 2;
u32 udp : 1;
u32 sta : 1; /* static entry */
} __packed;
struct hnat_bind_info_blk {
u32 time_stamp : 8;
u32 sp : 4;
u32 mc : 1;
u32 ka : 1; /* keep alive */
u32 vlan_layer : 3;
u32 psn : 1; /* egress packet has PPPoE session */
u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
u32 ps : 1; /* packet sampling */
u32 cah : 1; /* cacheable flag */
u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
u32 ttl : 1;
u32 pkt_type : 5;
u32 state : 2;
u32 udp : 1;
u32 sta : 1; /* static entry */
} __packed;
struct hnat_info_blk2 {
u32 qid : 7; /* QID in Qos Port */
u32 port_mg : 1;
u32 fqos : 1; /* force to PSE QoS port */
u32 dp : 4; /* force to PSE port x */
u32 mcast : 1; /* multicast this packet to CPU */
u32 pcpl : 1; /* OSBN */
u32 mibf : 1;
u32 alen : 1;
u32 rxid : 2;
u32 winfoi : 1;
u32 port_ag : 4;
u32 dscp : 8; /* DSCP value */
} __packed;
struct hnat_winfo {
u32 bssid : 6; /* WiFi Bssidx */
u32 wcid : 10; /* WiFi wtable Idx */
} __packed;
#else
struct hnat_unbind_info_blk {
u32 time_stamp : 8;
u32 pcnt : 16; /* packet count */
u32 preb : 1;
u32 pkt_type : 3;
u32 state : 2;
u32 udp : 1;
u32 sta : 1; /* static entry */
} __packed;
struct hnat_bind_info_blk {
u32 time_stamp : 15;
u32 ka : 1; /* keep alive */
u32 vlan_layer : 3;
u32 psn : 1; /* egress packet has PPPoE session */
u32 vpm : 1; /* 0:ethertype remark, 1:0x8100(CR default) */
u32 ps : 1; /* packet sampling */
u32 cah : 1; /* cacheable flag */
u32 rmt : 1; /* remove tunnel ip header (6rd/dslite only) */
u32 ttl : 1;
u32 pkt_type : 3;
u32 state : 2;
u32 udp : 1;
u32 sta : 1; /* static entry */
} __packed;
struct hnat_info_blk2 {
u32 qid : 4; /* QID in Qos Port */
u32 fqos : 1; /* force to PSE QoS port */
u32 dp : 3; /* force to PSE port x
* 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
*/
u32 mcast : 1; /* multicast this packet to CPU */
u32 pcpl : 1; /* OSBN */
u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
u32 alen : 1; /* 0:post 1:pre packet length in accounting */
u32 port_mg : 6; /* port meter group */
u32 port_ag : 6; /* port account group */
u32 dscp : 8; /* DSCP value */
} __packed;
struct hnat_winfo {
u32 bssid : 6; /* WiFi Bssidx */
u32 wcid : 8; /* WiFi wtable Idx */
u32 rxid : 2; /* WiFi Ring idx */
} __packed;
#endif
/* info blk2 for WHNAT */
struct hnat_info_blk2_whnat {
u32 qid : 4; /* QID[3:0] in Qos Port */
u32 fqos : 1; /* force to PSE QoS port */
u32 dp : 3; /* force to PSE port x
* 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP
*/
u32 mcast : 1; /* multicast this packet to CPU */
u32 pcpl : 1; /* OSBN */
u32 mibf : 1; /* 0:off 1:on PPE MIB counter */
u32 alen : 1; /* 0:post 1:pre packet length in accounting */
u32 qid2 : 2; /* QID[5:4] in Qos Port */
u32 resv : 2;
u32 wdmaid : 1; /* 0:to pcie0 dev 1:to pcie1 dev */
u32 winfoi : 1; /* 0:off 1:on Wi-Fi hwnat support */
u32 port_ag : 6; /* port account group */
u32 dscp : 8; /* DSCP value */
} __packed;
struct hnat_ipv4_hnapt {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 sip;
u32 dip;
u16 dport;
u16 sport;
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u32 new_sip;
u32 new_dip;
u16 new_dport;
u16 new_sport;
u16 m_timestamp; /* For mcast*/
u16 resv1;
u32 resv2;
u32 resv3 : 26;
u32 act_dp : 6; /* UDF */
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
} __packed;
struct hnat_ipv4_dslite {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 sip;
u32 dip;
u16 dport;
u16 sport;
u32 tunnel_sipv6_0;
u32 tunnel_sipv6_1;
u32 tunnel_sipv6_2;
u32 tunnel_sipv6_3;
u32 tunnel_dipv6_0;
u32 tunnel_dipv6_1;
u32 tunnel_dipv6_2;
u32 tunnel_dipv6_3;
u8 flow_lbl[3]; /* kept consistent with the Linux kernel layout (should be 20 bits) */
u8 priority; /* kept consistent with the Linux kernel layout (should be 8 bits) */
u32 hop_limit : 8;
u32 resv2 : 18;
u32 act_dp : 6; /* UDF */
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
u32 new_sip;
u32 new_dip;
u16 new_dport;
u16 new_sport;
#endif
} __packed;
struct hnat_ipv6_3t_route {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 ipv6_sip0;
u32 ipv6_sip1;
u32 ipv6_sip2;
u32 ipv6_sip3;
u32 ipv6_dip0;
u32 ipv6_dip1;
u32 ipv6_dip2;
u32 ipv6_dip3;
u32 prot : 8;
u32 hph : 24; /* hash placeholder */
u32 resv1;
u32 resv2;
u32 resv3;
u32 resv4 : 26;
u32 act_dp : 6; /* UDF */
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
} __packed;
struct hnat_ipv6_5t_route {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 ipv6_sip0;
u32 ipv6_sip1;
u32 ipv6_sip2;
u32 ipv6_sip3;
u32 ipv6_dip0;
u32 ipv6_dip1;
u32 ipv6_dip2;
u32 ipv6_dip3;
u16 dport;
u16 sport;
u32 resv1;
u32 resv2;
u32 resv3;
u32 resv4 : 26;
u32 act_dp : 6; /* UDF */
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
} __packed;
struct hnat_ipv6_6rd {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 ipv6_sip0;
u32 ipv6_sip1;
u32 ipv6_sip2;
u32 ipv6_sip3;
u32 ipv6_dip0;
u32 ipv6_dip1;
u32 ipv6_dip2;
u32 ipv6_dip3;
u16 dport;
u16 sport;
u32 tunnel_sipv4;
u32 tunnel_dipv4;
u32 hdr_chksum : 16;
u32 dscp : 8;
u32 ttl : 8;
u32 flag : 3;
u32 resv1 : 13;
u32 per_flow_6rd_id : 1;
u32 resv2 : 9;
u32 act_dp : 6; /* UDF */
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
u32 resv3;
u32 resv4;
u16 new_dport;
u16 new_sport;
#endif
} __packed;
struct foe_entry {
union {
struct hnat_unbind_info_blk udib1;
struct hnat_bind_info_blk bfib1;
struct hnat_ipv4_hnapt ipv4_hnapt;
struct hnat_ipv4_dslite ipv4_dslite;
struct hnat_ipv6_3t_route ipv6_3t_route;
struct hnat_ipv6_5t_route ipv6_5t_route;
struct hnat_ipv6_6rd ipv6_6rd;
};
};
/* If user wants to change default FOE entry number, both DEF_ETRY_NUM and
* DEF_ETRY_NUM_CFG need to be modified.
*/
#define DEF_ETRY_NUM 8192
/* feasible values : 32768, 16384, 8192, 4096, 2048, 1024 */
#define DEF_ETRY_NUM_CFG TABLE_8K
/* corresponding values : TABLE_32K, TABLE_16K, TABLE_8K, TABLE_4K, TABLE_2K,
* TABLE_1K
*/
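/* Hypothetical example (not part of the driver): shrinking the table to
 * 4K flows means changing both macros in lockstep, since DEF_ETRY_NUM
 * sizes the DMA allocation while DEF_ETRY_NUM_CFG is what gets written
 * to the TB_ETRY_NUM field of PPE_TB_CFG:
 *
 *	#define DEF_ETRY_NUM		4096
 *	#define DEF_ETRY_NUM_CFG	TABLE_4K
 */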
#define MAX_EXT_DEVS (0x3fU)
#define MAX_IF_NUM 64
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MAX_PPE_NUM 2
#else
#define MAX_PPE_NUM 1
#endif
#define CFG_PPE_NUM (hnat_priv->ppe_num)
struct mib_entry {
u32 byt_cnt_l;
u16 byt_cnt_h;
u32 pkt_cnt_l;
u8 pkt_cnt_h;
u8 resv0;
u32 resv1;
} __packed;
struct hnat_accounting {
u64 bytes;
u64 packets;
};
enum mtk_hnat_version {
MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
MTK_HNAT_V2, /* version 2: mt7622 */
MTK_HNAT_V3, /* version 3: mt7629 */
MTK_HNAT_V4, /* version 4: mt7986 */
};
struct mtk_hnat_data {
u8 num_of_sch;
bool whnat;
bool per_flow_accounting;
bool mcast;
enum mtk_hnat_version version;
};
struct mtk_hnat {
struct device *dev;
void __iomem *fe_base;
void __iomem *ppe_base[MAX_PPE_NUM];
struct foe_entry *foe_table_cpu[MAX_PPE_NUM];
dma_addr_t foe_table_dev[MAX_PPE_NUM];
u8 enable;
u8 enable1;
struct dentry *root;
struct debugfs_regset32 *regset[MAX_PPE_NUM];
struct mib_entry *foe_mib_cpu[MAX_PPE_NUM];
dma_addr_t foe_mib_dev[MAX_PPE_NUM];
struct hnat_accounting *acct[MAX_PPE_NUM];
const struct mtk_hnat_data *data;
/* devices we handle */
char wan[IFNAMSIZ];
char lan[IFNAMSIZ];
char ppd[IFNAMSIZ];
u16 lvid;
u16 wvid;
struct reset_control *rstc;
u8 ppe_num;
u8 gmac_num;
u8 wan_dsa_port;
struct ppe_mcast_table *pmcast;
u32 foe_etry_num;
u32 etry_num_cfg;
struct net_device *g_ppdev;
struct net_device *g_wandev;
struct net_device *wifi_hook_if[MAX_IF_NUM];
struct extdev_entry *ext_if[MAX_EXT_DEVS];
struct timer_list hnat_sma_build_entry_timer;
struct timer_list hnat_reset_timestamp_timer;
struct timer_list hnat_mcast_check_timer;
bool nf_stat_en;
};
struct extdev_entry {
char name[IFNAMSIZ];
struct net_device *dev;
};
struct tcpudphdr {
__be16 src;
__be16 dst;
};
enum FoeEntryState { INVALID = 0, UNBIND = 1, BIND = 2, FIN = 3 };
enum FoeIpAct {
IPV4_HNAPT = 0,
IPV4_HNAT = 1,
IPV4_DSLITE = 3,
IPV6_3T_ROUTE = 4,
IPV6_5T_ROUTE = 5,
IPV6_6RD = 7,
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
IPV4_MAP_T = 8,
IPV4_MAP_E = 9,
#else
IPV4_MAP_T = 6,
IPV4_MAP_E = 6,
#endif
};
/*--------------------------------------------------------------------------*/
/* Common Definition*/
/*--------------------------------------------------------------------------*/
#define HNAT_SW_VER "1.1.0"
#define HASH_SEED_KEY 0x12345678
/*PPE_TB_CFG value*/
#define ENTRY_80B 1
#define ENTRY_64B 0
#define TABLE_1K 0
#define TABLE_2K 1
#define TABLE_4K 2
#define TABLE_8K 3
#define TABLE_16K 4
#define TABLE_32K 5
#define SMA_DROP 0 /* Drop the packet */
#define SMA_DROP2 1 /* Drop the packet */
#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */
#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */
#define HASH_MODE_0 0
#define HASH_MODE_1 1
#define HASH_MODE_2 2
#define HASH_MODE_3 3
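/* Usage sketch: the PPE_TB_CFG fields above are programmed through the
 * cr_set_field() helper declared later in this header. For example, the
 * usual run mode forwards scanned packets to the CPU and builds a new
 * FOE entry:
 *
 *	cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, SMA,
 *		     SMA_FWD_CPU_BUILD_ENTRY);
 */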
/*PPE_FLOW_CFG*/
#define BIT_FUC_FOE BIT(2)
#define BIT_FMC_FOE BIT(1)
#define BIT_FBC_FOE BIT(0)
#define BIT_UDP_IP4F_NAT_EN BIT(7) /*Enable IPv4 fragment + UDP packet NAT*/
#define BIT_IPV6_3T_ROUTE_EN BIT(8)
#define BIT_IPV6_5T_ROUTE_EN BIT(9)
#define BIT_IPV6_6RD_EN BIT(10)
#define BIT_IPV4_NAT_EN BIT(12)
#define BIT_IPV4_NAPT_EN BIT(13)
#define BIT_IPV4_DSL_EN BIT(14)
#define BIT_MIB_BUSY BIT(16)
#define BIT_IPV4_NAT_FRAG_EN BIT(17)
#define BIT_IPV4_HASH_GREK BIT(19)
#define BIT_IPV6_HASH_GREK BIT(20)
#define BIT_IPV4_MAPE_EN BIT(21)
#define BIT_IPV4_MAPT_EN BIT(22)
/*GDMA_FWD_CFG value*/
#define BITS_GDM_UFRC_P_PPE (NR_PPE0_PORT << 12)
#define BITS_GDM_BFRC_P_PPE (NR_PPE0_PORT << 8)
#define BITS_GDM_MFRC_P_PPE (NR_PPE0_PORT << 4)
#define BITS_GDM_OFRC_P_PPE (NR_PPE0_PORT << 0)
#define BITS_GDM_ALL_FRC_P_PPE \
(BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE | \
BITS_GDM_OFRC_P_PPE)
#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
#define BITS_GDM_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
#define BITS_GDM_ALL_FRC_P_CPU_PDMA \
(BITS_GDM_UFRC_P_CPU_PDMA | BITS_GDM_BFRC_P_CPU_PDMA | \
BITS_GDM_MFRC_P_CPU_PDMA | BITS_GDM_OFRC_P_CPU_PDMA)
#define BITS_GDM_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
#define BITS_GDM_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
#define BITS_GDM_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
#define BITS_GDM_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
#define BITS_GDM_ALL_FRC_P_CPU_QDMA \
(BITS_GDM_UFRC_P_CPU_QDMA | BITS_GDM_BFRC_P_CPU_QDMA | \
BITS_GDM_MFRC_P_CPU_QDMA | BITS_GDM_OFRC_P_CPU_QDMA)
#define BITS_GDM_UFRC_P_DISCARD (NR_DISCARD << 12)
#define BITS_GDM_BFRC_P_DISCARD (NR_DISCARD << 8)
#define BITS_GDM_MFRC_P_DISCARD (NR_DISCARD << 4)
#define BITS_GDM_OFRC_P_DISCARD (NR_DISCARD << 0)
#define BITS_GDM_ALL_FRC_P_DISCARD \
(BITS_GDM_UFRC_P_DISCARD | BITS_GDM_BFRC_P_DISCARD | \
BITS_GDM_MFRC_P_DISCARD | BITS_GDM_OFRC_P_DISCARD)
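/* Illustrative sketch (the helper name and placement are assumptions,
 * not driver code): steering all four GDMA1 traffic classes (unicast,
 * broadcast, multicast, other) to PPE0 with the masks and values above.
 */
static inline void gdm1_fwd_to_ppe0(void __iomem *fe_base)
{
	u32 val = readl(fe_base + GDMA1_FWD_CFG);

	/* clear the per-class force-port fields, then select the PPE port */
	val &= ~GDM_ALL_FRC_MASK;
	val |= BITS_GDM_ALL_FRC_P_PPE;
	writel(val, fe_base + GDMA1_FWD_CFG);
}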
#define hnat_is_enabled(hnat_priv) (hnat_priv->enable)
#define hnat_enabled(hnat_priv) (hnat_priv->enable = 1)
#define hnat_disabled(hnat_priv) (hnat_priv->enable = 0)
#define hnat_is_enabled1(hnat_priv) (hnat_priv->enable1)
#define hnat_enabled1(hnat_priv) (hnat_priv->enable1 = 1)
#define hnat_disabled1(hnat_priv) (hnat_priv->enable1 = 0)
#define entry_hnat_is_bound(e) (e->bfib1.state == BIND)
#define entry_hnat_state(e) (e->bfib1.state)
#define skb_hnat_is_hashed(skb) \
(skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num)
#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN)
#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN)
#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD)
#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL)
#define FROM_EXT(skb) (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
#define FROM_WED(skb) ((skb_hnat_iface(skb) == FOE_MAGIC_WED0) || \
(skb_hnat_iface(skb) == FOE_MAGIC_WED1))
#define FOE_MAGIC_GE_LAN 0x1
#define FOE_MAGIC_GE_WAN 0x2
#define FOE_MAGIC_EXT 0x3
#define FOE_MAGIC_GE_VIRTUAL 0x4
#define FOE_MAGIC_GE_PPD 0x5
#define FOE_MAGIC_WED0 0x78
#define FOE_MAGIC_WED1 0x79
#define FOE_INVALID 0xf
#define index6b(i) (0x3fU - (i))
#define IPV4_HNAPT 0
#define IPV4_HNAT 1
#define IP_FORMAT(addr) \
((unsigned char *)&addr)[3], ((unsigned char *)&addr)[2], \
((unsigned char *)&addr)[1], ((unsigned char *)&addr)[0]
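/* Usage sketch: IP_FORMAT() expands to four comma-separated byte
 * expressions (highest octet first), so it pairs with a printf-style
 * "%u.%u.%u.%u" format:
 *
 *	pr_info("new sip: %u.%u.%u.%u\n",
 *		IP_FORMAT(entry->ipv4_hnapt.new_sip));
 */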
/*PSE Ports*/
#define NR_PDMA_PORT 0
#define NR_GMAC1_PORT 1
#define NR_GMAC2_PORT 2
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define NR_WHNAT_WDMA_PORT EINVAL
#define NR_PPE0_PORT 3
#define NR_PPE1_PORT 4
#else
#define NR_WHNAT_WDMA_PORT 3
#define NR_PPE0_PORT 4
#endif
#define NR_QDMA_PORT 5
#define NR_DISCARD 7
#define NR_WDMA0_PORT 8
#define NR_WDMA1_PORT 9
#define LAN_DEV_NAME hnat_priv->lan
#define IS_WAN(dev) \
(!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan)))
#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME)))
#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
#define IS_WHNAT(dev) \
((hnat_priv->data->whnat && \
(get_wifi_hook_if_index_from_dev(dev) != 0)) ? 1 : 0)
#define IS_EXT(dev) ((get_index_from_dev(dev) != 0) ? 1 : 0)
#define IS_PPD(dev) (!strcmp(dev->name, hnat_priv->ppd))
#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1 : 0)
#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
#define IS_IPV4_DSLITE(x) (((x)->bfib1.pkt_type == IPV4_DSLITE) ? 1 : 0)
#define IS_IPV4_MAPE(x) (((x)->bfib1.pkt_type == IPV4_MAP_E) ? 1 : 0)
#define IS_IPV4_MAPT(x) (((x)->bfib1.pkt_type == IPV4_MAP_T) ? 1 : 0)
#define IS_IPV6_3T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_3T_ROUTE) ? 1 : 0)
#define IS_IPV6_5T_ROUTE(x) (((x)->bfib1.pkt_type == IPV6_5T_ROUTE) ? 1 : 0)
#define IS_IPV6_6RD(x) (((x)->bfib1.pkt_type == IPV6_6RD) ? 1 : 0)
#define IS_IPV6_GRP(x) \
(IS_IPV6_3T_ROUTE(x) | IS_IPV6_5T_ROUTE(x) | IS_IPV6_6RD(x) | \
IS_IPV4_DSLITE(x) | IS_IPV4_MAPE(x) | IS_IPV4_MAPT(x))
#define IS_BOND_MODE (!strncmp(LAN_DEV_NAME, "bond", 4))
#define IS_GMAC1_MODE ((hnat_priv->gmac_num == 1) ? 1 : 0)
#define IS_HQOS_MODE (qos_toggle == 1)
#define IS_PPPQ_MODE (qos_toggle == 2) /* Per Port Per Queue */
#define MAX_PPPQ_PORT_NUM 6
#define es(entry) (entry_state[entry->bfib1.state])
#define ei(entry, end) (hnat_priv->foe_etry_num - (int)(end - entry))
#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type])
/* smac_hi/dmac_hi are u32 and smac_lo/dmac_lo are u16: extract the MAC
 * bytes through a u8 view of the fields.
 */
#define ipv4_smac(mac, e) \
({ \
mac[0] = ((u8 *)&e->ipv4_hnapt.smac_hi)[3]; \
mac[1] = ((u8 *)&e->ipv4_hnapt.smac_hi)[2]; \
mac[2] = ((u8 *)&e->ipv4_hnapt.smac_hi)[1]; \
mac[3] = ((u8 *)&e->ipv4_hnapt.smac_hi)[0]; \
mac[4] = ((u8 *)&e->ipv4_hnapt.smac_lo)[1]; \
mac[5] = ((u8 *)&e->ipv4_hnapt.smac_lo)[0]; \
})
#define ipv4_dmac(mac, e) \
({ \
mac[0] = ((u8 *)&e->ipv4_hnapt.dmac_hi)[3]; \
mac[1] = ((u8 *)&e->ipv4_hnapt.dmac_hi)[2]; \
mac[2] = ((u8 *)&e->ipv4_hnapt.dmac_hi)[1]; \
mac[3] = ((u8 *)&e->ipv4_hnapt.dmac_hi)[0]; \
mac[4] = ((u8 *)&e->ipv4_hnapt.dmac_lo)[1]; \
mac[5] = ((u8 *)&e->ipv4_hnapt.dmac_lo)[0]; \
})
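/* Usage sketch ('entry' is assumed to point at a bound IPv4 HNAPT
 * struct foe_entry):
 *
 *	u8 smac[ETH_ALEN], dmac[ETH_ALEN];
 *
 *	ipv4_smac(smac, entry);
 *	ipv4_dmac(dmac, entry);
 */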
#define IS_DSA_LAN(dev) (!strncmp(dev->name, "lan", 3))
#define IS_DSA_WAN(dev) (!strncmp(dev->name, "wan", 3))
#define NONE_DSA_PORT 0xff
#define MAX_CRSN_NUM 32
#define IPV6_HDR_LEN 40
/*QDMA_PAGE value*/
#define NUM_OF_Q_PER_PAGE 16
/*IPv6 Header*/
#ifndef NEXTHDR_IPIP
#define NEXTHDR_IPIP 4
#endif
extern const struct of_device_id of_hnat_match[];
extern struct mtk_hnat *hnat_priv;
#if defined(CONFIG_NET_DSA_MT7530)
u32 hnat_dsa_fill_stag(const struct net_device *netdev,
struct foe_entry *entry,
struct flow_offload_hw_path *hw_path,
u16 eth_proto, int mape);
static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
{
return (priv->wan_dsa_port != NONE_DSA_PORT);
}
#else
static inline u32 hnat_dsa_fill_stag(const struct net_device *netdev,
struct foe_entry *entry,
struct flow_offload_hw_path *hw_path,
u16 eth_proto, int mape)
{
return 0;
}
static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
{
return false;
}
#endif
void hnat_deinit_debugfs(struct mtk_hnat *h);
int hnat_init_debugfs(struct mtk_hnat *h);
int hnat_register_nf_hooks(void);
void hnat_unregister_nf_hooks(void);
int whnat_adjust_nf_hooks(void);
int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused);
extern int dbg_cpu_reason;
extern int debug_level;
extern int hook_toggle;
extern int mape_toggle;
extern int qos_toggle;
int ext_if_add(struct extdev_entry *ext_entry);
int ext_if_del(struct extdev_entry *ext_entry);
void cr_set_field(void __iomem *reg, u32 field, u32 val);
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no);
int mtk_sw_nat_hook_rx(struct sk_buff *skb);
void mtk_ppe_dev_register_hook(struct net_device *dev);
void mtk_ppe_dev_unregister_hook(struct net_device *dev);
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
void *ptr);
int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
void *ptr);
uint32_t foe_dump_pkt(struct sk_buff *skb);
uint32_t hnat_cpu_reason_cnt(struct sk_buff *skb);
int hnat_enable_hook(void);
int hnat_disable_hook(void);
void hnat_cache_ebl(int enable);
void hnat_qos_shaper_ebl(u32 id, u32 enable);
void set_gmac_ppe_fwd(int gmac_no, int enable);
int entry_detail(u32 ppe_id, int index);
int entry_delete_by_mac(u8 *mac);
int entry_delete(u32 ppe_id, int index);
int hnat_warm_init(void);
struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id,
u32 index, struct hnat_accounting *diff);
static inline u16 foe_timestamp(struct mtk_hnat *h)
{
return readl(h->fe_base + 0x0010) & 0xffff;
}

File diff suppressed because it is too large


@ -0,0 +1,354 @@
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
*/
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#include "hnat.h"
/**
 * mcast_entry_get - Return the index of an unused entry,
 * or of an already existing entry, in mtbl
 */
static int mcast_entry_get(u16 vlan_id, u32 dst_mac)
{
int index = -1;
u8 i;
struct ppe_mcast_group *p = hnat_priv->pmcast->mtbl;
u8 max = hnat_priv->pmcast->max_entry;
for (i = 0; i < max; i++) {
if ((index == -1) && (!p->valid)) {
index = i; /*get the first unused entry index*/
continue;
}
if ((p->vid == vlan_id) && (p->mac_hi == dst_mac)) {
index = i;
break;
}
p++;
}
if (index == -1)
pr_info("%s:group table is full\n", __func__);
return index;
}
static void get_mac_from_mdb_entry(struct br_mdb_entry *entry,
u32 *mac_hi, u16 *mac_lo)
{
switch (ntohs(entry->addr.proto)) {
case ETH_P_IP:
*mac_lo = 0x0100;
*mac_hi = swab32((entry->addr.u.ip4 & 0xfffffe00) + 0x5e);
break;
case ETH_P_IPV6:
*mac_lo = 0x3333;
*mac_hi = swab32(entry->addr.u.ip6.s6_addr32[3]);
break;
}
trace_printk("%s:group mac_h=0x%08x, mac_l=0x%04x\n",
__func__, *mac_hi, *mac_lo);
}
/*set_hnat_mtbl - set ppe multicast register*/
static int set_hnat_mtbl(struct ppe_mcast_group *group, u32 ppe_id, int index)
{
struct ppe_mcast_h mcast_h;
struct ppe_mcast_l mcast_l;
u16 mac_lo = group->mac_lo;
u32 mac_hi = group->mac_hi;
u8 mc_port = group->mc_port;
void __iomem *reg;
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
mcast_h.u.value = 0;
mcast_l.addr = 0;
if (mac_lo == 0x0100)
mcast_h.u.info.mc_mpre_sel = 0;
else if (mac_lo == 0x3333)
mcast_h.u.info.mc_mpre_sel = 1;
mcast_h.u.info.mc_px_en = mc_port;
mcast_l.addr = mac_hi;
mcast_h.u.info.valid = group->valid;
trace_printk("%s:index=%d,group info=0x%x,addr=0x%x\n",
__func__, index, mcast_h.u.value, mcast_l.addr);
if (index < 0x10) {
reg = hnat_priv->ppe_base[ppe_id] + PPE_MCAST_H_0 + ((index) * 8);
writel(mcast_h.u.value, reg);
reg = hnat_priv->ppe_base[ppe_id] + PPE_MCAST_L_0 + ((index) * 8);
writel(mcast_l.addr, reg);
} else {
index = index - 0x10;
reg = hnat_priv->fe_base + PPE_MCAST_H_10 + ((index) * 8);
writel(mcast_h.u.value, reg);
reg = hnat_priv->fe_base + PPE_MCAST_L_10 + ((index) * 8);
writel(mcast_l.addr, reg);
}
return 0;
}
/**
 * hnat_mcast_table_update -
 * 1. get a valid group entry
 * 2. update the group info:
 *    a. update the eif and oif counts
 *    b. if eif == 0 and oif == 0, delete the entry from the group table
 *    c. if oif != 0, set the mc forward port to the CPU; otherwise do not
 *       forward to the CPU
 * 3. write the group info to the PPE registers
 */
static int hnat_mcast_table_update(int type, struct br_mdb_entry *entry)
{
struct net_device *dev;
u32 mac_hi = 0;
u16 mac_lo = 0;
int i, index;
struct ppe_mcast_group *group;
/* take a reference so 'dev' stays valid for the updates below */
dev = dev_get_by_index(&init_net, entry->ifindex);
if (!dev)
return -ENODEV;
get_mac_from_mdb_entry(entry, &mac_hi, &mac_lo);
index = mcast_entry_get(entry->vid, mac_hi);
if (index == -1) {
dev_put(dev);
return -1;
}
group = &hnat_priv->pmcast->mtbl[index];
group->mac_hi = mac_hi;
group->mac_lo = mac_lo;
switch (type) {
case RTM_NEWMDB:
if (IS_LAN(dev) || IS_WAN(dev))
group->eif++;
else
group->oif++;
group->vid = entry->vid;
group->valid = true;
break;
case RTM_DELMDB:
if (group->valid) {
if (IS_LAN(dev) || IS_WAN(dev))
group->eif--;
else
group->oif--;
}
break;
}
trace_printk("%s:devname=%s,eif=%d,oif=%d\n", __func__,
dev->name, group->eif, group->oif);
if (group->valid) {
if (group->oif && group->eif)
/*eth&wifi both in group,forward to cpu&GDMA1*/
group->mc_port = (MCAST_TO_PDMA | MCAST_TO_GDMA1);
else if (group->oif)
/*only wifi in group,forward to cpu only*/
group->mc_port = MCAST_TO_PDMA;
else
/*only eth in group,forward to GDMA1 only*/
group->mc_port = MCAST_TO_GDMA1;
if (!group->oif && !group->eif)
/*nobody in this group,clear the entry*/
memset(group, 0, sizeof(struct ppe_mcast_group));
for (i = 0; i < CFG_PPE_NUM; i++)
set_hnat_mtbl(group, i, index);
}
dev_put(dev);
return 0;
}
static void hnat_mcast_nlmsg_handler(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
struct nlattr *nest, *nest2, *info;
struct br_port_msg *bpm;
struct br_mdb_entry *entry;
struct ppe_mcast_table *pmcast;
struct sock *sk;
pmcast = container_of(work, struct ppe_mcast_table, work);
sk = pmcast->msock->sk;
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
nlh = nlmsg_hdr(skb);
if (!nlmsg_ok(nlh, skb->len)) {
kfree_skb(skb);
continue;
}
bpm = nlmsg_data(nlh);
nest = nlmsg_find_attr(nlh, sizeof(*bpm), MDBA_MDB);
if (!nest) {
kfree_skb(skb);
continue;
}
nest2 = nla_find_nested(nest, MDBA_MDB_ENTRY);
if (nest2) {
info = nla_find_nested(nest2, MDBA_MDB_ENTRY_INFO);
if (!info) {
kfree_skb(skb);
continue;
}
entry = (struct br_mdb_entry *)nla_data(info);
trace_printk("%s:cmd=0x%2x,ifindex=0x%x,state=0x%x",
__func__, nlh->nlmsg_type,
entry->ifindex, entry->state);
trace_printk("vid=0x%x,ip=0x%x,proto=0x%x\n",
entry->vid, entry->addr.u.ip4,
entry->addr.proto);
hnat_mcast_table_update(nlh->nlmsg_type, entry);
}
kfree_skb(skb);
}
}
static void hnat_mcast_nlmsg_rcv(struct sock *sk)
{
struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
struct workqueue_struct *queue = pmcast->queue;
struct work_struct *work = &pmcast->work;
queue_work(queue, work);
}
static struct socket *hnat_mcast_netlink_open(struct net *net)
{
struct socket *sock = NULL;
int ret;
struct sockaddr_nl addr = {0};
ret = sock_create_kern(net, PF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
if (ret < 0)
goto out;
sock->sk->sk_data_ready = hnat_mcast_nlmsg_rcv;
addr.nl_family = PF_NETLINK;
addr.nl_pid = 65536; /* FIXME: how to get a unique id? */
addr.nl_groups = RTMGRP_MDB;
ret = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr));
if (ret < 0)
goto out;
return sock;
out:
if (sock)
sock_release(sock);
return NULL;
}
static void hnat_mcast_check_timestamp(struct timer_list *t)
{
struct foe_entry *entry;
int i, hash_index;
u16 e_ts, foe_ts;
for (i = 0; i < CFG_PPE_NUM; i++) {
for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
entry = hnat_priv->foe_table_cpu[i] + hash_index;
if (entry->bfib1.sta == 1) {
e_ts = (entry->ipv4_hnapt.m_timestamp) & 0xffff;
foe_ts = foe_timestamp(hnat_priv);
if ((foe_ts - e_ts) > 0x3000)
foe_ts = (~(foe_ts)) & 0xffff;
if (abs(foe_ts - e_ts) > 20)
entry_delete(i, hash_index);
}
}
}
mod_timer(&hnat_priv->hnat_mcast_check_timer, jiffies + 10 * HZ);
}
int hnat_mcast_enable(u32 ppe_id)
{
struct ppe_mcast_table *pmcast;
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
pmcast = kzalloc(sizeof(*pmcast), GFP_KERNEL);
if (!pmcast)
return -1;
if (hnat_priv->data->version == MTK_HNAT_V1)
pmcast->max_entry = 0x10;
else
pmcast->max_entry = MAX_MCAST_ENTRY;
INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler);
pmcast->queue = create_singlethread_workqueue("ppe_mcast");
if (!pmcast->queue)
goto err;
pmcast->msock = hnat_mcast_netlink_open(&init_net);
if (!pmcast->msock)
goto err;
hnat_priv->pmcast = pmcast;
/* mt7629 must check mcast entry lifetime manually */
if (hnat_priv->data->version == MTK_HNAT_V3) {
timer_setup(&hnat_priv->hnat_mcast_check_timer,
hnat_mcast_check_timestamp, 0);
hnat_priv->hnat_mcast_check_timer.expires = jiffies;
add_timer(&hnat_priv->hnat_mcast_check_timer);
}
/* Enable multicast table lookup */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);
/* multicast port0 map to PDMA */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P0_PPSE, 0);
/* multicast port1 map to GMAC1 */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P1_PPSE, 1);
/* multicast port2 map to GMAC2 */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P2_PPSE, 2);
/* multicast port3 map to QDMA */
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P3_PPSE, 5);
return 0;
err:
if (pmcast->queue)
destroy_workqueue(pmcast->queue);
if (pmcast->msock)
sock_release(pmcast->msock);
kfree(pmcast);
return -1;
}
int hnat_mcast_disable(void)
{
struct ppe_mcast_table *pmcast = hnat_priv->pmcast;
if (!pmcast)
return -EINVAL;
if (hnat_priv->data->version == MTK_HNAT_V3)
del_timer_sync(&hnat_priv->hnat_mcast_check_timer);
flush_work(&pmcast->work);
destroy_workqueue(pmcast->queue);
sock_release(pmcast->msock);
kfree(pmcast);
return 0;
}


@ -0,0 +1,69 @@
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2014-2016 Zhiqiang Yang <zhiqiang.yang@mediatek.com>
*/
#ifndef NF_HNAT_MCAST_H
#define NF_HNAT_MCAST_H
#define RTMGRP_IPV4_MROUTE 0x20
#define RTMGRP_MDB 0x2000000
#define MAX_MCAST_ENTRY 64
#define MCAST_TO_PDMA (0x1 << 0)
#define MCAST_TO_GDMA1 (0x1 << 1)
#define MCAST_TO_GDMA2 (0x1 << 2)
struct ppe_mcast_group {
u32 mac_hi; /* multicast MAC address, trailing 4 bytes */
u16 mac_lo; /* multicast MAC address, 2-byte prefix (01:00 or 33:33) */
u16 vid;
u8 mc_port; /* bit 0: forward to CPU, bit 1: forward to GDMA1, bit 2: forward to GDMA2 */
u8 eif; /* number of ethernet interfaces in the multicast group */
u8 oif; /* number of other interfaces (e.g. Wi-Fi) in the multicast group */
bool valid;
};
struct ppe_mcast_table {
struct workqueue_struct *queue;
struct work_struct work;
struct socket *msock;
struct ppe_mcast_group mtbl[MAX_MCAST_ENTRY];
u8 max_entry;
};
struct ppe_mcast_h {
union {
u32 value;
struct {
u32 mc_vid:12;
u32 mc_qos_qid54:2; /* mt7622 only */
u32 valid:1;
u32 rev1:1;
/* per-port enable bitmask: bit 0 = CPU, bit 1 = GDMA1 */
u32 mc_px_en:4;
u32 mc_mpre_sel:2; /* 0=01:00, 2=33:33 */
u32 mc_vid_cmp:1;
u32 rev2:1;
u32 mc_px_qos_en:4;
u32 mc_qos_qid:4;
} info;
} u;
};
struct ppe_mcast_l {
u32 addr;
};
int hnat_mcast_enable(u32 ppe_id);
int hnat_mcast_disable(void);
#endif

File diff suppressed because it is too large


@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2020 MediaTek Inc.
* Author: Landen Chao <landen.chao@mediatek.com>
*/
#include <linux/of_device.h>
#include <net/netfilter/nf_flow_table.h>
#include "hnat.h"
u32 hnat_dsa_fill_stag(const struct net_device *netdev,
struct foe_entry *entry,
struct flow_offload_hw_path *hw_path,
u16 eth_proto,
int mape)
{
const struct net_device *ndev;
const unsigned int *port_reg;
int port_index;
u16 sp_tag;
if (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN)
ndev = hw_path->dev;
else
ndev = netdev;
port_reg = of_get_property(ndev->dev.of_node, "reg", NULL);
if (unlikely(!port_reg))
return -EINVAL;
port_index = be32_to_cpup(port_reg);
sp_tag = BIT(port_index);
if (!entry->bfib1.vlan_layer)
entry->bfib1.vlan_layer = 1;
else
/* VLAN existence indicator */
sp_tag |= BIT(8);
entry->bfib1.vpm = 0;
switch (eth_proto) {
case ETH_P_IP:
if (entry->ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
entry->ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E)
entry->ipv4_dslite.etype = sp_tag;
else
entry->ipv4_hnapt.etype = sp_tag;
break;
case ETH_P_IPV6:
/* In the case MAPE LAN --> WAN, binding entry is to CPU.
* Do not add special tag.
*/
if (!mape)
/* etype offset of ipv6 entries are the same. */
entry->ipv6_5t_route.etype = sp_tag;
break;
default:
pr_info("DSA + HNAT unsupport protocol\n");
}
return port_index;
}


@ -0,0 +1,129 @@
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
* Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
*/
#ifndef NF_HNAT_MTK_H
#define NF_HNAT_MTK_H
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include "../mtk_eth_soc.h"
#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44]))
struct hnat_skb_cb2 {
__u32 magic;
};
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_desc {
u32 entry : 15;
u32 filled : 3;
u32 crsn : 5;
u32 resv1 : 3;
u32 sport : 4;
u32 resv2 : 1;
u32 alg : 1;
u32 iface : 8;
u32 wdmaid : 2;
u32 rxid : 2;
u32 wcid : 10;
u32 bssid : 6;
u32 resv5 : 20;
u32 magic_tag_protect : 16;
} __packed;
#else
struct hnat_desc {
u32 entry : 14;
u32 crsn : 5;
u32 sport : 4;
u32 alg : 1;
u32 iface : 4;
u32 filled : 3;
u32 resv : 1;
u32 magic_tag_protect : 16;
u32 wdmaid : 8;
u32 rxid : 2;
u32 wcid : 8;
u32 bssid : 6;
} __packed;
#endif
#define HQOS_MAGIC_TAG 0x5678
#define HAS_HQOS_MAGIC_TAG(skb) (qos_toggle && skb->protocol == HQOS_MAGIC_TAG)
#define HNAT_MAGIC_TAG 0x6789
#define HNAT_INFO_FILLED 0x7
#define WIFI_INFO_LEN 3
#define FOE_INFO_LEN (10 + WIFI_INFO_LEN)
#define IS_SPACE_AVAILABLE_HEAD(skb) (skb_headroom(skb) >= FOE_INFO_LEN)
#define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
#define skb_hnat_sport(skb) (((struct hnat_desc *)(skb->head))->sport)
#define skb_hnat_alg(skb) (((struct hnat_desc *)(skb->head))->alg)
#define skb_hnat_iface(skb) (((struct hnat_desc *)(skb->head))->iface)
#define skb_hnat_filled(skb) (((struct hnat_desc *)(skb->head))->filled)
#define skb_hnat_magic_tag(skb) (((struct hnat_desc *)((skb)->head))->magic_tag_protect)
#define skb_hnat_wdma_id(skb) (((struct hnat_desc *)((skb)->head))->wdmaid)
#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid)
#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid)
#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid)
#define skb_hnat_ppe(skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_WED1 && CFG_PPE_NUM > 1) ? 1 : 0)
#define do_ext2ge_fast_try(dev, skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb))
#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)
#define clr_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x0)
#define set_to_ppe(skb) (HNAT_SKB_CB2(skb)->magic = 0x78681415)
#define is_from_extge(skb) (HNAT_SKB_CB2(skb)->magic == 0x78786688)
#define is_hnat_info_filled(skb) (skb_hnat_filled(skb) == HNAT_INFO_FILLED)
#define is_magic_tag_valid(skb) (skb_hnat_magic_tag(skb) == HNAT_MAGIC_TAG)
#define set_from_mape(skb) (HNAT_SKB_CB2(skb)->magic = 0x78787788)
#define is_from_mape(skb) (HNAT_SKB_CB2(skb)->magic == 0x78787788)
#define is_unreserved_port(hdr) \
((ntohs(hdr->source) > 1023) && (ntohs(hdr->dest) > 1023))
#define TTL_0 0x02
#define HAS_OPTION_HEADER 0x03
#define NO_FLOW_IS_ASSIGNED 0x07
#define IPV4_WITH_FRAGMENT 0x08
#define IPV4_HNAPT_DSLITE_WITH_FRAGMENT 0x09
#define IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP 0x0A
#define IPV6_5T_6RD_WITHOUT_TCP_UDP 0x0B
#define TCP_FIN_SYN_RST \
0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
#define UN_HIT 0x0D /* FOE Un-hit */
#define HIT_UNBIND 0x0E /* FOE Hit unbind */
#define HIT_UNBIND_RATE_REACH 0x0F
#define HIT_BIND_TCP_FIN 0x10
#define HIT_BIND_TTL_1 0x11
#define HIT_BIND_WITH_VLAN_VIOLATION 0x12
#define HIT_BIND_KEEPALIVE_UC_OLD_HDR 0x13
#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14
#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15
#define HIT_BIND_FORCE_TO_CPU 0x16
#define HIT_BIND_WITH_OPTION_HEADER 0x17
#define HIT_BIND_MULTICAST_TO_CPU 0x18
#define HIT_BIND_MULTICAST_TO_GMAC_CPU 0x19
#define HIT_PRE_BIND 0x1A
#define HIT_BIND_PACKET_SAMPLING 0x1B
#define HIT_BIND_EXCEED_MTU 0x1C
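/* Illustrative sketch (hypothetical helper, not part of the driver):
 * a typical RX-path check dispatching on the CPU reason carried in the
 * per-skb HNAT descriptor at skb->head.
 */
static inline bool hnat_skb_should_bind(struct sk_buff *skb)
{
	/* the descriptor is only trustworthy if the magic tag survived */
	if (!is_magic_tag_valid(skb))
		return false;

	switch (skb_hnat_reason(skb)) {
	case HIT_UNBIND_RATE_REACH:
		/* flow was seen often enough: a FOE entry may be built */
		return true;
	default:
		return false;
	}
}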
u32 hnat_tx(struct sk_buff *skb);
u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd);
u32 hnat_reg(struct net_device *, void __iomem *);
u32 hnat_unreg(void);
#endif


@ -0,0 +1,135 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2019 MediaTek Inc.
/* A library for MediaTek SGMII circuit
*
* Author: Sean Wang <sean.wang@mediatek.com>
*
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
{
struct device_node *np;
int i;
ss->ana_rgc3 = ana_rgc3;
for (i = 0; i < MTK_MAX_DEVS; i++) {
np = of_parse_phandle(r, "mediatek,sgmiisys", i);
if (!np)
break;
ss->regmap[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
ss->flags[i] &= ~(MTK_SGMII_PN_SWAP);
if (of_property_read_bool(np, "pn_swap"))
ss->flags[i] |= MTK_SGMII_PN_SWAP;
}
return 0;
}
int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, unsigned int id)
{
unsigned int val;
if (!ss->regmap[id])
return -EINVAL;
/* Setup the link timer and QPHY power up inside SGMIISYS */
regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
SGMII_LINK_TIMER_DEFAULT);
regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
val |= SGMII_REMOTE_FAULT_DIS;
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
if (MTK_HAS_FLAGS(ss->flags[id], MTK_SGMII_PN_SWAP))
regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL,
SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX);
/* Release PHYA power down state */
regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return 0;
}
int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, unsigned int id,
const struct phylink_link_state *state)
{
unsigned int val;
if (!ss->regmap[id])
return -EINVAL;
regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
val &= ~RG_PHY_SPEED_MASK;
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
val |= RG_PHY_SPEED_3_125G;
regmap_write(ss->regmap[id], ss->ana_rgc3, val);
/* Disable SGMII AN */
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
val &= ~SGMII_AN_ENABLE;
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
/* SGMII force mode setting */
regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
val &= ~SGMII_IF_MODE_MASK;
switch (state->speed) {
case SPEED_10:
val |= SGMII_SPEED_10;
break;
case SPEED_100:
val |= SGMII_SPEED_100;
break;
case SPEED_2500:
case SPEED_1000:
val |= SGMII_SPEED_1000;
break;
}
if (state->duplex == DUPLEX_FULL)
val |= SGMII_DUPLEX_FULL;
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
if (MTK_HAS_FLAGS(ss->flags[id], MTK_SGMII_PN_SWAP))
regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL,
SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX);
/* Release PHYA power down state */
regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return 0;
}
void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
{
struct mtk_sgmii *ss = eth->sgmii;
unsigned int val, sid;
/* Decide how GMAC and SGMIISYS be mapped */
sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
0 : mac_id;
if (!ss->regmap[sid])
return;
regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
}
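/* Usage sketch (caller side, values assumed): forcing 2500Base-X full
 * duplex on SGMII unit 0 with in-band autonegotiation bypassed:
 *
 *	struct phylink_link_state state = {
 *		.interface = PHY_INTERFACE_MODE_2500BASEX,
 *		.speed = SPEED_2500,
 *		.duplex = DUPLEX_FULL,
 *	};
 *
 *	mtk_sgmii_setup_mode_force(eth->sgmii, 0, &state);
 */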


@ -0,0 +1,2 @@
config MT753X_GSW
tristate "Driver for the MediaTek MT753x switch"


@ -0,0 +1,11 @@
#
# Makefile for MediaTek MT753x gigabit switch
#
obj-$(CONFIG_MT753X_GSW) += mt753x.o
mt753x-$(CONFIG_SWCONFIG) += mt753x_swconfig.o
mt753x-y += mt753x_mdio.o mt7530.o mt7531.o \
mt753x_common.o mt753x_vlan.o mt753x_nl.o


@ -0,0 +1,644 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include "mt753x.h"
#include "mt753x_regs.h"
/* MT7530 registers */
/* Unique fields of PMCR for MT7530 */
#define FORCE_MODE BIT(15)
/* Unique fields of GMACCR for MT7530 */
#define VLAN_SUPT_NO_S 14
#define VLAN_SUPT_NO_M 0x1c000
#define LATE_COL_DROP BIT(13)
/* Unique fields of (M)HWSTRAP for MT7530 */
#define BOND_OPTION BIT(24)
#define P5_PHY0_SEL BIT(20)
#define CHG_TRAP BIT(16)
#define LOOPDET_DIS BIT(14)
#define P5_INTF_SEL_GMAC5 BIT(13)
#define SMI_ADDR_S 11
#define SMI_ADDR_M 0x1800
#define XTAL_FSEL_S 9
#define XTAL_FSEL_M 0x600
#define P6_INTF_DIS BIT(8)
#define P5_INTF_MODE_RGMII BIT(7)
#define P5_INTF_DIS_S BIT(6)
#define C_MDIO_BPS_S BIT(5)
#define EEPROM_EN_S BIT(4)
/* PHY EEE register definitions */
#define PHY_DEV07 0x07
#define PHY_DEV07_REG_03C 0x3c
/* PHY extended register 0x14 definitions */
#define PHY_EXT_REG_14 0x14
/* Fields of PHY_EXT_REG_14 */
#define PHY_EN_DOWN_SHIFT BIT(4)
/* PHY token ring register 0x10 definitions */
#define PHY_TR_REG_10 0x10
/* PHY token ring register 0x12 definitions */
#define PHY_TR_REG_12 0x12
/* PHY LPI PCS/DSP control register definitions */
#define PHY_LPI_REG_11 0x11
/* PHY device 0x1e register definitions */
#define PHY_DEV1E 0x1e
#define PHY_DEV1E_REG_123 0x123
#define PHY_DEV1E_REG_A6 0xa6
/* Values of XTAL_FSEL */
#define XTAL_20MHZ 1
#define XTAL_40MHZ 2
#define XTAL_25MHZ 3
/* Top signal control register */
#define TOP_SIG_CTRL 0x7808
/* TOP_SIG_CTRL register fields */
#define OUTPUT_INTR_S 16
#define OUTPUT_INTR_M 0x30000
#define P6ECR 0x7830
#define P6_INTF_MODE_TRGMII BIT(0)
#define TRGMII_TXCTRL 0x7a40
#define TRAIN_TXEN BIT(31)
#define TXC_INV BIT(30)
#define TX_DOEO BIT(29)
#define TX_RST BIT(28)
#define TRGMII_TD0_CTRL 0x7a50
#define TRGMII_TD1_CTRL 0x7a58
#define TRGMII_TD2_CTRL 0x7a60
#define TRGMII_TD3_CTRL 0x7a68
#define TRGMII_TXCTL_CTRL 0x7a70
#define TRGMII_TCK_CTRL 0x7a78
#define TRGMII_TD_CTRL(n) (0x7a50 + (n) * 8)
#define NUM_TRGMII_CTRL 6
#define TX_DMPEDRV BIT(31)
#define TX_DM_SR BIT(15)
#define TX_DMERODT BIT(14)
#define TX_DMOECTL BIT(13)
#define TX_TAP_S 8
#define TX_TAP_M 0xf00
#define TX_TRAIN_WD_S 0
#define TX_TRAIN_WD_M 0xff
#define TRGMII_TD0_ODT 0x7a54
#define TRGMII_TD1_ODT 0x7a5c
#define TRGMII_TD2_ODT 0x7a64
#define TRGMII_TD3_ODT 0x7a6c
#define TRGMII_TXCTL_ODT 0x7574
#define TRGMII_TCK_ODT 0x757c
#define TRGMII_TD_ODT(n) (0x7a54 + (n) * 8)
#define NUM_TRGMII_ODT 6
#define TX_DM_DRVN_PRE_S 30
#define TX_DM_DRVN_PRE_M 0xc0000000
#define TX_DM_DRVP_PRE_S 28
#define TX_DM_DRVP_PRE_M 0x30000000
#define TX_DM_TDSEL_S 24
#define TX_DM_TDSEL_M 0xf000000
#define TX_ODTEN BIT(23)
#define TX_DME_PRE BIT(20)
#define TX_DM_DRVNT0 BIT(19)
#define TX_DM_DRVPT0 BIT(18)
#define TX_DM_DRVNTE BIT(17)
#define TX_DM_DRVPTE BIT(16)
#define TX_DM_ODTN_S 12
#define TX_DM_ODTN_M 0x7000
#define TX_DM_ODTP_S 8
#define TX_DM_ODTP_M 0x700
#define TX_DM_DRVN_S 4
#define TX_DM_DRVN_M 0xf0
#define TX_DM_DRVP_S 0
#define TX_DM_DRVP_M 0x0f
#define P5RGMIIRXCR 0x7b00
#define CSR_RGMII_RCTL_CFG_S 24
#define CSR_RGMII_RCTL_CFG_M 0x7000000
#define CSR_RGMII_RXD_CFG_S 16
#define CSR_RGMII_RXD_CFG_M 0x70000
#define CSR_RGMII_EDGE_ALIGN BIT(8)
#define CSR_RGMII_RXC_90DEG_CFG_S 4
#define CSR_RGMII_RXC_90DEG_CFG_M 0xf0
#define CSR_RGMII_RXC_0DEG_CFG_S 0
#define CSR_RGMII_RXC_0DEG_CFG_M 0x0f
#define P5RGMIITXCR 0x7b04
#define CSR_RGMII_TXEN_CFG_S 16
#define CSR_RGMII_TXEN_CFG_M 0x70000
#define CSR_RGMII_TXD_CFG_S 8
#define CSR_RGMII_TXD_CFG_M 0x700
#define CSR_RGMII_TXC_CFG_S 0
#define CSR_RGMII_TXC_CFG_M 0x1f
#define CHIP_REV 0x7ffc
#define CHIP_NAME_S 16
#define CHIP_NAME_M 0xffff0000
#define CHIP_REV_S 0
#define CHIP_REV_M 0x0f
/* MMD registers */
#define CORE_PLL_GROUP2 0x401
#define RG_SYSPLL_EN_NORMAL BIT(15)
#define RG_SYSPLL_VODEN BIT(14)
#define RG_SYSPLL_POSDIV_S 5
#define RG_SYSPLL_POSDIV_M 0x60
#define CORE_PLL_GROUP4 0x403
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1_S 0
#define RG_LCDDS_PCW_NCPO1_M 0xffff
#define CORE_PLL_GROUP6 0x405
#define RG_LCDDS_PCW_NCPO0_S 0
#define RG_LCDDS_PCW_NCPO0_M 0xffff
#define CORE_PLL_GROUP7 0x406
#define RG_LCDDS_PWDB BIT(15)
#define RG_LCDDS_ISO_EN BIT(13)
#define RG_LCCDS_C_S 4
#define RG_LCCDS_C_M 0x70
#define RG_LCDDS_PCW_NCPO_CHG BIT(3)
#define CORE_PLL_GROUP10 0x409
#define RG_LCDDS_SSC_DELTA_S 0
#define RG_LCDDS_SSC_DELTA_M 0xfff
#define CORE_PLL_GROUP11 0x40a
#define RG_LCDDS_SSC_DELTA1_S 0
#define RG_LCDDS_SSC_DELTA1_M 0xfff
#define CORE_GSWPLL_GCR_1 0x040d
#define GSWPLL_PREDIV_S 14
#define GSWPLL_PREDIV_M 0xc000
#define GSWPLL_POSTDIV_200M_S 12
#define GSWPLL_POSTDIV_200M_M 0x3000
#define GSWPLL_EN_PRE BIT(11)
#define GSWPLL_FBKSEL BIT(10)
#define GSWPLL_BP BIT(9)
#define GSWPLL_BR BIT(8)
#define GSWPLL_FBKDIV_200M_S 0
#define GSWPLL_FBKDIV_200M_M 0xff
#define CORE_GSWPLL_GCR_2 0x040e
#define GSWPLL_POSTDIV_500M_S 8
#define GSWPLL_POSTDIV_500M_M 0x300
#define GSWPLL_FBKDIV_500M_S 0
#define GSWPLL_FBKDIV_500M_M 0xff
#define TRGMII_GSW_CLK_CG 0x0410
#define TRGMIICK_EN BIT(1)
#define GSWCK_EN BIT(0)
static int mt7530_mii_read(struct gsw_mt753x *gsw, int phy, int reg)
{
if (phy < MT753X_NUM_PHYS)
phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
return mdiobus_read(gsw->host_bus, phy, reg);
}
static void mt7530_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val)
{
if (phy < MT753X_NUM_PHYS)
phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
mdiobus_write(gsw->host_bus, phy, reg, val);
}
static int mt7530_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
{
u16 val;
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->host_bus->mdio_lock);
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
(MMD_ADDR << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg);
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
(MMD_DATA << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
val = gsw->host_bus->read(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG);
mutex_unlock(&gsw->host_bus->mdio_lock);
return val;
}
static void mt7530_mmd_write(struct gsw_mt753x *gsw, int addr, int devad,
u16 reg, u16 val)
{
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->host_bus->mdio_lock);
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
(MMD_ADDR << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, reg);
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ACC_CTL_REG,
(MMD_DATA << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
gsw->host_bus->write(gsw->host_bus, addr, MII_MMD_ADDR_DATA_REG, val);
mutex_unlock(&gsw->host_bus->mdio_lock);
}
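/* The two helpers above implement the Clause-22 indirect MMD access
 * sequence: select the MMD device with an ADDR command, latch the
 * register number, switch to a DATA command, then read or write the
 * payload. Caller-side sketch, reading the EEE advertisement register
 * (MMD 7, register 0x3c) of PHY 0:
 *
 *	int eee_adv = mt7530_mmd_read(gsw, 0, PHY_DEV07, PHY_DEV07_REG_03C);
 */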
static void mt7530_core_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val)
{
gsw->mmd_write(gsw, 0, 0x1f, reg, val);
}
static void mt7530_trgmii_setting(struct gsw_mt753x *gsw)
{
u16 i;
mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0780);
mdelay(1);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
mdelay(1);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
/* PLL BIAS enable */
mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN);
mdelay(1);
/* PLL LPF enable */
mt7530_core_reg_write(gsw, CORE_PLL_GROUP4,
RG_SYSPLL_DDSFBK_EN |
RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
/* sys PLL enable */
mt7530_core_reg_write(gsw, CORE_PLL_GROUP2,
RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
(1 << RG_SYSPLL_POSDIV_S));
/* LCDDDS PWDS */
mt7530_core_reg_write(gsw, CORE_PLL_GROUP7,
(3 << RG_LCCDS_C_S) |
RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
mdelay(1);
/* Enable MT7530 TRGMII clock */
mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN | TRGMIICK_EN);
/* lower Tx Driving */
for (i = 0 ; i < NUM_TRGMII_ODT; i++)
mt753x_reg_write(gsw, TRGMII_TD_ODT(i),
(4 << TX_DM_DRVP_S) | (4 << TX_DM_DRVN_S));
}
static void mt7530_rgmii_setting(struct gsw_mt753x *gsw)
{
u32 val;
mt7530_core_reg_write(gsw, CORE_PLL_GROUP5, 0x0c80);
mdelay(1);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP6, 0);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP10, 0x87);
mdelay(1);
mt7530_core_reg_write(gsw, CORE_PLL_GROUP11, 0x87);
val = mt753x_reg_read(gsw, TRGMII_TXCTRL);
val &= ~TXC_INV;
mt753x_reg_write(gsw, TRGMII_TXCTRL, val);
mt753x_reg_write(gsw, TRGMII_TCK_CTRL,
(8 << TX_TAP_S) | (0x55 << TX_TRAIN_WD_S));
}
static int mt7530_mac_port_setup(struct gsw_mt753x *gsw)
{
u32 hwstrap, p6ecr = 0, p5mcr, p6mcr, phyad;
hwstrap = mt753x_reg_read(gsw, MHWSTRAP);
hwstrap &= ~(P6_INTF_DIS | P5_INTF_MODE_RGMII | P5_INTF_DIS_S);
hwstrap |= P5_INTF_SEL_GMAC5;
if (!gsw->port5_cfg.enabled) {
p5mcr = FORCE_MODE;
hwstrap |= P5_INTF_DIS_S;
} else {
p5mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
MAC_MODE | MAC_TX_EN | MAC_RX_EN |
BKOFF_EN | BACKPR_EN;
if (gsw->port5_cfg.force_link) {
p5mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
FORCE_TX_FC;
p5mcr |= gsw->port5_cfg.speed << FORCE_SPD_S;
if (gsw->port5_cfg.duplex)
p5mcr |= FORCE_DPX;
}
switch (gsw->port5_cfg.phy_mode) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
break;
case PHY_INTERFACE_MODE_RGMII:
hwstrap |= P5_INTF_MODE_RGMII;
break;
default:
dev_info(gsw->dev, "%s is not supported by port5\n",
phy_modes(gsw->port5_cfg.phy_mode));
p5mcr = FORCE_MODE;
hwstrap |= P5_INTF_DIS_S;
}
/* Port5 to PHY direct mode */
if (of_property_read_u32(gsw->port5_cfg.np, "phy-address",
&phyad))
goto parse_p6;
if (phyad != 0 && phyad != 4) {
dev_info(gsw->dev,
"Only PHY 0/4 can be connected to Port 5\n");
goto parse_p6;
}
hwstrap &= ~P5_INTF_SEL_GMAC5;
if (phyad == 0)
hwstrap |= P5_PHY0_SEL;
else
hwstrap &= ~P5_PHY0_SEL;
}
parse_p6:
if (!gsw->port6_cfg.enabled) {
p6mcr = FORCE_MODE;
hwstrap |= P6_INTF_DIS;
} else {
p6mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
MAC_MODE | MAC_TX_EN | MAC_RX_EN |
BKOFF_EN | BACKPR_EN;
if (gsw->port6_cfg.force_link) {
p6mcr |= FORCE_MODE | FORCE_LINK | FORCE_RX_FC |
FORCE_TX_FC;
p6mcr |= gsw->port6_cfg.speed << FORCE_SPD_S;
if (gsw->port6_cfg.duplex)
p6mcr |= FORCE_DPX;
}
switch (gsw->port6_cfg.phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
p6ecr = BIT(1);
break;
case PHY_INTERFACE_MODE_TRGMII:
/* set MT7530 central align */
p6ecr = BIT(0);
break;
default:
dev_info(gsw->dev, "%s is not supported by port6\n",
phy_modes(gsw->port6_cfg.phy_mode));
p6mcr = FORCE_MODE;
hwstrap |= P6_INTF_DIS;
}
}
mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
mt753x_reg_write(gsw, P6ECR, p6ecr);
mt753x_reg_write(gsw, PMCR(5), p5mcr);
mt753x_reg_write(gsw, PMCR(6), p6mcr);
return 0;
}
static void mt7530_core_pll_setup(struct gsw_mt753x *gsw)
{
u32 hwstrap;
hwstrap = mt753x_reg_read(gsw, HWSTRAP);
switch ((hwstrap & XTAL_FSEL_M) >> XTAL_FSEL_S) {
case XTAL_40MHZ:
/* Disable MT7530 core clock */
mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, 0);
/* disable MT7530 PLL */
mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
(2 << GSWPLL_POSTDIV_200M_S) |
(32 << GSWPLL_FBKDIV_200M_S));
/* For MT7530 core clock = 500Mhz */
mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_2,
(1 << GSWPLL_POSTDIV_500M_S) |
(25 << GSWPLL_FBKDIV_500M_S));
/* Enable MT7530 PLL */
mt7530_core_reg_write(gsw, CORE_GSWPLL_GCR_1,
(2 << GSWPLL_POSTDIV_200M_S) |
(32 << GSWPLL_FBKDIV_200M_S) |
GSWPLL_EN_PRE);
usleep_range(20, 40);
/* Enable MT7530 core clock */
mt7530_core_reg_write(gsw, TRGMII_GSW_CLK_CG, GSWCK_EN);
break;
default:
/* TODO: PLL settings for 20/25MHz */
break;
}
hwstrap = mt753x_reg_read(gsw, HWSTRAP);
hwstrap |= CHG_TRAP;
if (gsw->direct_phy_access)
hwstrap &= ~C_MDIO_BPS_S;
else
hwstrap |= C_MDIO_BPS_S;
mt753x_reg_write(gsw, MHWSTRAP, hwstrap);
if (gsw->port6_cfg.enabled &&
gsw->port6_cfg.phy_mode == PHY_INTERFACE_MODE_TRGMII) {
mt7530_trgmii_setting(gsw);
} else {
/* RGMII */
mt7530_rgmii_setting(gsw);
}
/* delay setting for 10/1000M */
mt753x_reg_write(gsw, P5RGMIIRXCR,
CSR_RGMII_EDGE_ALIGN |
(2 << CSR_RGMII_RXC_0DEG_CFG_S));
mt753x_reg_write(gsw, P5RGMIITXCR, 0x14 << CSR_RGMII_TXC_CFG_S);
}
static int mt7530_sw_detect(struct gsw_mt753x *gsw, struct chip_rev *crev)
{
u32 rev;
rev = mt753x_reg_read(gsw, CHIP_REV);
if (((rev & CHIP_NAME_M) >> CHIP_NAME_S) == MT7530) {
if (crev) {
crev->rev = rev & CHIP_REV_M;
crev->name = "MT7530";
}
return 0;
}
return -ENODEV;
}
static void mt7530_phy_setting(struct gsw_mt753x *gsw)
{
int i;
u32 val;
for (i = 0; i < MT753X_NUM_PHYS; i++) {
/* Disable EEE */
gsw->mmd_write(gsw, i, PHY_DEV07, PHY_DEV07_REG_03C, 0);
/* Enable HW auto downshift */
gsw->mii_write(gsw, i, 0x1f, 0x1);
val = gsw->mii_read(gsw, i, PHY_EXT_REG_14);
val |= PHY_EN_DOWN_SHIFT;
gsw->mii_write(gsw, i, PHY_EXT_REG_14, val);
/* Increase SlvDPSready time */
gsw->mii_write(gsw, i, 0x1f, 0x52b5);
gsw->mii_write(gsw, i, PHY_TR_REG_10, 0xafae);
gsw->mii_write(gsw, i, PHY_TR_REG_12, 0x2f);
gsw->mii_write(gsw, i, PHY_TR_REG_10, 0x8fae);
/* Increase post_update_timer */
gsw->mii_write(gsw, i, 0x1f, 0x3);
gsw->mii_write(gsw, i, PHY_LPI_REG_11, 0x4b);
gsw->mii_write(gsw, i, 0x1f, 0);
/* Adjust 100_mse_threshold */
gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);
/* Disable mcc */
gsw->mmd_write(gsw, i, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
}
}
static inline bool get_phy_access_mode(const struct device_node *np)
{
return of_property_read_bool(np, "mt7530,direct-phy-access");
}
static int mt7530_sw_init(struct gsw_mt753x *gsw)
{
int i;
u32 val;
gsw->direct_phy_access = get_phy_access_mode(gsw->dev->of_node);
/* Force MT7530 to use (in)direct PHY access */
val = mt753x_reg_read(gsw, HWSTRAP);
val |= CHG_TRAP;
if (gsw->direct_phy_access)
val &= ~C_MDIO_BPS_S;
else
val |= C_MDIO_BPS_S;
mt753x_reg_write(gsw, MHWSTRAP, val);
/* Read PHY address base from HWSTRAP */
gsw->phy_base = (((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3) + 8;
gsw->phy_base &= MT753X_SMI_ADDR_MASK;
if (gsw->direct_phy_access) {
gsw->mii_read = mt7530_mii_read;
gsw->mii_write = mt7530_mii_write;
gsw->mmd_read = mt7530_mmd_read;
gsw->mmd_write = mt7530_mmd_write;
} else {
gsw->mii_read = mt753x_mii_read;
gsw->mii_write = mt753x_mii_write;
gsw->mmd_read = mt753x_mmd_ind_read;
gsw->mmd_write = mt753x_mmd_ind_write;
}
for (i = 0; i < MT753X_NUM_PHYS; i++) {
val = gsw->mii_read(gsw, i, MII_BMCR);
val |= BMCR_PDOWN;
gsw->mii_write(gsw, i, MII_BMCR, val);
}
/* Force MAC link down before reset */
mt753x_reg_write(gsw, PMCR(5), FORCE_MODE);
mt753x_reg_write(gsw, PMCR(6), FORCE_MODE);
/* Switch soft reset */
/* BUG: sw reset causes gsw int flooding */
mt753x_reg_write(gsw, SYS_CTRL, SW_PHY_RST | SW_SYS_RST | SW_REG_RST);
usleep_range(10, 20);
/* global mac control settings configuration */
mt753x_reg_write(gsw, GMACCR,
LATE_COL_DROP | (15 << MTCC_LMT_S) |
(2 << MAX_RX_JUMBO_S) | RX_PKT_LEN_MAX_JUMBO);
/* Output INTR selected */
val = mt753x_reg_read(gsw, TOP_SIG_CTRL);
val &= ~OUTPUT_INTR_M;
val |= (3 << OUTPUT_INTR_S);
mt753x_reg_write(gsw, TOP_SIG_CTRL, val);
mt7530_core_pll_setup(gsw);
mt7530_mac_port_setup(gsw);
return 0;
}
static int mt7530_sw_post_init(struct gsw_mt753x *gsw)
{
int i;
u32 val;
mt7530_phy_setting(gsw);
for (i = 0; i < MT753X_NUM_PHYS; i++) {
val = gsw->mii_read(gsw, i, MII_BMCR);
val &= ~BMCR_PDOWN;
gsw->mii_write(gsw, i, MII_BMCR, val);
}
return 0;
}
struct mt753x_sw_id mt7530_id = {
.model = MT7530,
.detect = mt7530_sw_detect,
.init = mt7530_sw_init,
.post_init = mt7530_sw_post_init
};
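/* Framework-side sketch (the probe flow is assumed, not shown in this
 * file): the mt753x MDIO probe code is expected to walk the registered
 * IDs and invoke the callbacks in order:
 *
 *	struct chip_rev crev;
 *
 *	if (!mt7530_id.detect(gsw, &crev)) {
 *		dev_info(gsw->dev, "%s rev %u\n", crev.name, crev.rev);
 *		mt7530_id.init(gsw);
 *		mt7530_id.post_init(gsw);
 *	}
 */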


@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 MediaTek Inc.
*/
#ifndef _MT7530_H_
#define _MT7530_H_
#include "mt753x.h"
extern struct mt753x_sw_id mt7530_id;
#endif /* _MT7530_H_ */

File diff suppressed because it is too large


@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 MediaTek Inc.
*/
#ifndef _MT7531_H_
#define _MT7531_H_
#include "mt753x.h"
extern struct mt753x_sw_id mt7531_id;
#endif /* _MT7531_H_ */

View File

@ -0,0 +1,224 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MT753X_H_
#define _MT753X_H_
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/workqueue.h>
#include <linux/gpio/consumer.h>
#ifdef CONFIG_SWCONFIG
#include <linux/switch.h>
#endif
#include "mt753x_vlan.h"
#define MT753X_DFL_CPU_PORT 6
#define MT753X_NUM_PHYS 5
#define MT753X_DFL_SMI_ADDR 0x1f
#define MT753X_SMI_ADDR_MASK 0x1f
struct gsw_mt753x;
enum mt753x_model {
MT7530 = 0x7530,
MT7531 = 0x7531
};
struct mt753x_port_cfg {
struct device_node *np;
int phy_mode;
u32 enabled: 1;
u32 force_link: 1;
u32 speed: 2;
u32 duplex: 1;
bool ssc_on;
bool stag_on;
};
struct mt753x_phy {
struct gsw_mt753x *gsw;
struct net_device netdev;
struct phy_device *phydev;
};
struct gsw_mt753x {
u32 id;
struct device *dev;
struct mii_bus *host_bus;
struct mii_bus *gphy_bus;
struct mutex mii_lock; /* MII access lock */
u32 smi_addr;
u32 phy_base;
int direct_phy_access;
enum mt753x_model model;
const char *name;
struct mt753x_port_cfg port5_cfg;
struct mt753x_port_cfg port6_cfg;
bool hw_phy_cal;
bool phy_status_poll;
struct mt753x_phy phys[MT753X_NUM_PHYS];
// int phy_irqs[PHY_MAX_ADDR]; //FIXME
int phy_link_sts;
int irq;
int reset_pin;
struct work_struct irq_worker;
#ifdef CONFIG_SWCONFIG
struct switch_dev swdev;
u32 cpu_port;
#endif
int global_vlan_enable;
struct mt753x_vlan_entry vlan_entries[MT753X_NUM_VLANS];
struct mt753x_port_entry port_entries[MT753X_NUM_PORTS];
int (*mii_read)(struct gsw_mt753x *gsw, int phy, int reg);
void (*mii_write)(struct gsw_mt753x *gsw, int phy, int reg, u16 val);
int (*mmd_read)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
void (*mmd_write)(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
u16 val);
struct list_head list;
};
struct chip_rev {
const char *name;
u32 rev;
};
struct mt753x_sw_id {
enum mt753x_model model;
int (*detect)(struct gsw_mt753x *gsw, struct chip_rev *crev);
int (*init)(struct gsw_mt753x *gsw);
int (*post_init)(struct gsw_mt753x *gsw);
};
extern struct list_head mt753x_devs;
struct gsw_mt753x *mt753x_get_gsw(u32 id);
struct gsw_mt753x *mt753x_get_first_gsw(void);
void mt753x_put_gsw(void);
void mt753x_lock_gsw(void);
u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg);
void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val);
int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg);
void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val);
int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
u16 val);
int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg);
void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
u16 val);
int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr);
void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
u32 data);
void mt753x_irq_worker(struct work_struct *work);
void mt753x_irq_enable(struct gsw_mt753x *gsw);
int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
int extphy_init(struct gsw_mt753x *gsw, int addr);
/* MDIO Indirect Access Registers */
#define MII_MMD_ACC_CTL_REG 0x0d
#define MMD_CMD_S 14
#define MMD_CMD_M 0xc000
#define MMD_DEVAD_S 0
#define MMD_DEVAD_M 0x1f
/* MMD_CMD: MMD commands */
#define MMD_ADDR 0
#define MMD_DATA 1
#define MII_MMD_ADDR_DATA_REG 0x0e
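/* The two registers above implement the standard Clause 22 indirect MMD
 * access sequence (IEEE 802.3 Annex 22D), as used by
 * mt753x_mmd_ind_read/write below:
 *   1. write (MMD_ADDR << MMD_CMD_S) | devad to register 0x0d
 *   2. write the target MMD register number to register 0x0e
 *   3. write (MMD_DATA << MMD_CMD_S) | devad to register 0x0d
 *   4. read or write the data itself through register 0x0e
 */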
/* Procedure of MT753x Internal Register Access
*
* 1. Internal Register Address
*
* The MT753x has a 16-bit register address and each register is 32-bit.
* This means the lowest two bits are not used as the register address is
* 4-byte aligned.
*
* Rest of the valid bits are divided into two parts:
* Bit 15..6 is the Page address
* Bit 5..2 is the low address
*
* -------------------------------------------------------------------
* | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
* |----------------------------------------|---------------|--------|
* | Page Address | Address | Unused |
* -------------------------------------------------------------------
*
* 2. MDIO access timing
*
* The MT753x uses the following MDIO timing for a single register read
*
* Phase 1: Write Page Address
* -------------------------------------------------------------------
* | ST | OP | PHY_ADDR | TYPE | RSVD | TA | RSVD | PAGE_ADDR |
* -------------------------------------------------------------------
* | 01 | 01 | 11111 | 1 | 1111 | xx | 00000 | REG_ADDR[15..6] |
* -------------------------------------------------------------------
*
* Phase 2: Write low Address & Read low word
* -------------------------------------------------------------------
* | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA |
* -------------------------------------------------------------------
* | 01 | 10 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] |
* -------------------------------------------------------------------
*
* Phase 3: Read high word
* -------------------------------------------------------------------
* | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA |
* -------------------------------------------------------------------
* | 01 | 10 | 11111 | 1 | 0000 | xx | DATA[31..16] |
* -------------------------------------------------------------------
*
* The MT753x uses the following MDIO timing for a single register write
*
* Phase 1: Write Page Address (The same as read)
*
* Phase 2: Write low Address and low word
* -------------------------------------------------------------------
* | ST | OP | PHY_ADDR | TYPE | LOW_ADDR | TA | DATA |
* -------------------------------------------------------------------
* | 01 | 01 | 11111 | 0 | REG_ADDR[5..2] | xx | DATA[15..0] |
* -------------------------------------------------------------------
*
* Phase 3: write high word
* -------------------------------------------------------------------
* | ST | OP | PHY_ADDR | TYPE | RSVD | TA | DATA |
* -------------------------------------------------------------------
* | 01 | 01 | 11111 | 1 | 0000 | xx | DATA[31..16] |
* -------------------------------------------------------------------
*
*/
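/* Worked example: accessing SYS_CTRL (0x7000) uses page address
 * (0x7000 & 0xffc0) >> 6 = 0x1c0 (written to PHY register 0x1f), low
 * address (0x7000 & 0x3c) >> 2 = 0x0, and the high 16 bits are moved
 * through PHY register 0x10, exactly as mt753x_reg_read/write do.
 */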
/* Internal Register Address fields */
#define MT753X_REG_PAGE_ADDR_S 6
#define MT753X_REG_PAGE_ADDR_M 0xffc0
#define MT753X_REG_ADDR_S 2
#define MT753X_REG_ADDR_M 0x3c
#endif /* _MT753X_H_ */

View File

@ -0,0 +1,90 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include "mt753x.h"
#include "mt753x_regs.h"
void mt753x_irq_enable(struct gsw_mt753x *gsw)
{
u32 val;
int i;
/* Record initial PHY link status */
for (i = 0; i < MT753X_NUM_PHYS; i++) {
val = gsw->mii_read(gsw, i, MII_BMSR);
if (val & BMSR_LSTATUS)
gsw->phy_link_sts |= BIT(i);
}
val = BIT(MT753X_NUM_PHYS) - 1;
mt753x_reg_write(gsw, SYS_INT_EN, val);
}
static void display_port_link_status(struct gsw_mt753x *gsw, u32 port)
{
u32 pmsr, speed_bits;
const char *speed;
pmsr = mt753x_reg_read(gsw, PMSR(port));
speed_bits = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S;
switch (speed_bits) {
case MAC_SPD_10:
speed = "10Mbps";
break;
case MAC_SPD_100:
speed = "100Mbps";
break;
case MAC_SPD_1000:
speed = "1Gbps";
break;
case MAC_SPD_2500:
speed = "2.5Gbps";
break;
default:
speed = "unknown";
break;
}
if (pmsr & MAC_LNK_STS) {
dev_info(gsw->dev, "Port %d Link is Up - %s/%s\n",
port, speed, (pmsr & MAC_DPX_STS) ? "Full" : "Half");
} else {
dev_info(gsw->dev, "Port %d Link is Down\n", port);
}
}
void mt753x_irq_worker(struct work_struct *work)
{
struct gsw_mt753x *gsw;
u32 sts, physts, laststs;
int i;
gsw = container_of(work, struct gsw_mt753x, irq_worker);
sts = mt753x_reg_read(gsw, SYS_INT_STS);
/* Check for changed PHY link status */
for (i = 0; i < MT753X_NUM_PHYS; i++) {
if (!(sts & PHY_LC_INT(i)))
continue;
laststs = gsw->phy_link_sts & BIT(i);
physts = !!(gsw->mii_read(gsw, i, MII_BMSR) & BMSR_LSTATUS);
physts <<= i;
if (physts ^ laststs) {
gsw->phy_link_sts ^= BIT(i);
display_port_link_status(gsw, i);
}
}
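/* Ack the serviced status bits (write-1-to-clear) and re-arm the line */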
mt753x_reg_write(gsw, SYS_INT_STS, sts);
enable_irq(gsw->irq);
}

View File

@ -0,0 +1,861 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reset.h>
#include <linux/hrtimer.h>
#include <linux/mii.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/phy.h>
#include "mt753x.h"
#include "mt753x_swconfig.h"
#include "mt753x_regs.h"
#include "mt753x_nl.h"
#include "mt7530.h"
#include "mt7531.h"
static u32 mt753x_id;
struct list_head mt753x_devs;
static DEFINE_MUTEX(mt753x_devs_lock);
static struct mt753x_sw_id *mt753x_sw_ids[] = {
&mt7530_id,
&mt7531_id,
};
u32 mt753x_reg_read(struct gsw_mt753x *gsw, u32 reg)
{
u32 high, low;
mutex_lock(&gsw->host_bus->mdio_lock);
gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);
low = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr,
(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S);
high = gsw->host_bus->read(gsw->host_bus, gsw->smi_addr, 0x10);
mutex_unlock(&gsw->host_bus->mdio_lock);
return (high << 16) | (low & 0xffff);
}
void mt753x_reg_write(struct gsw_mt753x *gsw, u32 reg, u32 val)
{
mutex_lock(&gsw->host_bus->mdio_lock);
gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x1f,
(reg & MT753X_REG_PAGE_ADDR_M) >> MT753X_REG_PAGE_ADDR_S);
gsw->host_bus->write(gsw->host_bus, gsw->smi_addr,
(reg & MT753X_REG_ADDR_M) >> MT753X_REG_ADDR_S, val & 0xffff);
gsw->host_bus->write(gsw->host_bus, gsw->smi_addr, 0x10, val >> 16);
mutex_unlock(&gsw->host_bus->mdio_lock);
}
/* Indirect MDIO clause 22/45 access */
static int mt753x_mii_rw(struct gsw_mt753x *gsw, int phy, int reg, u16 data,
u32 cmd, u32 st)
{
ktime_t timeout;
u32 val, timeout_us;
int ret = 0;
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = mt753x_reg_read(gsw, PHY_IAC);
if ((val & PHY_ACS_ST) == 0)
break;
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
}
val = (st << MDIO_ST_S) |
((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
val |= data & MDIO_RW_DATA_M;
mt753x_reg_write(gsw, PHY_IAC, val | PHY_ACS_ST);
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = mt753x_reg_read(gsw, PHY_IAC);
if ((val & PHY_ACS_ST) == 0)
break;
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
}
if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
val = mt753x_reg_read(gsw, PHY_IAC);
ret = val & MDIO_RW_DATA_M;
}
return ret;
}
int mt753x_mii_read(struct gsw_mt753x *gsw, int phy, int reg)
{
int val;
if (phy < MT753X_NUM_PHYS)
phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
val = mt753x_mii_rw(gsw, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
mutex_unlock(&gsw->mii_lock);
return val;
}
void mt753x_mii_write(struct gsw_mt753x *gsw, int phy, int reg, u16 val)
{
if (phy < MT753X_NUM_PHYS)
phy = (gsw->phy_base + phy) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
mt753x_mii_rw(gsw, phy, reg, val, MDIO_CMD_WRITE, MDIO_ST_C22);
mutex_unlock(&gsw->mii_lock);
}
int mt753x_mmd_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
{
int val;
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
val = mt753x_mii_rw(gsw, addr, devad, 0, MDIO_CMD_READ_C45,
MDIO_ST_C45);
mutex_unlock(&gsw->mii_lock);
return val;
}
void mt753x_mmd_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
u16 val)
{
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
mt753x_mii_rw(gsw, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
mt753x_mii_rw(gsw, addr, devad, val, MDIO_CMD_WRITE, MDIO_ST_C45);
mutex_unlock(&gsw->mii_lock);
}
int mt753x_mmd_ind_read(struct gsw_mt753x *gsw, int addr, int devad, u16 reg)
{
u16 val;
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
(MMD_ADDR << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
MDIO_CMD_WRITE, MDIO_ST_C22);
mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
MDIO_CMD_WRITE, MDIO_ST_C22);
mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
(MMD_DATA << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
MDIO_CMD_WRITE, MDIO_ST_C22);
val = mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, 0,
MDIO_CMD_READ, MDIO_ST_C22);
mutex_unlock(&gsw->mii_lock);
return val;
}
void mt753x_mmd_ind_write(struct gsw_mt753x *gsw, int addr, int devad, u16 reg,
u16 val)
{
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
mutex_lock(&gsw->mii_lock);
mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
(MMD_ADDR << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
MDIO_CMD_WRITE, MDIO_ST_C22);
mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, reg,
MDIO_CMD_WRITE, MDIO_ST_C22);
mt753x_mii_rw(gsw, addr, MII_MMD_ACC_CTL_REG,
(MMD_DATA << MMD_CMD_S) |
((devad << MMD_DEVAD_S) & MMD_DEVAD_M),
MDIO_CMD_WRITE, MDIO_ST_C22);
mt753x_mii_rw(gsw, addr, MII_MMD_ADDR_DATA_REG, val,
MDIO_CMD_WRITE, MDIO_ST_C22);
mutex_unlock(&gsw->mii_lock);
}
static inline int mt753x_get_duplex(const struct device_node *np)
{
return of_property_read_bool(np, "full-duplex");
}
static void mt753x_load_port_cfg(struct gsw_mt753x *gsw)
{
struct device_node *port_np;
struct device_node *fixed_link_node;
struct mt753x_port_cfg *port_cfg;
u32 port;
for_each_child_of_node(gsw->dev->of_node, port_np) {
if (!of_device_is_compatible(port_np, "mediatek,mt753x-port"))
continue;
if (!of_device_is_available(port_np))
continue;
if (of_property_read_u32(port_np, "reg", &port))
continue;
switch (port) {
case 5:
port_cfg = &gsw->port5_cfg;
break;
case 6:
port_cfg = &gsw->port6_cfg;
break;
default:
continue;
}
if (port_cfg->enabled) {
dev_info(gsw->dev, "duplicated node for port%d\n", port);
continue;
continue;
}
port_cfg->np = port_np;
port_cfg->phy_mode = of_get_phy_mode(port_np);
if (port_cfg->phy_mode < 0) {
dev_info(gsw->dev, "invalid phy-mode for port %d\n", port);
continue;
}
fixed_link_node = of_get_child_by_name(port_np, "fixed-link");
if (fixed_link_node) {
u32 speed;
port_cfg->force_link = 1;
port_cfg->duplex = mt753x_get_duplex(fixed_link_node);
if (of_property_read_u32(fixed_link_node, "speed",
&speed)) {
of_node_put(fixed_link_node);
continue;
}
of_node_put(fixed_link_node);
switch (speed) {
case 10:
port_cfg->speed = MAC_SPD_10;
break;
case 100:
port_cfg->speed = MAC_SPD_100;
break;
case 1000:
port_cfg->speed = MAC_SPD_1000;
break;
case 2500:
port_cfg->speed = MAC_SPD_2500;
break;
default:
dev_info(gsw->dev, "incorrect speed %d\n",
speed);
continue;
}
}
port_cfg->ssc_on = of_property_read_bool(port_cfg->np,
"mediatek,ssc-on");
port_cfg->stag_on = of_property_read_bool(port_cfg->np,
"mediatek,stag-on");
port_cfg->enabled = 1;
}
}
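/* Example (a sketch only; node names and phandles are placeholders) of a
 * device tree fragment that the probe and parse code above consumes:
 *
 *	switch@1f {
 *		compatible = "mediatek,mt753x";
 *		mediatek,mdio = <&mdio0>;
 *		mediatek,smi-addr = <31>;
 *		mediatek,portmap = "llllw";
 *		reset-gpios = <&pio 33 0>;
 *		mediatek,phy-poll;
 *
 *		port@6 {
 *			compatible = "mediatek,mt753x-port";
 *			reg = <6>;
 *			phy-mode = "rgmii";
 *			fixed-link {
 *				speed = <1000>;
 *				full-duplex;
 *			};
 *		};
 *	};
 */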
void mt753x_tr_write(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr,
u32 data)
{
ktime_t timeout;
u32 timeout_us;
u32 val;
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
if (!!(val & PHY_TR_PKT_XMT_STA))
break;
if (ktime_compare(ktime_get(), timeout) > 0)
goto out;
}
gsw->mii_write(gsw, addr, PHY_TR_LOW_DATA, PHY_TR_LOW_VAL(data));
gsw->mii_write(gsw, addr, PHY_TR_HIGH_DATA, PHY_TR_HIGH_VAL(data));
val = PHY_TR_PKT_XMT_STA | (PHY_TR_WRITE << PHY_TR_WR_S) |
(ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
(daddr << PHY_TR_DATA_ADDR_S);
gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
if (!!(val & PHY_TR_PKT_XMT_STA))
break;
if (ktime_compare(ktime_get(), timeout) > 0)
goto out;
}
out:
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
}
int mt753x_tr_read(struct gsw_mt753x *gsw, int addr, u8 ch, u8 node, u8 daddr)
{
ktime_t timeout;
u32 timeout_us;
u32 val;
u8 val_h;
if (addr < MT753X_NUM_PHYS)
addr = (gsw->phy_base + addr) & MT753X_SMI_ADDR_MASK;
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
if (!!(val & PHY_TR_PKT_XMT_STA))
break;
if (ktime_compare(ktime_get(), timeout) > 0) {
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
return -ETIMEDOUT;
}
}
val = PHY_TR_PKT_XMT_STA | (PHY_TR_READ << PHY_TR_WR_S) |
(ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
(daddr << PHY_TR_DATA_ADDR_S);
gsw->mii_write(gsw, addr, PHY_TR_CTRL, val);
timeout_us = 100000;
timeout = ktime_add_us(ktime_get(), timeout_us);
while (1) {
val = gsw->mii_read(gsw, addr, PHY_TR_CTRL);
if (!!(val & PHY_TR_PKT_XMT_STA))
break;
if (ktime_compare(ktime_get(), timeout) > 0) {
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
return -ETIMEDOUT;
}
}
val = gsw->mii_read(gsw, addr, PHY_TR_LOW_DATA);
val_h = gsw->mii_read(gsw, addr, PHY_TR_HIGH_DATA);
val |= (val_h << 16);
gsw->mii_write(gsw, addr, PHY_CL22_PAGE_CTRL, 0);
return val;
}
static void mt753x_add_gsw(struct gsw_mt753x *gsw)
{
mutex_lock(&mt753x_devs_lock);
gsw->id = mt753x_id++;
INIT_LIST_HEAD(&gsw->list);
list_add_tail(&gsw->list, &mt753x_devs);
mutex_unlock(&mt753x_devs_lock);
}
static void mt753x_remove_gsw(struct gsw_mt753x *gsw)
{
mutex_lock(&mt753x_devs_lock);
list_del(&gsw->list);
mutex_unlock(&mt753x_devs_lock);
}
struct gsw_mt753x *mt753x_get_gsw(u32 id)
{
struct gsw_mt753x *dev;
mutex_lock(&mt753x_devs_lock);
list_for_each_entry(dev, &mt753x_devs, list) {
if (dev->id == id)
return dev;
}
mutex_unlock(&mt753x_devs_lock);
return NULL;
}
struct gsw_mt753x *mt753x_get_first_gsw(void)
{
struct gsw_mt753x *dev;
mutex_lock(&mt753x_devs_lock);
list_for_each_entry(dev, &mt753x_devs, list)
return dev;
mutex_unlock(&mt753x_devs_lock);
return NULL;
}
void mt753x_put_gsw(void)
{
mutex_unlock(&mt753x_devs_lock);
}
void mt753x_lock_gsw(void)
{
mutex_lock(&mt753x_devs_lock);
}
static int mt753x_hw_reset(struct gsw_mt753x *gsw)
{
struct device_node *np = gsw->dev->of_node;
struct reset_control *rstc;
int mcm;
int ret = -EINVAL;
mcm = of_property_read_bool(np, "mediatek,mcm");
if (mcm) {
rstc = devm_reset_control_get(gsw->dev, "mcm");
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
dev_err(gsw->dev, "Missing reset ctrl of switch\n");
return ret;
}
reset_control_assert(rstc);
msleep(30);
reset_control_deassert(rstc);
gsw->reset_pin = -1;
return 0;
}
gsw->reset_pin = of_get_named_gpio(np, "reset-gpios", 0);
if (gsw->reset_pin < 0) {
dev_err(gsw->dev, "Missing reset pin of switch\n");
return ret;
}
ret = devm_gpio_request(gsw->dev, gsw->reset_pin, "mt753x-reset");
if (ret) {
dev_info(gsw->dev, "Failed to request gpio %d\n",
gsw->reset_pin);
return ret;
}
gpio_direction_output(gsw->reset_pin, 0);
msleep(30);
gpio_set_value(gsw->reset_pin, 1);
msleep(500);
return 0;
}
#if 1 /* FIXME: leftover debug gate around embedded MDIO bus support */
static int mt753x_mdio_read(struct mii_bus *bus, int addr, int reg)
{
struct gsw_mt753x *gsw = bus->priv;
return gsw->mii_read(gsw, addr, reg);
}
static int mt753x_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct gsw_mt753x *gsw = bus->priv;
gsw->mii_write(gsw, addr, reg, val);
return 0;
}
static const struct net_device_ops mt753x_dummy_netdev_ops = {
};
static void mt753x_phy_link_handler(struct net_device *dev)
{
struct mt753x_phy *phy = container_of(dev, struct mt753x_phy, netdev);
struct phy_device *phydev = phy->phydev;
struct gsw_mt753x *gsw = phy->gsw;
u32 port = phy - gsw->phys;
if (phydev->link) {
dev_info(gsw->dev,
"Port %d Link is Up - %s/%s - flow control %s\n",
port, phy_speed_to_str(phydev->speed),
(phydev->duplex == DUPLEX_FULL) ? "Full" : "Half",
phydev->pause ? "rx/tx" : "off");
} else {
dev_info(gsw->dev, "Port %d Link is Down\n", port);
}
}
static void mt753x_connect_internal_phys(struct gsw_mt753x *gsw,
struct device_node *mii_np)
{
struct device_node *phy_np;
struct mt753x_phy *phy;
int phy_mode;
u32 phyad;
if (!mii_np)
return;
for_each_child_of_node(mii_np, phy_np) {
if (of_property_read_u32(phy_np, "reg", &phyad))
continue;
if (phyad >= MT753X_NUM_PHYS)
continue;
phy_mode = of_get_phy_mode(phy_np);
if (phy_mode < 0) {
dev_info(gsw->dev, "incorrect phy-mode %d for PHY %d\n",
phy_mode, phyad);
continue;
}
phy = &gsw->phys[phyad];
phy->gsw = gsw;
init_dummy_netdev(&phy->netdev);
phy->netdev.netdev_ops = &mt753x_dummy_netdev_ops;
phy->phydev = of_phy_connect(&phy->netdev, phy_np,
mt753x_phy_link_handler, 0, phy_mode);
if (!phy->phydev) {
dev_info(gsw->dev, "could not connect to PHY %d\n",
phyad);
continue;
}
phy_start(phy->phydev);
}
}
static void mt753x_disconnect_internal_phys(struct gsw_mt753x *gsw)
{
int i;
for (i = 0; i < ARRAY_SIZE(gsw->phys); i++) {
if (gsw->phys[i].phydev) {
phy_stop(gsw->phys[i].phydev);
phy_disconnect(gsw->phys[i].phydev);
gsw->phys[i].phydev = NULL;
}
}
}
static int mt753x_mdio_register(struct gsw_mt753x *gsw)
{
struct device_node *mii_np;
int i, ret;
mii_np = of_get_child_by_name(gsw->dev->of_node, "mdio-bus");
if (mii_np && !of_device_is_available(mii_np)) {
ret = -ENODEV;
goto err_put_node;
}
gsw->gphy_bus = devm_mdiobus_alloc(gsw->dev);
if (!gsw->gphy_bus) {
ret = -ENOMEM;
goto err_put_node;
}
gsw->gphy_bus->name = "mt753x_mdio";
gsw->gphy_bus->read = mt753x_mdio_read;
gsw->gphy_bus->write = mt753x_mdio_write;
gsw->gphy_bus->priv = gsw;
gsw->gphy_bus->parent = gsw->dev;
gsw->gphy_bus->phy_mask = BIT(MT753X_NUM_PHYS) - 1;
// gsw->gphy_bus->irq = gsw->phy_irqs;
for (i = 0; i < PHY_MAX_ADDR; i++)
gsw->gphy_bus->irq[i] = PHY_POLL;
if (mii_np)
snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "%s@%s",
mii_np->name, gsw->dev->of_node->name);
else
snprintf(gsw->gphy_bus->id, MII_BUS_ID_SIZE, "mdio@%s",
gsw->dev->of_node->name);
ret = of_mdiobus_register(gsw->gphy_bus, mii_np);
if (ret) {
devm_mdiobus_free(gsw->dev, gsw->gphy_bus);
gsw->gphy_bus = NULL;
} else {
if (gsw->phy_status_poll)
mt753x_connect_internal_phys(gsw, mii_np);
}
err_put_node:
if (mii_np)
of_node_put(mii_np);
return ret;
}
#endif
static irqreturn_t mt753x_irq_handler(int irq, void *dev)
{
struct gsw_mt753x *gsw = dev;
disable_irq_nosync(gsw->irq);
schedule_work(&gsw->irq_worker);
return IRQ_HANDLED;
}
static int mt753x_probe(struct platform_device *pdev)
{
struct gsw_mt753x *gsw;
struct mt753x_sw_id *sw = NULL;
struct device_node *np = pdev->dev.of_node;
struct device_node *mdio;
struct mii_bus *mdio_bus;
int ret = -EINVAL;
struct chip_rev rev;
struct mt753x_mapping *map;
int i;
mdio = of_parse_phandle(np, "mediatek,mdio", 0);
if (!mdio)
return -EINVAL;
mdio_bus = of_mdio_find_bus(mdio);
if (!mdio_bus)
return -EPROBE_DEFER;
gsw = devm_kzalloc(&pdev->dev, sizeof(struct gsw_mt753x), GFP_KERNEL);
if (!gsw)
return -ENOMEM;
gsw->host_bus = mdio_bus;
gsw->dev = &pdev->dev;
mutex_init(&gsw->mii_lock);
/* Switch hard reset */
if (mt753x_hw_reset(gsw))
goto fail;
/* Fetch the SMI address first */
if (of_property_read_u32(np, "mediatek,smi-addr", &gsw->smi_addr))
gsw->smi_addr = MT753X_DFL_SMI_ADDR;
/* Get LAN/WAN port mapping */
map = mt753x_find_mapping(np);
if (map) {
mt753x_apply_mapping(gsw, map);
gsw->global_vlan_enable = 1;
dev_info(gsw->dev, "LAN/WAN VLAN setting=%s\n", map->name);
}
/* Load MAC port configurations */
mt753x_load_port_cfg(gsw);
/* Check for valid switch and then initialize */
for (i = 0; i < ARRAY_SIZE(mt753x_sw_ids); i++) {
if (!mt753x_sw_ids[i]->detect(gsw, &rev)) {
sw = mt753x_sw_ids[i];
gsw->name = rev.name;
gsw->model = sw->model;
dev_info(gsw->dev, "Switch is MediaTek %s rev %d",
gsw->name, rev.rev);
/* Initialize the switch */
ret = sw->init(gsw);
if (ret)
goto fail;
break;
}
}
if (i >= ARRAY_SIZE(mt753x_sw_ids)) {
dev_err(gsw->dev, "No mt753x switch found\n");
goto fail;
}
gsw->irq = platform_get_irq(pdev, 0);
if (gsw->irq >= 0) {
/* Init the worker before the IRQ can fire and schedule it */
INIT_WORK(&gsw->irq_worker, mt753x_irq_worker);
ret = devm_request_irq(gsw->dev, gsw->irq, mt753x_irq_handler,
0, dev_name(gsw->dev), gsw);
if (ret) {
dev_err(gsw->dev, "Failed to request irq %d\n",
gsw->irq);
goto fail;
}
}
platform_set_drvdata(pdev, gsw);
gsw->phy_status_poll = of_property_read_bool(gsw->dev->of_node,
"mediatek,phy-poll");
mt753x_add_gsw(gsw);
#if 1 /* FIXME: leftover debug gate */
mt753x_mdio_register(gsw);
#endif
mt753x_swconfig_init(gsw);
if (sw->post_init)
sw->post_init(gsw);
if (gsw->irq >= 0)
mt753x_irq_enable(gsw);
return 0;
fail:
devm_kfree(&pdev->dev, gsw);
return ret;
}
static int mt753x_remove(struct platform_device *pdev)
{
struct gsw_mt753x *gsw = platform_get_drvdata(pdev);
if (gsw->irq >= 0)
cancel_work_sync(&gsw->irq_worker);
if (gsw->reset_pin >= 0)
devm_gpio_free(&pdev->dev, gsw->reset_pin);
#ifdef CONFIG_SWCONFIG
mt753x_swconfig_destroy(gsw);
#endif
#if 1 /* FIXME: leftover debug gate */
mt753x_disconnect_internal_phys(gsw);
mdiobus_unregister(gsw->gphy_bus);
#endif
mt753x_remove_gsw(gsw);
platform_set_drvdata(pdev, NULL);
return 0;
}
static const struct of_device_id mt753x_ids[] = {
{ .compatible = "mediatek,mt753x" },
{ },
};
MODULE_DEVICE_TABLE(of, mt753x_ids);
static struct platform_driver mt753x_driver = {
.probe = mt753x_probe,
.remove = mt753x_remove,
.driver = {
.name = "mt753x",
.of_match_table = mt753x_ids,
},
};
static int __init mt753x_init(void)
{
int ret;
INIT_LIST_HEAD(&mt753x_devs);
ret = platform_driver_register(&mt753x_driver);
if (ret)
return ret;
mt753x_nl_init();
return 0;
}
module_init(mt753x_init);
static void __exit mt753x_exit(void)
{
mt753x_nl_exit();
platform_driver_unregister(&mt753x_driver);
}
module_exit(mt753x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("Driver for MediaTek MT753x Gigabit Switch");

View File

@ -0,0 +1,381 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/genetlink.h>
#include "mt753x.h"
#include "mt753x_nl.h"
struct mt753x_nl_cmd_item {
enum mt753x_cmd cmd;
bool require_dev;
int (*process)(struct genl_info *info, struct gsw_mt753x *gsw);
u32 nr_required_attrs;
const enum mt753x_attr *required_attrs;
};
static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info);
static const struct nla_policy mt753x_nl_cmd_policy[] = {
[MT753X_ATTR_TYPE_MESG] = { .type = NLA_STRING },
[MT753X_ATTR_TYPE_PHY] = { .type = NLA_S32 },
[MT753X_ATTR_TYPE_REG] = { .type = NLA_S32 },
[MT753X_ATTR_TYPE_VAL] = { .type = NLA_S32 },
[MT753X_ATTR_TYPE_DEV_NAME] = { .type = NLA_S32 },
[MT753X_ATTR_TYPE_DEV_ID] = { .type = NLA_S32 },
[MT753X_ATTR_TYPE_DEVAD] = { .type = NLA_S32 },
};
static const struct genl_ops mt753x_nl_ops[] = {
{
.cmd = MT753X_CMD_REQUEST,
.doit = mt753x_nl_response,
// .policy = mt753x_nl_cmd_policy,
.flags = GENL_ADMIN_PERM,
}, {
.cmd = MT753X_CMD_READ,
.doit = mt753x_nl_response,
// .policy = mt753x_nl_cmd_policy,
.flags = GENL_ADMIN_PERM,
}, {
.cmd = MT753X_CMD_WRITE,
.doit = mt753x_nl_response,
// .policy = mt753x_nl_cmd_policy,
.flags = GENL_ADMIN_PERM,
},
};
static struct genl_family mt753x_nl_family = {
.name = MT753X_GENL_NAME,
.version = MT753X_GENL_VERSION,
.maxattr = MT753X_NR_ATTR_TYPE,
.ops = mt753x_nl_ops,
.n_ops = ARRAY_SIZE(mt753x_nl_ops),
.policy = mt753x_nl_cmd_policy,
};
static int mt753x_nl_list_devs(char *buff, int size)
{
struct gsw_mt753x *gsw;
int len, total = 0;
char buf[80];
memset(buff, 0, size);
mt753x_lock_gsw();
list_for_each_entry(gsw, &mt753x_devs, list) {
len = snprintf(buf, sizeof(buf),
"id: %d, model: %s, node: %s\n",
gsw->id, gsw->name, gsw->dev->of_node->name);
strncat(buff, buf, size - total - 1);
total += len;
}
mt753x_put_gsw();
return total;
}
static int mt753x_nl_prepare_reply(struct genl_info *info, u8 cmd,
struct sk_buff **skbp)
{
struct sk_buff *msg;
void *reply;
if (!info)
return -EINVAL;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
/* Construct send-back message header */
reply = genlmsg_put(msg, info->snd_portid, info->snd_seq,
&mt753x_nl_family, 0, cmd);
if (!reply) {
nlmsg_free(msg);
return -EINVAL;
}
*skbp = msg;
return 0;
}
static int mt753x_nl_send_reply(struct sk_buff *skb, struct genl_info *info)
{
struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
void *reply = genlmsg_data(genlhdr);
/* Finalize a generic netlink message (update message header) */
genlmsg_end(skb, reply);
/* reply to a request */
return genlmsg_reply(skb, info);
}
static s32 mt753x_nl_get_s32(struct genl_info *info, enum mt753x_attr attr,
s32 defval)
{
struct nlattr *na;
na = info->attrs[attr];
if (na)
return nla_get_s32(na);
return defval;
}
static int mt753x_nl_get_u32(struct genl_info *info, enum mt753x_attr attr,
u32 *val)
{
struct nlattr *na;
na = info->attrs[attr];
if (na) {
*val = nla_get_u32(na);
return 0;
}
return -1;
}
static struct gsw_mt753x *mt753x_nl_parse_find_gsw(struct genl_info *info)
{
struct gsw_mt753x *gsw;
struct nlattr *na;
int gsw_id;
na = info->attrs[MT753X_ATTR_TYPE_DEV_ID];
if (na) {
gsw_id = nla_get_s32(na);
if (gsw_id >= 0)
gsw = mt753x_get_gsw(gsw_id);
else
gsw = mt753x_get_first_gsw();
} else {
gsw = mt753x_get_first_gsw();
}
return gsw;
}
static int mt753x_nl_get_swdevs(struct genl_info *info, struct gsw_mt753x *gsw)
{
struct sk_buff *rep_skb = NULL;
char dev_info[512];
int ret;
ret = mt753x_nl_list_devs(dev_info, sizeof(dev_info));
if (!ret) {
pr_info("No switch registered\n");
return -EINVAL;
}
ret = mt753x_nl_prepare_reply(info, MT753X_CMD_REPLY, &rep_skb);
if (ret < 0)
goto err;
ret = nla_put_string(rep_skb, MT753X_ATTR_TYPE_MESG, dev_info);
if (ret < 0)
goto err;
return mt753x_nl_send_reply(rep_skb, info);
err:
if (rep_skb)
nlmsg_free(rep_skb);
return ret;
}
static int mt753x_nl_reply_read(struct genl_info *info, struct gsw_mt753x *gsw)
{
struct sk_buff *rep_skb = NULL;
s32 phy, devad, reg;
int value;
int ret = 0;
phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
if (reg < 0)
goto err;
ret = mt753x_nl_prepare_reply(info, MT753X_CMD_READ, &rep_skb);
if (ret < 0)
goto err;
if (phy >= 0) {
if (devad < 0)
value = gsw->mii_read(gsw, phy, reg);
else
value = gsw->mmd_read(gsw, phy, devad, reg);
} else {
value = mt753x_reg_read(gsw, reg);
}
ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
if (ret < 0)
goto err;
ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
if (ret < 0)
goto err;
return mt753x_nl_send_reply(rep_skb, info);
err:
if (rep_skb)
nlmsg_free(rep_skb);
return ret;
}
static int mt753x_nl_reply_write(struct genl_info *info, struct gsw_mt753x *gsw)
{
struct sk_buff *rep_skb = NULL;
s32 phy, devad, reg;
u32 value;
int ret = 0;
phy = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_PHY, -1);
devad = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_DEVAD, -1);
reg = mt753x_nl_get_s32(info, MT753X_ATTR_TYPE_REG, -1);
if (mt753x_nl_get_u32(info, MT753X_ATTR_TYPE_VAL, &value))
goto err;
if (reg < 0)
goto err;
ret = mt753x_nl_prepare_reply(info, MT753X_CMD_WRITE, &rep_skb);
if (ret < 0)
goto err;
if (phy >= 0) {
if (devad < 0)
gsw->mii_write(gsw, phy, reg, value);
else
gsw->mmd_write(gsw, phy, devad, reg, value);
} else {
mt753x_reg_write(gsw, reg, value);
}
ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_REG, reg);
if (ret < 0)
goto err;
ret = nla_put_s32(rep_skb, MT753X_ATTR_TYPE_VAL, value);
if (ret < 0)
goto err;
return mt753x_nl_send_reply(rep_skb, info);
err:
if (rep_skb)
nlmsg_free(rep_skb);
return ret;
}
static const enum mt753x_attr mt753x_nl_cmd_read_attrs[] = {
MT753X_ATTR_TYPE_REG
};
static const enum mt753x_attr mt753x_nl_cmd_write_attrs[] = {
MT753X_ATTR_TYPE_REG,
MT753X_ATTR_TYPE_VAL
};
static const struct mt753x_nl_cmd_item mt753x_nl_cmds[] = {
{
.cmd = MT753X_CMD_REQUEST,
.require_dev = false,
.process = mt753x_nl_get_swdevs
}, {
.cmd = MT753X_CMD_READ,
.require_dev = true,
.process = mt753x_nl_reply_read,
.required_attrs = mt753x_nl_cmd_read_attrs,
.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_read_attrs),
}, {
.cmd = MT753X_CMD_WRITE,
.require_dev = true,
.process = mt753x_nl_reply_write,
.required_attrs = mt753x_nl_cmd_write_attrs,
.nr_required_attrs = ARRAY_SIZE(mt753x_nl_cmd_write_attrs),
}
};
static int mt753x_nl_response(struct sk_buff *skb, struct genl_info *info)
{
struct genlmsghdr *hdr = nlmsg_data(info->nlhdr);
const struct mt753x_nl_cmd_item *cmditem = NULL;
struct gsw_mt753x *gsw = NULL;
u32 sat_req_attrs = 0;
int i, ret;
for (i = 0; i < ARRAY_SIZE(mt753x_nl_cmds); i++) {
if (hdr->cmd == mt753x_nl_cmds[i].cmd) {
cmditem = &mt753x_nl_cmds[i];
break;
}
}
if (!cmditem) {
pr_info("mt753x-nl: unknown cmd %u\n", hdr->cmd);
return -EINVAL;
}
for (i = 0; i < cmditem->nr_required_attrs; i++) {
if (info->attrs[cmditem->required_attrs[i]])
sat_req_attrs++;
}
if (sat_req_attrs != cmditem->nr_required_attrs) {
pr_info("mt753x-nl: missing required attr(s) for cmd %u\n",
hdr->cmd);
return -EINVAL;
}
if (cmditem->require_dev) {
gsw = mt753x_nl_parse_find_gsw(info);
if (!gsw) {
pr_info("mt753x-nl: failed to find switch dev\n");
return -EINVAL;
}
}
ret = cmditem->process(info, gsw);
if (gsw)
mt753x_put_gsw();
return ret;
}
int __init mt753x_nl_init(void)
{
int ret;
ret = genl_register_family(&mt753x_nl_family);
if (ret) {
pr_info("mt753x-nl: genl_register_family_with_ops failed\n");
return ret;
}
return 0;
}
void __exit mt753x_nl_exit(void)
{
genl_unregister_family(&mt753x_nl_family);
}

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Sirui Zhao <Sirui.Zhao@mediatek.com>
*/
#ifndef _MT753X_NL_H_
#define _MT753X_NL_H_
#define MT753X_GENL_NAME "mt753x"
#define MT753X_GENL_VERSION 0x1
enum mt753x_cmd {
MT753X_CMD_UNSPEC = 0,
MT753X_CMD_REQUEST,
MT753X_CMD_REPLY,
MT753X_CMD_READ,
MT753X_CMD_WRITE,
__MT753X_CMD_MAX,
};
enum mt753x_attr {
MT753X_ATTR_TYPE_UNSPEC = 0,
MT753X_ATTR_TYPE_MESG,
MT753X_ATTR_TYPE_PHY,
MT753X_ATTR_TYPE_DEVAD,
MT753X_ATTR_TYPE_REG,
MT753X_ATTR_TYPE_VAL,
MT753X_ATTR_TYPE_DEV_NAME,
MT753X_ATTR_TYPE_DEV_ID,
__MT753X_ATTR_TYPE_MAX,
};
#define MT753X_NR_ATTR_TYPE (__MT753X_ATTR_TYPE_MAX - 1)
#ifdef __KERNEL__
int __init mt753x_nl_init(void);
void __exit mt753x_nl_exit(void);
#endif /* __KERNEL__ */
#endif /* _MT753X_NL_H_ */
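/* Usage sketch (userspace, not part of the driver): reading a switch register
 * through this generic netlink family with libnl-3. Everything outside this
 * header (the libnl calls; cleanup and reply parsing are omitted) is an
 * assumption about a typical libnl-3 client, not something this commit ships.
 *
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	static int mt753x_read_reg(int reg)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		struct nl_msg *msg = nlmsg_alloc();
 *		int family;
 *
 *		genl_connect(sk);
 *		family = genl_ctrl_resolve(sk, MT753X_GENL_NAME);
 *		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *			    MT753X_CMD_READ, MT753X_GENL_VERSION);
 *		nla_put_s32(msg, MT753X_ATTR_TYPE_REG, reg);
 *		nl_send_auto(sk, msg);
 *		return nl_recvmsgs_default(sk);	// reply carries REG and VAL
 *	}
 */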

View File

@ -0,0 +1,345 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MT753X_REGS_H_
#define _MT753X_REGS_H_
#include <linux/bitops.h>
/* Values of Egress TAG Control */
#define ETAG_CTRL_UNTAG 0
#define ETAG_CTRL_TAG 2
#define ETAG_CTRL_SWAP 1
#define ETAG_CTRL_STACK 3
#define VTCR 0x90
#define VAWD1 0x94
#define VAWD2 0x98
/* Fields of VTCR */
#define VTCR_BUSY BIT(31)
#define IDX_INVLD BIT(16)
#define VTCR_FUNC_S 12
#define VTCR_FUNC_M 0xf000
#define VTCR_VID_S 0
#define VTCR_VID_M 0xfff
/* Values of VTCR_FUNC */
#define VTCR_READ_VLAN_ENTRY 0
#define VTCR_WRITE_VLAN_ENTRY 1
#define VTCR_INVD_VLAN_ENTRY 2
#define VTCR_ENABLE_VLAN_ENTRY 3
#define VTCR_READ_ACL_ENTRY 4
#define VTCR_WRITE_ACL_ENTRY 5
#define VTCR_READ_TRTCM_TABLE 6
#define VTCR_WRITE_TRTCM_TABLE 7
#define VTCR_READ_ACL_MASK_ENTRY 8
#define VTCR_WRITE_ACL_MASK_ENTRY 9
#define VTCR_READ_ACL_RULE_ENTRY 10
#define VTCR_WRITE_ACL_RULE_ENTRY 11
#define VTCR_READ_ACL_RATE_ENTRY 12
#define VTCR_WRITE_ACL_RATE_ENTRY 13
/* VLAN entry fields */
/* VAWD1 */
#define PORT_STAG BIT(31)
#define IVL_MAC BIT(30)
#define EG_CON BIT(29)
#define VTAG_EN BIT(28)
#define COPY_PRI BIT(27)
#define USER_PRI_S 24
#define USER_PRI_M 0x7000000
#define PORT_MEM_S 16
#define PORT_MEM_M 0xff0000
#define S_TAG1_S 4
#define S_TAG1_M 0xfff0
#define FID_S 1
#define FID_M 0x0e
#define VENTRY_VALID BIT(0)
/* VAWD2 */
#define S_TAG2_S 16
#define S_TAG2_M 0xffff0000
#define PORT_ETAG_S(p) ((p) * 2)
#define PORT_ETAG_M 0x03
#define PORT_CTRL_BASE 0x2000
#define PORT_CTRL_PORT_OFFSET 0x100
#define PORT_CTRL_REG(p, r) (PORT_CTRL_BASE + \
(p) * PORT_CTRL_PORT_OFFSET + (r))
#define CKGCR(p) PORT_CTRL_REG(p, 0x00)
#define PCR(p) PORT_CTRL_REG(p, 0x04)
#define PIC(p) PORT_CTRL_REG(p, 0x08)
#define PSC(p) PORT_CTRL_REG(p, 0x0c)
#define PVC(p) PORT_CTRL_REG(p, 0x10)
#define PPBV1(p) PORT_CTRL_REG(p, 0x14)
#define PPBV2(p) PORT_CTRL_REG(p, 0x18)
#define BSR(p) PORT_CTRL_REG(p, 0x1c)
#define STAG01(p) PORT_CTRL_REG(p, 0x20)
#define STAG23(p) PORT_CTRL_REG(p, 0x24)
#define STAG45(p) PORT_CTRL_REG(p, 0x28)
#define STAG67(p) PORT_CTRL_REG(p, 0x2c)
#define PPBV(p, g) (PPBV1(p) + ((g) / 2) * 4)
/* Fields of PCR */
#define MLDV2_EN BIT(30)
#define EG_TAG_S 28
#define EG_TAG_M 0x30000000
#define PORT_PRI_S 24
#define PORT_PRI_M 0x7000000
#define PORT_MATRIX_S 16
#define PORT_MATRIX_M 0xff0000
#define UP2DSCP_EN BIT(12)
#define UP2TAG_EN BIT(11)
#define ACL_EN BIT(10)
#define PORT_TX_MIR BIT(9)
#define PORT_RX_MIR BIT(8)
#define ACL_MIR BIT(7)
#define MIS_PORT_FW_S 4
#define MIS_PORT_FW_M 0x70
#define VLAN_MIS BIT(2)
#define PORT_VLAN_S 0
#define PORT_VLAN_M 0x03
/* Values of PORT_VLAN */
#define PORT_MATRIX_MODE 0
#define FALLBACK_MODE 1
#define CHECK_MODE 2
#define SECURITY_MODE 3
/* Fields of PVC */
#define STAG_VPID_S 16
#define STAG_VPID_M 0xffff0000
#define DIS_PVID BIT(15)
#define FORCE_PVID BIT(14)
#define PT_VPM BIT(12)
#define PT_OPTION BIT(11)
#define PVC_EG_TAG_S 8
#define PVC_EG_TAG_M 0x700
#define VLAN_ATTR_S 6
#define VLAN_ATTR_M 0xc0
#define PVC_PORT_STAG BIT(5)
#define BC_LKYV_EN BIT(4)
#define MC_LKYV_EN BIT(3)
#define UC_LKYV_EN BIT(2)
#define ACC_FRM_S 0
#define ACC_FRM_M 0x03
/* Values of VLAN_ATTR */
#define VA_USER_PORT 0
#define VA_STACK_PORT 1
#define VA_TRANSLATION_PORT 2
#define VA_TRANSPARENT_PORT 3
/* Fields of PPBV */
#define GRP_PORT_PRI_S(g) (((g) % 2) * 16 + 13)
#define GRP_PORT_PRI_M 0x07
#define GRP_PORT_VID_S(g) (((g) % 2) * 16)
#define GRP_PORT_VID_M 0xfff
#define PORT_MAC_CTRL_BASE 0x3000
#define PORT_MAC_CTRL_PORT_OFFSET 0x100
#define PORT_MAC_CTRL_REG(p, r) (PORT_MAC_CTRL_BASE + \
(p) * PORT_MAC_CTRL_PORT_OFFSET + (r))
#define PMCR(p) PORT_MAC_CTRL_REG(p, 0x00)
#define PMEEECR(p) PORT_MAC_CTRL_REG(p, 0x04)
#define PMSR(p) PORT_MAC_CTRL_REG(p, 0x08)
#define PINT_EN(p) PORT_MAC_CTRL_REG(p, 0x10)
#define PINT_STS(p) PORT_MAC_CTRL_REG(p, 0x14)
#define GMACCR (PORT_MAC_CTRL_BASE + 0xe0)
#define TXCRC_EN BIT(19)
#define RXCRC_EN BIT(18)
#define PRMBL_LMT_EN BIT(17)
#define MTCC_LMT_S 9
#define MTCC_LMT_M 0x1e00
#define MAX_RX_JUMBO_S 2
#define MAX_RX_JUMBO_M 0x3c
#define MAX_RX_PKT_LEN_S 0
#define MAX_RX_PKT_LEN_M 0x3
/* Values of MAX_RX_PKT_LEN */
#define RX_PKT_LEN_1518 0
#define RX_PKT_LEN_1536 1
#define RX_PKT_LEN_1522 2
#define RX_PKT_LEN_MAX_JUMBO 3
/* Fields of PMCR */
#define IPG_CFG_S 18
#define IPG_CFG_M 0xc0000
#define EXT_PHY BIT(17)
#define MAC_MODE BIT(16)
#define MAC_TX_EN BIT(14)
#define MAC_RX_EN BIT(13)
#define MAC_PRE BIT(11)
#define BKOFF_EN BIT(9)
#define BACKPR_EN BIT(8)
#define FORCE_EEE1G BIT(7)
#define FORCE_EEE1000 BIT(6)
#define FORCE_RX_FC BIT(5)
#define FORCE_TX_FC BIT(4)
#define FORCE_SPD_S 2
#define FORCE_SPD_M 0x0c
#define FORCE_DPX BIT(1)
#define FORCE_LINK BIT(0)
/* Fields of PMSR */
#define EEE1G_STS BIT(7)
#define EEE100_STS BIT(6)
#define RX_FC_STS BIT(5)
#define TX_FC_STS BIT(4)
#define MAC_SPD_STS_S 2
#define MAC_SPD_STS_M 0x0c
#define MAC_DPX_STS BIT(1)
#define MAC_LNK_STS BIT(0)
/* Values of MAC_SPD_STS */
#define MAC_SPD_10 0
#define MAC_SPD_100 1
#define MAC_SPD_1000 2
#define MAC_SPD_2500 3
/* Values of IPG_CFG */
#define IPG_96BIT 0
#define IPG_96BIT_WITH_SHORT_IPG 1
#define IPG_64BIT 2
#define MIB_COUNTER_BASE 0x4000
#define MIB_COUNTER_PORT_OFFSET 0x100
#define MIB_COUNTER_REG(p, r) (MIB_COUNTER_BASE + \
(p) * MIB_COUNTER_PORT_OFFSET + (r))
#define STATS_TDPC 0x00
#define STATS_TCRC 0x04
#define STATS_TUPC 0x08
#define STATS_TMPC 0x0C
#define STATS_TBPC 0x10
#define STATS_TCEC 0x14
#define STATS_TSCEC 0x18
#define STATS_TMCEC 0x1C
#define STATS_TDEC 0x20
#define STATS_TLCEC 0x24
#define STATS_TXCEC 0x28
#define STATS_TPPC 0x2C
#define STATS_TL64PC 0x30
#define STATS_TL65PC 0x34
#define STATS_TL128PC 0x38
#define STATS_TL256PC 0x3C
#define STATS_TL512PC 0x40
#define STATS_TL1024PC 0x44
#define STATS_TOC 0x48
#define STATS_RDPC 0x60
#define STATS_RFPC 0x64
#define STATS_RUPC 0x68
#define STATS_RMPC 0x6C
#define STATS_RBPC 0x70
#define STATS_RAEPC 0x74
#define STATS_RCEPC 0x78
#define STATS_RUSPC 0x7C
#define STATS_RFEPC 0x80
#define STATS_ROSPC 0x84
#define STATS_RJEPC 0x88
#define STATS_RPPC 0x8C
#define STATS_RL64PC 0x90
#define STATS_RL65PC 0x94
#define STATS_RL128PC 0x98
#define STATS_RL256PC 0x9C
#define STATS_RL512PC 0xA0
#define STATS_RL1024PC 0xA4
#define STATS_ROC 0xA8
#define STATS_RDPC_CTRL 0xB0
#define STATS_RDPC_ING 0xB4
#define STATS_RDPC_ARL 0xB8
#define SYS_CTRL 0x7000
#define SW_PHY_RST BIT(2)
#define SW_SYS_RST BIT(1)
#define SW_REG_RST BIT(0)
#define SYS_INT_EN 0x7008
#define SYS_INT_STS 0x700c
#define MAC_PC_INT BIT(16)
#define PHY_INT(p) BIT((p) + 8)
#define PHY_LC_INT(p) BIT(p)
#define PHY_IAC 0x701c
#define PHY_ACS_ST BIT(31)
#define MDIO_REG_ADDR_S 25
#define MDIO_REG_ADDR_M 0x3e000000
#define MDIO_PHY_ADDR_S 20
#define MDIO_PHY_ADDR_M 0x1f00000
#define MDIO_CMD_S 18
#define MDIO_CMD_M 0xc0000
#define MDIO_ST_S 16
#define MDIO_ST_M 0x30000
#define MDIO_RW_DATA_S 0
#define MDIO_RW_DATA_M 0xffff
/* MDIO_CMD: MDIO commands */
#define MDIO_CMD_ADDR 0
#define MDIO_CMD_WRITE 1
#define MDIO_CMD_READ 2
#define MDIO_CMD_READ_C45 3
/* MDIO_ST: MDIO start field */
#define MDIO_ST_C45 0
#define MDIO_ST_C22 1
#define HWSTRAP 0x7800
#define MHWSTRAP 0x7804
/* Internal GPHY Page Control Register */
#define PHY_CL22_PAGE_CTRL 0x1f
#define PHY_TR_PAGE 0x52b5
/* Internal GPHY Token Ring Access Registers */
#define PHY_TR_CTRL 0x10
#define PHY_TR_LOW_DATA 0x11
#define PHY_TR_HIGH_DATA 0x12
/* Fields of PHY_TR_CTRL */
#define PHY_TR_PKT_XMT_STA BIT(15)
#define PHY_TR_WR_S 13
#define PHY_TR_CH_ADDR_S 11
#define PHY_TR_NODE_ADDR_S 7
#define PHY_TR_DATA_ADDR_S 1
enum phy_tr_wr {
PHY_TR_WRITE = 0,
PHY_TR_READ = 1,
};
/* Helper macro for GPHY Token Ring Access */
#define PHY_TR_LOW_VAL(x) ((x) & 0xffff)
#define PHY_TR_HIGH_VAL(x) (((x) & 0xff0000) >> 16)
/* Token Ring Channels */
#define PMA_CH 0x1
#define DSP_CH 0x2
/* Token Ring Nodes */
#define PMA_NOD 0xf
#define DSP_NOD 0xd
/* Token Ring register range */
enum tr_pma_reg_addr {
PMA_MIN = 0x0,
PMA_01 = 0x1,
PMA_17 = 0x17,
PMA_18 = 0x18,
PMA_MAX = 0x3d,
};
enum tr_dsp_reg_addr {
DSP_MIN = 0x0,
DSP_06 = 0x6,
DSP_08 = 0x8,
DSP_0f = 0xf,
DSP_10 = 0x10,
DSP_MAX = 0x3e,
};
#endif /* _MT753X_REGS_H_ */

View File

@ -0,0 +1,517 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/bitops.h>
#include <net/genetlink.h>
#include <linux/delay.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include "mt753x.h"
#include "mt753x_swconfig.h"
#include "mt753x_regs.h"
#define MT753X_PORT_MIB_TXB_ID 18 /* TxByte */
#define MT753X_PORT_MIB_RXB_ID 37 /* RxByte */
#define MIB_DESC(_s, _o, _n) \
{ \
.size = (_s), \
.offset = (_o), \
.name = (_n), \
}
struct mt753x_mib_desc {
unsigned int size;
unsigned int offset;
const char *name;
};
static const struct mt753x_mib_desc mt753x_mibs[] = {
MIB_DESC(1, STATS_TDPC, "TxDrop"),
MIB_DESC(1, STATS_TCRC, "TxCRC"),
MIB_DESC(1, STATS_TUPC, "TxUni"),
MIB_DESC(1, STATS_TMPC, "TxMulti"),
MIB_DESC(1, STATS_TBPC, "TxBroad"),
MIB_DESC(1, STATS_TCEC, "TxCollision"),
MIB_DESC(1, STATS_TSCEC, "TxSingleCol"),
MIB_DESC(1, STATS_TMCEC, "TxMultiCol"),
MIB_DESC(1, STATS_TDEC, "TxDefer"),
MIB_DESC(1, STATS_TLCEC, "TxLateCol"),
MIB_DESC(1, STATS_TXCEC, "TxExcCol"),
MIB_DESC(1, STATS_TPPC, "TxPause"),
MIB_DESC(1, STATS_TL64PC, "Tx64Byte"),
MIB_DESC(1, STATS_TL65PC, "Tx65Byte"),
MIB_DESC(1, STATS_TL128PC, "Tx128Byte"),
MIB_DESC(1, STATS_TL256PC, "Tx256Byte"),
MIB_DESC(1, STATS_TL512PC, "Tx512Byte"),
MIB_DESC(1, STATS_TL1024PC, "Tx1024Byte"),
MIB_DESC(2, STATS_TOC, "TxByte"),
MIB_DESC(1, STATS_RDPC, "RxDrop"),
MIB_DESC(1, STATS_RFPC, "RxFiltered"),
MIB_DESC(1, STATS_RUPC, "RxUni"),
MIB_DESC(1, STATS_RMPC, "RxMulti"),
MIB_DESC(1, STATS_RBPC, "RxBroad"),
MIB_DESC(1, STATS_RAEPC, "RxAlignErr"),
MIB_DESC(1, STATS_RCEPC, "RxCRC"),
MIB_DESC(1, STATS_RUSPC, "RxUnderSize"),
MIB_DESC(1, STATS_RFEPC, "RxFragment"),
MIB_DESC(1, STATS_ROSPC, "RxOverSize"),
MIB_DESC(1, STATS_RJEPC, "RxJabber"),
MIB_DESC(1, STATS_RPPC, "RxPause"),
MIB_DESC(1, STATS_RL64PC, "Rx64Byte"),
MIB_DESC(1, STATS_RL65PC, "Rx65Byte"),
MIB_DESC(1, STATS_RL128PC, "Rx128Byte"),
MIB_DESC(1, STATS_RL256PC, "Rx256Byte"),
MIB_DESC(1, STATS_RL512PC, "Rx512Byte"),
MIB_DESC(1, STATS_RL1024PC, "Rx1024Byte"),
MIB_DESC(2, STATS_ROC, "RxByte"),
MIB_DESC(1, STATS_RDPC_CTRL, "RxCtrlDrop"),
MIB_DESC(1, STATS_RDPC_ING, "RxIngDrop"),
MIB_DESC(1, STATS_RDPC_ARL, "RxARLDrop")
};
enum {
/* Global attributes. */
MT753X_ATTR_ENABLE_VLAN,
};
static int mt753x_get_vlan_enable(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
val->value.i = gsw->global_vlan_enable;
return 0;
}
static int mt753x_set_vlan_enable(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
gsw->global_vlan_enable = val->value.i != 0;
return 0;
}
static int mt753x_get_port_pvid(struct switch_dev *dev, int port, int *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
if (port >= MT753X_NUM_PORTS)
return -EINVAL;
*val = mt753x_reg_read(gsw, PPBV1(port));
*val &= GRP_PORT_VID_M;
return 0;
}
static int mt753x_set_port_pvid(struct switch_dev *dev, int port, int pvid)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
if (port >= MT753X_NUM_PORTS)
return -EINVAL;
if (pvid < MT753X_MIN_VID || pvid > MT753X_MAX_VID)
return -EINVAL;
gsw->port_entries[port].pvid = pvid;
return 0;
}
static int mt753x_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
u32 member;
u32 etags;
int i;
val->len = 0;
if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS)
return -EINVAL;
mt753x_vlan_ctrl(gsw, VTCR_READ_VLAN_ENTRY, val->port_vlan);
member = mt753x_reg_read(gsw, VAWD1);
member &= PORT_MEM_M;
member >>= PORT_MEM_S;
etags = mt753x_reg_read(gsw, VAWD2);
for (i = 0; i < MT753X_NUM_PORTS; i++) {
struct switch_port *p;
int etag;
if (!(member & BIT(i)))
continue;
p = &val->value.ports[val->len++];
p->id = i;
etag = (etags >> PORT_ETAG_S(i)) & PORT_ETAG_M;
if (etag == ETAG_CTRL_TAG)
p->flags |= BIT(SWITCH_PORT_FLAG_TAGGED);
else if (etag != ETAG_CTRL_UNTAG)
dev_info(gsw->dev,
"vlan egress tag control neither untag nor tag.\n");
}
return 0;
}
static int mt753x_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
u8 member = 0;
u8 etags = 0;
int i;
if (val->port_vlan < 0 || val->port_vlan >= MT753X_NUM_VLANS ||
val->len > MT753X_NUM_PORTS)
return -EINVAL;
for (i = 0; i < val->len; i++) {
struct switch_port *p = &val->value.ports[i];
if (p->id >= MT753X_NUM_PORTS)
return -EINVAL;
member |= BIT(p->id);
if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED))
etags |= BIT(p->id);
}
gsw->vlan_entries[val->port_vlan].member = member;
gsw->vlan_entries[val->port_vlan].etags = etags;
return 0;
}
static int mt753x_set_vid(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
int vlan;
u16 vid;
vlan = val->port_vlan;
vid = (u16)val->value.i;
if (vlan < 0 || vlan >= MT753X_NUM_VLANS)
return -EINVAL;
if (vid < MT753X_MIN_VID || vid > MT753X_MAX_VID)
return -EINVAL;
gsw->vlan_entries[vlan].vid = vid;
return 0;
}
static int mt753x_get_vid(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
val->value.i = val->port_vlan;
return 0;
}
static int mt753x_get_port_link(struct switch_dev *dev, int port,
struct switch_port_link *link)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
u32 speed, pmsr;
if (port < 0 || port >= MT753X_NUM_PORTS)
return -EINVAL;
pmsr = mt753x_reg_read(gsw, PMSR(port));
link->link = pmsr & MAC_LNK_STS;
link->duplex = pmsr & MAC_DPX_STS;
speed = (pmsr & MAC_SPD_STS_M) >> MAC_SPD_STS_S;
switch (speed) {
case MAC_SPD_10:
link->speed = SWITCH_PORT_SPEED_10;
break;
case MAC_SPD_100:
link->speed = SWITCH_PORT_SPEED_100;
break;
case MAC_SPD_1000:
link->speed = SWITCH_PORT_SPEED_1000;
break;
case MAC_SPD_2500:
/* TODO: swconfig has no support for 2500 now */
link->speed = SWITCH_PORT_SPEED_UNKNOWN;
break;
}
return 0;
}
static int mt753x_set_port_link(struct switch_dev *dev, int port,
struct switch_port_link *link)
{
#ifndef MODULE
if (port >= MT753X_NUM_PHYS)
return -EINVAL;
return switch_generic_set_link(dev, port, link);
#else
return -ENOTSUPP;
#endif
}
static u64 get_mib_counter(struct gsw_mt753x *gsw, int i, int port)
{
unsigned int offset;
u64 lo, hi, hi2;
offset = mt753x_mibs[i].offset;
if (mt753x_mibs[i].size == 1)
return mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));
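/* 64-bit counter: re-read the high word until it is stable so a carry
 * from the low word cannot tear the combined value.
 */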
do {
hi = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
lo = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset));
hi2 = mt753x_reg_read(gsw, MIB_COUNTER_REG(port, offset + 4));
} while (hi2 != hi);
return (hi << 32) | lo;
}
static int mt753x_get_port_mib(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
static char buf[4096];
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
int i, len = 0;
if (val->port_vlan >= MT753X_NUM_PORTS)
return -EINVAL;
len += snprintf(buf + len, sizeof(buf) - len,
"Port %d MIB counters\n", val->port_vlan);
for (i = 0; i < ARRAY_SIZE(mt753x_mibs); ++i) {
u64 counter;
len += snprintf(buf + len, sizeof(buf) - len,
"%-11s: ", mt753x_mibs[i].name);
counter = get_mib_counter(gsw, i, val->port_vlan);
len += snprintf(buf + len, sizeof(buf) - len, "%llu\n",
counter);
}
val->value.s = buf;
val->len = len;
return 0;
}
static int mt753x_get_port_stats(struct switch_dev *dev, int port,
struct switch_port_stats *stats)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
if (port < 0 || port >= MT753X_NUM_PORTS)
return -EINVAL;
stats->tx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_TXB_ID, port);
stats->rx_bytes = get_mib_counter(gsw, MT753X_PORT_MIB_RXB_ID, port);
return 0;
}
static void mt753x_port_isolation(struct gsw_mt753x *gsw)
{
int i;
for (i = 0; i < MT753X_NUM_PORTS; i++)
mt753x_reg_write(gsw, PCR(i),
BIT(gsw->cpu_port) << PORT_MATRIX_S);
mt753x_reg_write(gsw, PCR(gsw->cpu_port), PORT_MATRIX_M);
for (i = 0; i < MT753X_NUM_PORTS; i++) {
u32 pvc_mode = 0x8100 << STAG_VPID_S;
if ((gsw->port5_cfg.stag_on && i == 5) ||
(gsw->port6_cfg.stag_on && i == 6))
pvc_mode |= PVC_PORT_STAG;
else
pvc_mode |= (VA_TRANSPARENT_PORT << VLAN_ATTR_S);
mt753x_reg_write(gsw, PVC(i), pvc_mode);
}
}
static int mt753x_apply_config(struct switch_dev *dev)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
if (!gsw->global_vlan_enable) {
mt753x_port_isolation(gsw);
return 0;
}
mt753x_apply_vlan_config(gsw);
return 0;
}
static int mt753x_reset_switch(struct switch_dev *dev)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
int i;
memset(gsw->port_entries, 0, sizeof(gsw->port_entries));
memset(gsw->vlan_entries, 0, sizeof(gsw->vlan_entries));
/* Set the default VID of each VLAN entry to its own index so the VID
 * does not need to be configured explicitly.
 */
for (i = 0; i < MT753X_NUM_VLANS; i++)
gsw->vlan_entries[i].vid = i;
return 0;
}
static int mt753x_phy_read16(struct switch_dev *dev, int addr, u8 reg,
u16 *value)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
*value = gsw->mii_read(gsw, addr, reg);
return 0;
}
static int mt753x_phy_write16(struct switch_dev *dev, int addr, u8 reg,
u16 value)
{
struct gsw_mt753x *gsw = container_of(dev, struct gsw_mt753x, swdev);
gsw->mii_write(gsw, addr, reg, value);
return 0;
}
static const struct switch_attr mt753x_global[] = {
{
.type = SWITCH_TYPE_INT,
.name = "enable_vlan",
.description = "VLAN mode (1:enabled)",
.max = 1,
.id = MT753X_ATTR_ENABLE_VLAN,
.get = mt753x_get_vlan_enable,
.set = mt753x_set_vlan_enable,
}
};
static const struct switch_attr mt753x_port[] = {
{
.type = SWITCH_TYPE_STRING,
.name = "mib",
.description = "Get MIB counters for port",
.get = mt753x_get_port_mib,
.set = NULL,
},
};
static const struct switch_attr mt753x_vlan[] = {
{
.type = SWITCH_TYPE_INT,
.name = "vid",
.description = "VLAN ID (0-4094)",
.set = mt753x_set_vid,
.get = mt753x_get_vid,
.max = 4094,
},
};
static const struct switch_dev_ops mt753x_swdev_ops = {
.attr_global = {
.attr = mt753x_global,
.n_attr = ARRAY_SIZE(mt753x_global),
},
.attr_port = {
.attr = mt753x_port,
.n_attr = ARRAY_SIZE(mt753x_port),
},
.attr_vlan = {
.attr = mt753x_vlan,
.n_attr = ARRAY_SIZE(mt753x_vlan),
},
.get_vlan_ports = mt753x_get_vlan_ports,
.set_vlan_ports = mt753x_set_vlan_ports,
.get_port_pvid = mt753x_get_port_pvid,
.set_port_pvid = mt753x_set_port_pvid,
.get_port_link = mt753x_get_port_link,
.set_port_link = mt753x_set_port_link,
.get_port_stats = mt753x_get_port_stats,
.apply_config = mt753x_apply_config,
.reset_switch = mt753x_reset_switch,
.phy_read16 = mt753x_phy_read16,
.phy_write16 = mt753x_phy_write16,
};
int mt753x_swconfig_init(struct gsw_mt753x *gsw)
{
struct device_node *np = gsw->dev->of_node;
struct switch_dev *swdev;
int ret;
if (of_property_read_u32(np, "mediatek,cpuport", &gsw->cpu_port))
gsw->cpu_port = MT753X_DFL_CPU_PORT;
swdev = &gsw->swdev;
swdev->name = gsw->name;
swdev->alias = gsw->name;
swdev->cpu_port = gsw->cpu_port;
swdev->ports = MT753X_NUM_PORTS;
swdev->vlans = MT753X_NUM_VLANS;
swdev->ops = &mt753x_swdev_ops;
ret = register_switch(swdev, NULL);
if (ret) {
dev_notice(gsw->dev, "Failed to register switch %s\n",
swdev->name);
return ret;
}
mt753x_apply_config(swdev);
return 0;
}
void mt753x_swconfig_destroy(struct gsw_mt753x *gsw)
{
unregister_switch(&gsw->swdev);
}
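/* Usage sketch (assumes the stock OpenWrt swconfig CLI; the device name is
 * the detected chip name, e.g. "MT7530"):
 *
 *	swconfig dev MT7530 set enable_vlan 1
 *	swconfig dev MT7530 vlan 1 set ports "0 1 2 3 6t"
 *	swconfig dev MT7530 port 0 set pvid 1
 *	swconfig dev MT7530 set apply
 *	swconfig dev MT7530 port 0 get mib
 */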

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 MediaTek Inc.
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/
#ifndef _MT753X_SWCONFIG_H_
#define _MT753X_SWCONFIG_H_
#ifdef CONFIG_SWCONFIG
#include <linux/switch.h>
#include "mt753x.h"
int mt753x_swconfig_init(struct gsw_mt753x *gsw);
void mt753x_swconfig_destroy(struct gsw_mt753x *gsw);
#else
static inline int mt753x_swconfig_init(struct gsw_mt753x *gsw)
{
mt753x_apply_vlan_config(gsw);
return 0;
}
static inline void mt753x_swconfig_destroy(struct gsw_mt753x *gsw)
{
}
#endif
#endif /* _MT753X_SWCONFIG_H_ */

View File

@ -0,0 +1,193 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 MediaTek Inc.
*/
#include "mt753x.h"
#include "mt753x_regs.h"
struct mt753x_mapping mt753x_def_mapping[] = {
{
.name = "llllw",
.pvids = { 1, 1, 1, 1, 2, 2, 1 },
.members = { 0, 0x4f, 0x30 },
.etags = { 0, 0, 0 },
.vids = { 0, 1, 2 },
}, {
.name = "wllll",
.pvids = { 2, 1, 1, 1, 1, 2, 1 },
.members = { 0, 0x5e, 0x21 },
.etags = { 0, 0, 0 },
.vids = { 0, 1, 2 },
}, {
.name = "lwlll",
.pvids = { 1, 2, 1, 1, 1, 2, 1 },
.members = { 0, 0x5d, 0x22 },
.etags = { 0, 0, 0 },
.vids = { 0, 1, 2 },
}, {
.name = "lllll",
.pvids = { 1, 1, 1, 1, 1, 1, 1 },
.members = { 0, 0x7f },
.etags = { 0, 0 },
.vids = { 0, 1 },
},
};
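/* In each mapping, pvids[] gives the VLAN table index per port 0..6 and
 * members[]/etags[] are port bitmaps. For "llllw": VLAN 1 member 0x4f =
 * ports 0-3 plus CPU port 6 (LAN), VLAN 2 member 0x30 = ports 4-5 (WAN,
 * presumably towards the second GMAC).
 */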
void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val)
{
int i;
mt753x_reg_write(gsw, VTCR,
VTCR_BUSY | ((cmd << VTCR_FUNC_S) & VTCR_FUNC_M) |
(val & VTCR_VID_M));
for (i = 0; i < 300; i++) {
u32 vtcr = mt753x_reg_read(gsw, VTCR);
if ((vtcr & VTCR_BUSY) == 0)
break;
usleep_range(1000, 1100);
}
if (i == 300)
dev_info(gsw->dev, "vtcr timeout\n");
}
static void mt753x_write_vlan_entry(struct gsw_mt753x *gsw, int vlan, u16 vid,
u8 ports, u8 etags)
{
int port;
u32 val;
/* vlan port membership */
if (ports)
mt753x_reg_write(gsw, VAWD1,
IVL_MAC | VTAG_EN | VENTRY_VALID |
((ports << PORT_MEM_S) & PORT_MEM_M));
else
mt753x_reg_write(gsw, VAWD1, 0);
/* egress mode */
val = 0;
for (port = 0; port < MT753X_NUM_PORTS; port++) {
if (etags & BIT(port))
val |= ETAG_CTRL_TAG << PORT_ETAG_S(port);
else
val |= ETAG_CTRL_UNTAG << PORT_ETAG_S(port);
}
mt753x_reg_write(gsw, VAWD2, val);
/* write to vlan table */
mt753x_vlan_ctrl(gsw, VTCR_WRITE_VLAN_ENTRY, vid);
}
void mt753x_apply_vlan_config(struct gsw_mt753x *gsw)
{
int i, j;
u8 tag_ports;
u8 untag_ports;
/* set all ports as security mode */
for (i = 0; i < MT753X_NUM_PORTS; i++)
mt753x_reg_write(gsw, PCR(i),
PORT_MATRIX_M | SECURITY_MODE);
/* check if a port is used in tag/untag vlan egress mode */
tag_ports = 0;
untag_ports = 0;
for (i = 0; i < MT753X_NUM_VLANS; i++) {
u8 member = gsw->vlan_entries[i].member;
u8 etags = gsw->vlan_entries[i].etags;
if (!member)
continue;
for (j = 0; j < MT753X_NUM_PORTS; j++) {
if (!(member & BIT(j)))
continue;
if (etags & BIT(j))
tag_ports |= 1u << j;
else
untag_ports |= 1u << j;
}
}
/* set all untag-only ports as transparent and the rest as user port */
for (i = 0; i < MT753X_NUM_PORTS; i++) {
u32 pvc_mode = 0x8100 << STAG_VPID_S;
if (untag_ports & BIT(i) && !(tag_ports & BIT(i)))
pvc_mode = (0x8100 << STAG_VPID_S) |
(VA_TRANSPARENT_PORT << VLAN_ATTR_S);
if ((gsw->port5_cfg.stag_on && i == 5) ||
(gsw->port6_cfg.stag_on && i == 6))
pvc_mode = (0x8100 << STAG_VPID_S) | PVC_PORT_STAG;
mt753x_reg_write(gsw, PVC(i), pvc_mode);
}
/* first clear the switch vlan table */
for (i = 0; i < MT753X_NUM_VLANS; i++)
mt753x_write_vlan_entry(gsw, i, i, 0, 0);
/* now program only vlans with members to avoid
* clobbering remapped entries in later iterations
*/
for (i = 0; i < MT753X_NUM_VLANS; i++) {
u16 vid = gsw->vlan_entries[i].vid;
u8 member = gsw->vlan_entries[i].member;
u8 etags = gsw->vlan_entries[i].etags;
if (member)
mt753x_write_vlan_entry(gsw, i, vid, member, etags);
}
/* Port Default PVID */
for (i = 0; i < MT753X_NUM_PORTS; i++) {
int vlan = gsw->port_entries[i].pvid;
u16 pvid = 0;
u32 val;
if (vlan < MT753X_NUM_VLANS && gsw->vlan_entries[vlan].member)
pvid = gsw->vlan_entries[vlan].vid;
val = mt753x_reg_read(gsw, PPBV1(i));
val &= ~GRP_PORT_VID_M;
val |= pvid;
mt753x_reg_write(gsw, PPBV1(i), val);
}
}
struct mt753x_mapping *mt753x_find_mapping(struct device_node *np)
{
const char *map;
int i;
if (of_property_read_string(np, "mediatek,portmap", &map))
return NULL;
for (i = 0; i < ARRAY_SIZE(mt753x_def_mapping); i++)
if (!strcmp(map, mt753x_def_mapping[i].name))
return &mt753x_def_mapping[i];
return NULL;
}
void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map)
{
int i = 0;
for (i = 0; i < MT753X_NUM_PORTS; i++)
gsw->port_entries[i].pvid = map->pvids[i];
for (i = 0; i < MT753X_NUM_VLANS; i++) {
gsw->vlan_entries[i].member = map->members[i];
gsw->vlan_entries[i].etags = map->etags[i];
gsw->vlan_entries[i].vid = map->vids[i];
}
}
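Taken together, a probe path would look up the device-tree layout and program the switch before swconfig takes over. A hedged sketch (the helper name is hypothetical; everything it calls is defined in this file):

/* Hypothetical probe helper: apply the "mediatek,portmap" layout,
 * falling back to the all-LAN "lllll" entry when the property is absent.
 */
static void example_apply_portmap(struct gsw_mt753x *gsw)
{
	struct mt753x_mapping *map;

	map = mt753x_find_mapping(gsw->dev->of_node);
	if (!map)
		map = &mt753x_def_mapping[ARRAY_SIZE(mt753x_def_mapping) - 1];

	mt753x_apply_mapping(gsw, map);
	mt753x_apply_vlan_config(gsw);
}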


@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 MediaTek Inc.
*/
#ifndef _MT753X_VLAN_H_
#define _MT753X_VLAN_H_
#define MT753X_NUM_PORTS 7
#define MT753X_NUM_VLANS 4095
#define MT753X_MAX_VID 4095
#define MT753X_MIN_VID 0
struct gsw_mt753x;
struct mt753x_port_entry {
u16 pvid;
};
struct mt753x_vlan_entry {
u16 vid;
u8 member;
u8 etags;
};
struct mt753x_mapping {
char *name;
u16 pvids[MT753X_NUM_PORTS];
u8 members[MT753X_NUM_VLANS];
u8 etags[MT753X_NUM_VLANS];
u16 vids[MT753X_NUM_VLANS];
};
extern struct mt753x_mapping mt753x_def_mapping[];
void mt753x_vlan_ctrl(struct gsw_mt753x *gsw, u32 cmd, u32 val);
void mt753x_apply_vlan_config(struct gsw_mt753x *gsw);
struct mt753x_mapping *mt753x_find_mapping(struct device_node *np);
void mt753x_apply_mapping(struct gsw_mt753x *gsw, struct mt753x_mapping *map);
#endif /* _MT753X_VLAN_H_ */


@ -0,0 +1,558 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2019 MediaTek Inc.
* Author: Harry Huang <harry.huang@mediatek.com>
*/
#ifndef _RA_NAT_WANTED
#define _RA_NAT_WANTED
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#ifndef NEXTHDR_IPIP
#define NEXTHDR_IPIP 4
#endif
#define hwnat_vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define hwnat_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#if defined(CONFIG_HW_NAT)
extern void hwnat_magic_tag_set_zero(struct sk_buff *skb);
extern void hwnat_check_magic_tag(struct sk_buff *skb);
extern void hwnat_set_headroom_zero(struct sk_buff *skb);
extern void hwnat_set_tailroom_zero(struct sk_buff *skb);
extern void hwnat_copy_headroom(u8 *data, struct sk_buff *skb);
extern void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb);
extern void hwnat_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void hwnat_magic_tag_set_zero(struct sk_buff *skb)
{
}
static inline void hwnat_check_magic_tag(struct sk_buff *skb)
{
}
static inline void hwnat_set_headroom_zero(struct sk_buff *skb)
{
}
static inline void hwnat_set_tailroom_zero(struct sk_buff *skb)
{
}
static inline void hwnat_copy_headroom(u8 *data, struct sk_buff *skb)
{
}
static inline void hwnat_copy_tailroom(u8 *data, int size, struct sk_buff *skb)
{
}
#endif
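These hooks let other network drivers cooperate with HNAT without a hard module dependency; the stubs above compile to nothing when CONFIG_HW_NAT is off. A minimal sketch of an external-interface receive path calling them (the function name is illustrative, not part of this header):

/* Illustrative call site: scrub inherited FOE metadata from a
 * recycled skb before it re-enters the network stack.
 */
static inline void example_extif_rx_prepare(struct sk_buff *skb)
{
	hwnat_check_magic_tag(skb);	/* invalidate a stale magic tag */
	hwnat_set_headroom_zero(skb);	/* wipe leftover FOE info, if any */
}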
enum foe_cpu_reason {
TTL_0 = 0x02, /* IPv4(IPv6) TTL(hop limit) = 0 */
/* IPv4(IPv6) has option(extension) header */
HAS_OPTION_HEADER = 0x03,
NO_FLOW_IS_ASSIGNED = 0x07, /* No flow is assigned */
/* IPv4 HNAT doesn't support IPv4 /w fragment */
IPV4_WITH_FRAGMENT = 0x08,
/* IPv4 HNAPT/DS-Lite doesn't support IPv4 /w fragment */
IPV4_HNAPT_DSLITE_WITH_FRAGMENT = 0x09,
/* IPv4 HNAPT/DS-Lite can't find TCP/UDP sport/dport */
IPV4_HNAPT_DSLITE_WITHOUT_TCP_UDP = 0x0A,
/* IPv6 5T-route/6RD can't find TCP/UDP sport/dport */
IPV6_5T_6RD_WITHOUT_TCP_UDP = 0x0B,
/* Ingress packet is TCP fin/syn/rst */
/*(for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
TCP_FIN_SYN_RST = 0x0C,
UN_HIT = 0x0D, /* FOE Un-hit */
HIT_UNBIND = 0x0E, /* FOE Hit unbind */
/* FOE Hit unbind & rate reach */
HIT_UNBIND_RATE_REACH = 0x0F,
HIT_BIND_TCP_FIN = 0x10, /* Hit bind PPE TCP FIN entry */
/* Hit bind PPE entry and TTL(hop limit) = 1 */
/* and TTL(hop limit) - 1 */
HIT_BIND_TTL_1 = 0x11,
/* Hit bind and VLAN replacement violation */
/*(Ingress 1(0) VLAN layers and egress 4(3 or 4) VLAN layers) */
HIT_BIND_WITH_VLAN_VIOLATION = 0x12,
/* Hit bind and keep alive with unicast old-header packet */
HIT_BIND_KEEPALIVE_UC_OLD_HDR = 0x13,
/* Hit bind and keep alive with multicast new-header packet */
HIT_BIND_KEEPALIVE_MC_NEW_HDR = 0x14,
/* Hit bind and keep alive with duplicate old-header packet */
HIT_BIND_KEEPALIVE_DUP_OLD_HDR = 0x15,
/* FOE Hit bind & force to CPU */
HIT_BIND_FORCE_TO_CPU = 0x16,
/* Hit bind and remove tunnel IP header, */
/* but inner IP has option/next header */
HIT_BIND_WITH_OPTION_HEADER = 0x17,
/* Hit bind and exceed MTU */
HIT_BIND_EXCEED_MTU = 0x1C,
HIT_BIND_PACKET_SAMPLING = 0x1B, /* PS packet */
/* Switch clone multicast packet to CPU */
HIT_BIND_MULTICAST_TO_CPU = 0x18,
/* Switch clone multicast packet to GMAC1 & CPU */
HIT_BIND_MULTICAST_TO_GMAC_CPU = 0x19,
HIT_PRE_BIND = 0x1A /* Pre-bind */
};
#define MAX_IF_NUM 64
struct dmad_rx_descinfo4 {
uint32_t foe_entry_num:15;
uint32_t rsv0:3;
uint32_t CRSN:5;
uint32_t rsv1:3;
uint32_t SPORT:4;
uint32_t ppe:1;
uint32_t ALG:1;
uint32_t IF:8;
uint32_t WDMAID:2;
uint32_t RXID:2;
uint32_t WCID:10;
uint32_t BSSID:6;
uint32_t rsv3:4;
uint16_t minfo:1;
uint16_t ntype:3;
uint16_t chid:8;
uint16_t rsv4:4;
u16 MAGIC_TAG_PROTECT;
} __packed;
struct pdma_rx_desc_info4 {
u16 MAGIC_TAG_PROTECT;
uint32_t foe_entry_num:14;
uint32_t CRSN:5;
uint32_t SPORT:4;
uint32_t rsv:6;
uint32_t foe_entry_num_1:1;
uint32_t ppe:1;
uint32_t ALG:1;
uint32_t IF:8;
uint32_t WDMAID:2;
uint32_t RXID:2;
uint32_t WCID:10;
uint32_t BSSID:6;
uint32_t rsv2:4;
uint16_t minfo:1;
uint16_t ntype:3;
uint16_t chid:8;
uint16_t rsv3:4;
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
u16 SOURCE;
u16 DEST;
#endif
} __packed;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
struct head_rx_descinfo4 {
uint32_t foe_entry_num:14;
uint32_t CRSN:5;
uint32_t SPORT:4;
uint32_t rsv:6;
uint32_t foe_entry_num_1:1;
uint32_t ppe:1;
uint32_t ALG:1;
uint32_t IF:8;
uint32_t WDMAID:2;
uint32_t RXID:2;
uint32_t WCID:10;
uint32_t BSSID:6;
uint32_t rsv2:4;
uint16_t minfo:1;
uint16_t ntype:3;
uint16_t chid:8;
uint16_t rsv3:4;
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
u16 SOURCE;
u16 DEST;
#endif
u16 MAGIC_TAG_PROTECT;
} __packed;
#else
struct head_rx_descinfo4 {
uint32_t foe_entry_num:14;
uint32_t CRSN:5;
uint32_t SPORT:3;
uint32_t rsv:1;
uint32_t ALG:1;
uint32_t IF:4;
uint32_t rsv2:4;
uint32_t MAGIC_TAG_PROTECT: 16;
uint32_t WDMAID:2;
uint32_t RXID:2;
uint32_t WCID:10;
uint32_t BSSID:6;
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
u16 SOURCE;
u16 DEST;
#endif
} __packed;
#endif
struct cb_rx_desc_info4 {
u16 MAGIC_TAG_PROTECT0;
uint32_t foe_entry_num:15;
uint32_t CRSN:5;
uint32_t SPORT:4;
uint32_t ALG:1;
uint32_t rsv:7;
uint16_t IF:8;
uint16_t WDMAID:2;
uint16_t RXID:2;
uint16_t WCID:10;
uint16_t BSSID:6;
uint16_t rsv1:4;
uint16_t minfo:1;
uint16_t ntype:3;
uint16_t chid:8;
uint16_t rsv2:4;
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
u16 SOURCE;
u16 DEST;
#endif
u16 MAGIC_TAG_PROTECT1;
} __packed;
#define FOE_INFO_LEN 12
#define WIFI_INFO_LEN 6
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
#undef FOE_INFO_LEN
#define FOE_INFO_LEN (6 + 4 + WIFI_INFO_LEN)
#define FOE_MAGIC_FASTPATH 0x77
#define FOE_MAGIC_L2TPPATH 0x78
#endif
#define FOE_MAGIC_PCI 0x73
#define FOE_MAGIC_WLAN 0x74
#define FOE_MAGIC_GE 0x75
#define FOE_MAGIC_PPE 0x76
#define FOE_MAGIC_WED0 0x78
#define FOE_MAGIC_WED1 0x79
#define FOE_MAGIC_MED 0x80
#define FOE_MAGIC_EDMA0 0x81
#define FOE_MAGIC_EDMA1 0x82
#define TAG_PROTECT 0x6789
#define USE_HEAD_ROOM 0
#define USE_TAIL_ROOM 1
#define USE_CB 2
#define ALL_INFO_ERROR 3
/**************************DMAD FORMAT********************************/
#define FOE_TAG_PROTECT(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
#define FOE_ENTRY_NUM(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->foe_entry_num)
#define FOE_ALG(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->ALG)
#define FOE_AI(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->CRSN)
#define FOE_SP(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->SPORT)
#define FOE_MAGIC_TAG(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->IF)
#define FOE_WDMA_ID(skb) \
(((struct dmad_rx_descinfo4 *)((skb)->head))->WDMAID)
#define FOE_RX_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->RXID)
#define FOE_WC_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->WCID)
#define FOE_BSS_ID(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->BSSID)
#define FOE_PPE(skb) (((struct dmad_rx_descinfo4 *)((skb)->head))->ppe)
/***********************HEAD FORMAT*************************************/
#define FOE_TAG_PROTECT_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->MAGIC_TAG_PROTECT)
#define FOE_ENTRY_NUM_LSB_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num)
#define FOE_ENTRY_NUM_MSB_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->foe_entry_num_1)
#define FOE_ENTRY_NUM_HEAD(skb) \
(((FOE_ENTRY_NUM_MSB_HEAD(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_HEAD(skb))
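The FOE entry index is 15 bits wide, but the descriptor layout only keeps 14 of them in foe_entry_num, so the top bit travels separately as foe_entry_num_1. For example, entry 0x4123 is stored as foe_entry_num = 0x0123 with foe_entry_num_1 = 1, and the macro reassembles ((1 & 0x1) << 14) | 0x0123 = 0x4123.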
#define FOE_ALG_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->ALG)
#define FOE_AI_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->CRSN)
#define FOE_SP_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->SPORT)
#define FOE_MAGIC_TAG_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->IF)
#define FOE_WDMA_ID_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->WDMAID)
#define FOE_RX_ID_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->RXID)
#define FOE_WC_ID_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->WCID)
#define FOE_BSS_ID_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->BSSID)
#define FOE_PPE_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->ppe)
/****************************TAIL FORMAT***************************************/
#define FOE_TAG_PROTECT_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->MAGIC_TAG_PROTECT)
#define FOE_ENTRY_NUM_LSB_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num)
#define FOE_ENTRY_NUM_MSB_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->foe_entry_num_1)
#define FOE_ENTRY_NUM_TAIL(skb) \
(((FOE_ENTRY_NUM_MSB_TAIL(skb) & 0x1) << 14) | FOE_ENTRY_NUM_LSB_TAIL(skb))
#define FOE_ALG_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ALG)
#define FOE_AI_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->CRSN)
#define FOE_SP_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SPORT)
#define FOE_MAGIC_TAG_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->IF)
#define FOE_WDMA_ID_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WDMAID)
#define FOE_RX_ID_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->RXID)
#define FOE_WC_ID_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->WCID)
#define FOE_BSS_ID_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->BSSID)
#define FOE_PPE_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ppe)
/*********************************************************************/
#define FOE_WDMA_ID_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->head))->WDMAID)
#define FOE_RX_ID_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->head))->RXID)
#define FOE_WC_ID_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->head))->WCID)
#define FOE_BSS_ID_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->head))->BSSID)
#define FOE_MINFO(skb) (((struct head_rx_descinfo4 *)((skb)->head))->minfo)
#define FOE_MINFO_NTYPE(skb) (((struct head_rx_descinfo4 *)((skb)->head))->ntype)
#define FOE_MINFO_CHID(skb) (((struct head_rx_descinfo4 *)((skb)->head))->chid)
#define FOE_MINFO_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->minfo)
#define FOE_MINFO_NTYPE_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->ntype)
#define FOE_MINFO_CHID_HEAD(skb) (((struct head_rx_descinfo4 *)((skb)->head))->chid)
#define FOE_MINFO_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->minfo)
#define FOE_MINFO_NTYPE_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->ntype)
#define FOE_MINFO_CHID_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->chid)
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
#define FOE_SOURCE(skb) (((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
#define FOE_DEST(skb) (((struct head_rx_descinfo4 *)((skb)->head))->DEST)
#endif
#define IS_SPACE_AVAILABLE_HEAD(skb) \
((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
#define FOE_INFO_START_ADDR_HEAD(skb) ((skb)->head)
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
#define FOE_SOURCE_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->SOURCE)
#define FOE_DEST_HEAD(skb) \
(((struct head_rx_descinfo4 *)((skb)->head))->DEST)
#endif
#define IS_SPACE_AVAILABLE_TAIL(skb) \
(((skb_tailroom(skb) >= FOE_INFO_LEN) ? 1 : 0))
#define FOE_INFO_START_ADDR_TAIL(skb) \
((unsigned char *)(long)(skb_end_pointer(skb) - FOE_INFO_LEN))
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
#define FOE_SOURCE_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->SOURCE)
#define FOE_DEST_TAIL(skb) \
(((struct pdma_rx_desc_info4 *)((long)((skb_end_pointer(skb)) - FOE_INFO_LEN)))->DEST)
#endif
/* change the position of skb_CB if necessary */
#define CB_OFFSET 40
#define IS_SPACE_AVAILABLE_CB(skb) 1
#define FOE_INFO_START_ADDR_CB(skb) ((skb)->cb + CB_OFFSET)
#define FOE_TAG_PROTECT_CB0(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT0)
#define FOE_TAG_PROTECT_CB1(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->MAGIC_TAG_PROTECT1)
#define FOE_ENTRY_NUM_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->foe_entry_num)
#define FOE_ALG_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->ALG)
#define FOE_AI_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->CRSN)
#define FOE_SP_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SPORT)
#define FOE_MAGIC_TAG_CB(skb) \
(((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->IF)
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
#define FOE_SOURCE_CB(skb) (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->SOURCE)
#define FOE_DEST_CB(skb) (((struct cb_rx_desc_info4 *)((skb)->cb + CB_OFFSET))->DEST)
#endif
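The parallel HEAD/TAIL/CB macro families exist because the FOE info block may live in the skb headroom, tailroom, or control buffer, whichever has room. A sketch of how a caller might choose among the USE_* placements (the chooser itself is illustrative, not part of this header; note IS_SPACE_AVAILABLE_CB always succeeds, so ALL_INFO_ERROR is a defensive fallback):

/* Illustrative placement chooser for the FOE info block. */
static inline int example_foe_info_where(struct sk_buff *skb)
{
	if (IS_SPACE_AVAILABLE_HEAD(skb))
		return USE_HEAD_ROOM;
	if (IS_SPACE_AVAILABLE_TAIL(skb))
		return USE_TAIL_ROOM;
	if (IS_SPACE_AVAILABLE_CB(skb))
		return USE_CB;
	return ALL_INFO_ERROR;
}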
#define IS_MAGIC_TAG_PROTECT_VALID_HEAD(skb) \
(FOE_TAG_PROTECT_HEAD(skb) == TAG_PROTECT)
#define IS_MAGIC_TAG_PROTECT_VALID_TAIL(skb) \
(FOE_TAG_PROTECT_TAIL(skb) == TAG_PROTECT)
#define IS_MAGIC_TAG_PROTECT_VALID_CB(skb) \
((FOE_TAG_PROTECT_CB0(skb) == TAG_PROTECT) && \
(FOE_TAG_PROTECT_CB0(skb) == FOE_TAG_PROTECT_CB1(skb)))
#define IS_IF_PCIE_WLAN_HEAD(skb) \
((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE))
#define IS_IF_PCIE_WLAN_TAIL(skb) \
((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN))
#define IS_IF_PCIE_WLAN_CB(skb) \
((FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_PCI) || \
(FOE_MAGIC_TAG_CB(skb) == FOE_MAGIC_WLAN))
/* macros */
#define magic_tag_set_zero(skb) \
{ \
if ((FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_PCI) || \
(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_WLAN) || \
(FOE_MAGIC_TAG_HEAD(skb) == FOE_MAGIC_GE)) { \
if (IS_SPACE_AVAILABLE_HEAD(skb)) \
FOE_MAGIC_TAG_HEAD(skb) = 0; \
} \
if ((FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_PCI) || \
(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_WLAN) || \
(FOE_MAGIC_TAG_TAIL(skb) == FOE_MAGIC_GE)) { \
if (IS_SPACE_AVAILABLE_TAIL(skb)) \
FOE_MAGIC_TAG_TAIL(skb) = 0; \
} \
}
static inline void hwnat_set_l2tp_unhit(struct iphdr *iph, struct sk_buff *skb)
{
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
/* only clear headroom for TCP or non-L2TP (UDP port 1701) packets */
if ((iph->protocol == IPPROTO_TCP) || (ntohs(udp_hdr(skb)->dest) != 1701)) {
if (IS_SPACE_AVAILABLE_HEAD(skb)) {
FOE_MAGIC_TAG(skb) = 0;
FOE_AI(skb) = UN_HIT;
}
}
#endif
}
static inline void hwnat_set_l2tp_fast_path(u32 *l2tp_fast_path, u32 *pptp_fast_path)
{
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
/* written through pointers so the update is visible to the caller */
*l2tp_fast_path = 1;
*pptp_fast_path = 0;
#endif
}
static inline void hwnat_clear_l2tp_fast_path(u32 *l2tp_fast_path)
{
#if defined(CONFIG_RA_HW_NAT_PPTP_L2TP)
*l2tp_fast_path = 0;
#endif
}
/* #define CONFIG_HW_NAT_IPI */
#if defined(CONFIG_HW_NAT_IPI)
extern int debug_level;
int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp);
uint32_t ppe_extif_rx_handler(struct sk_buff *skb);
int hitbind_force_to_cpu_handler(struct sk_buff *skb, struct foe_entry *entry);
extern unsigned int ipidbg[NR_CPUS][10];
extern unsigned int ipidbg2[NR_CPUS][10];
/* #define HNAT_IPI_RXQUEUE 1 */
#define HNAT_IPI_DQ 1
#define HNAT_IPI_HASH_NORMAL 0
#define HNAT_IPI_HASH_VTAG 1
#define HNAT_IPI_HASH_FROM_EXTIF 2
#define HNAT_IPI_HASH_FROM_GMAC 4
struct hnat_ipi_s {
#if defined(HNAT_IPI_DQ)
struct sk_buff_head skb_input_queue;
struct sk_buff_head skb_process_queue;
#elif defined(HNAT_IPI_RXQUEUE)
atomic_t rx_queue_num;
unsigned int rx_queue_ridx;
unsigned int rx_queue_widx;
struct sk_buff **rx_queue;
#else
/* unsigned int dummy0[0]; */
struct sk_buff_head skb_ipi_queue;
/* unsigned int dummy1[8]; */
#endif
unsigned long time_rec, recv_time;
unsigned int ipi_accum;
/*hwnat ipi use*/
spinlock_t ipilock;
struct tasklet_struct smp_func_call_tsk;
} ____cacheline_aligned_in_smp;
struct hnat_ipi_stat {
unsigned long drop_pkt_num_from_extif;
unsigned long drop_pkt_num_from_ppehit;
unsigned int smp_call_cnt_from_extif;
unsigned int smp_call_cnt_from_ppehit;
atomic_t cpu_status;
/* atomic_t cpu_status_from_extif; */
/* atomic_t cpu_status_from_ppehit; */
/* atomic_t hook_status_from_extif; */
/* atomic_t hook_status_from_ppehit; */
} ____cacheline_aligned_in_smp;
#define cpu_status_from_extif cpu_status
#define cpu_status_from_ppehit cpu_status
struct hnat_ipi_cfg {
unsigned int enable_from_extif;
unsigned int enable_from_ppehit;
unsigned int queue_thresh_from_extif;
unsigned int queue_thresh_from_ppehit;
unsigned int drop_pkt_from_extif;
unsigned int drop_pkt_from_ppehit;
unsigned int ipi_cnt_mod_from_extif;
unsigned int ipi_cnt_mod_from_ppehit;
} ____cacheline_aligned_in_smp;
int hnat_ipi_init(void);
int hnat_ipi_de_init(void);
#endif
#define QDMA_RX 5
#define PDMA_RX 0
#endif


@ -0,0 +1,22 @@
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5ba1c72f..f4239459 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -69,6 +69,7 @@
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
+#include <net/ra_nat.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
@@ -1666,6 +1667,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
+ /*headroom copy*/
+ memcpy(data, skb->head, FOE_INFO_LEN);
+
/*
* if shinfo is shared we must drop the old head gracefully, but if it
* is not we can just drop the old head and let the existing refcount
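The rationale for the added copy: HNAT stashes its FOE info block in the first FOE_INFO_LEN bytes of skb->head, and pskb_expand_head() moves the skb onto freshly allocated memory; without this memcpy the metadata would be silently lost whenever the stack grows an skb's headroom.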


@ -0,0 +1,127 @@
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 3d73c0c..960ade1 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -92,9 +92,12 @@ struct flow_offload {
#define FLOW_OFFLOAD_PATH_VLAN BIT(1)
#define FLOW_OFFLOAD_PATH_PPPOE BIT(2)
#define FLOW_OFFLOAD_PATH_DSA BIT(3)
+#define FLOW_OFFLOAD_PATH_DSLITE BIT(4)
+#define FLOW_OFFLOAD_PATH_6RD BIT(5)
struct flow_offload_hw_path {
struct net_device *dev;
+ struct net_device *virt_dev;
u32 flags;
u8 eth_src[ETH_ALEN];
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index be6801524..c51af70f6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -761,6 +761,7 @@ static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
path->flags |= FLOW_OFFLOAD_PATH_VLAN;
path->vlan_proto = vlan->vlan_proto;
path->vlan_id = vlan->vlan_id;
+ path->virt_dev = dev;
path->dev = vlan->real_dev;
if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1b7e3141c..da4e34f74 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,11 @@
#include <net/netns/generic.h>
#include <net/dst_metadata.h>
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_flow_table.h>
+#endif
+
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
@@ -1880,6 +1885,22 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
+{
+ struct net_device *dev = path->dev;
+ struct ip6_tnl *tnl = netdev_priv(dev);
+
+ if (path->flags & FLOW_OFFLOAD_PATH_DSLITE)
+ return -EEXIST;
+
+ path->flags |= FLOW_OFFLOAD_PATH_DSLITE;
+ path->dev = tnl->dev;
+
+ return 0;
+}
+#endif /* CONFIG_NF_FLOW_TABLE */
+
int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
unsigned int num)
{
@@ -1941,6 +1962,9 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
.ndo_change_mtu = ip6_tnl_change_mtu,
.ndo_get_stats = ip6_get_stats,
.ndo_get_iflink = ip6_tnl_get_iflink,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ .ndo_flow_offload_check = ipip6_dev_flow_offload_check,
+#endif
};
#define IPXIPX_FEATURES (NETIF_F_SG | \
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 98954830c..42b6e8c4c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -52,6 +52,11 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_flow_table.h>
+#endif
+
/*
This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
@@ -1345,6 +1350,22 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
+{
+ struct net_device *dev = path->dev;
+ struct ip_tunnel *tnl = netdev_priv(dev);
+
+ if (path->flags & FLOW_OFFLOAD_PATH_6RD)
+ return -EEXIST;
+
+ path->flags |= FLOW_OFFLOAD_PATH_6RD;
+ path->dev = tnl->dev;
+
+ return 0;
+}
+#endif /* CONFIG_NF_FLOW_TABLE */
+
static const struct net_device_ops ipip6_netdev_ops = {
.ndo_init = ipip6_tunnel_init,
.ndo_uninit = ipip6_tunnel_uninit,
@@ -1352,6 +1373,9 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ .ndo_flow_offload_check = ipip6_dev_flow_offload_check,
+#endif
};
static void ipip6_dev_free(struct net_device *dev)


@ -0,0 +1,176 @@
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1354,9 +1354,21 @@ static int mtk_poll_rx(struct napi_struc
skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
+ (trxd.rxd2 & RX_DMA_VTAG)) {
+ __vlan_hwaccel_put_tag(skb,
+ htons(RX_DMA_VPID(trxd.rxd3)),
+ RX_DMA_TCI(trxd.rxd3));
+
+ /* If netdev is attached to a DSA switch, the special
+ * tag inserted in the VLAN field by the switch hardware
+ * can be offloaded by RX HW VLAN offload. Clear the VLAN
+ * information from @skb to avoid an unexpected 802.1d
+ * handler before the packet enters the DSA framework.
+ */
+ if (netdev_uses_dsa(netdev))
+ __vlan_hwaccel_clear_tag(skb);
+ }
+
if (mtk_offload_check_rx(eth, skb, trxd.rxd4) == 0) {
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -2050,19 +2062,32 @@ static netdev_features_t mtk_fix_feature
}
}
+ if ((features & NETIF_F_HW_VLAN_CTAG_TX) && netdev_uses_dsa(dev)) {
+ netdev_info(dev, "TX vlan offload cannot be enabled when dsa is attached.\n");
+
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
return features;
}
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
int err = 0;
- if (!((dev->features ^ features) & NETIF_F_LRO))
+ if (!((dev->features ^ features) & MTK_SET_FEATURES))
return 0;
if (!(features & NETIF_F_LRO))
mtk_hwlro_netdev_disable(dev);
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
+ else
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
return err;
}
@@ -2326,6 +2351,15 @@ static int mtk_open(struct net_device *d
mtk_gdm_config(eth, gdm_config);
+ /* Indicates CDM to parse the MTK special tag from CPU */
+ if (netdev_uses_dsa(dev)) {
+ u32 val;
+ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+ val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+ }
+
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
@@ -2500,7 +2534,7 @@ static void mtk_dim_tx(struct work_struc
static int mtk_hw_init(struct mtk_eth *eth)
{
- int i, val, ret;
+ int i, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
return 0;
@@ -2555,12 +2589,6 @@ static int mtk_hw_init(struct mtk_eth *e
for (i = 0; i < MTK_MAC_COUNT; i++)
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
- /* Indicates CDM to parse the MTK special tag from CPU
- * which also is working out for untag packets.
- */
- val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
- mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
-
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -42,6 +42,8 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
+#define MTK_SET_FEATURES (NETIF_F_LRO | \
+ NETIF_F_HW_VLAN_CTAG_RX)
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
@@ -78,6 +80,10 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Exgress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
@@ -307,7 +313,9 @@
#define RX_DMA_VTAG BIT(15)
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff)
/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -73,22 +73,28 @@ static struct sk_buff *mtk_tag_rcv(struc
bool is_multicast_skb = is_multicast_ether_addr(dest) &&
!is_broadcast_ether_addr(dest);
- if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
- return NULL;
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ hdr = ntohs(skb->vlan_proto);
+ skb->vlan_proto = 0;
+ skb->vlan_tci = 0;
+ } else {
+ if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+ return NULL;
- /* The MTK header is added by the switch between src addr
- * and ethertype at this point, skb->data points to 2 bytes
- * after src addr so header should be 2 bytes right before.
- */
- phdr = (__be16 *)(skb->data - 2);
- hdr = ntohs(*phdr);
+ /* The MTK header is added by the switch between src addr
+ * and ethertype at this point, skb->data points to 2 bytes
+ * after src addr so header should be 2 bytes right before.
+ */
+ phdr = (__be16 *)(skb->data - 2);
+ hdr = ntohs(*phdr);
- /* Remove MTK tag and recalculate checksum. */
- skb_pull_rcsum(skb, MTK_HDR_LEN);
+ /* Remove MTK tag and recalculate checksum. */
+ skb_pull_rcsum(skb, MTK_HDR_LEN);
- memmove(skb->data - ETH_HLEN,
- skb->data - ETH_HLEN - MTK_HDR_LEN,
- 2 * ETH_ALEN);
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - MTK_HDR_LEN,
+ 2 * ETH_ALEN);
+ }
/* Get source port information */
port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
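In the offloaded branch the MAC has already stripped the 4-byte special tag via RX HW VLAN offload, so the tag is recovered from skb->vlan_proto/vlan_tci and those fields are cleared; the pskb_may_pull()/skb_pull_rcsum()/memmove() sequence remains only for the non-offloaded case, where the tag still sits in the frame between the source address and the ethertype.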


@ -0,0 +1,41 @@
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -125,6 +125,7 @@
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/mtk/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,6 +73,7 @@
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_RALINK) += ralink/
+obj-$(CONFIG_NET_VENDOR_RAW_MEDIATEK) += mtk/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -577,6 +577,8 @@
the Reduced Gigabit Media Independent Interface(RGMII) between
Ethernet physical media devices and the Gigabit Ethernet controller.
+source "drivers/net/phy/mtk/mt753x/Kconfig"
+
endif # PHYLIB
config MICREL_KS8995MA
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -99,6 +99,7 @@
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_MT753X_GSW) += mtk/mt753x/
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o