wlan-ap/backports/0008-realtek-update-to-latest-owrt-HEAD.patch
Latest commit 5477419fd7 by John Crispin, 2021-09-15 08:18:02 +02:00: "realtek: fix DMA bringup"

A kernel option was missing.

Signed-off-by: John Crispin <john@phrozen.org>


From b3305eb14bf4daabee1e49eafcb04a856744db99 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Sat, 4 Sep 2021 05:42:30 +0200
Subject: [PATCH 01/66] realtek: update to latest owrt HEAD

Signed-off-by: John Crispin <john@phrozen.org>
---
.../realtek/base-files/etc/board.d/01_leds | 1 -
.../realtek/base-files/etc/board.d/02_network | 12 +-
.../lib/preinit/05_set_preinit_iface_realtek | 13 -
.../lib/preinit/98_remove_preinit_realtek | 6 -
target/linux/realtek/config-5.4 | 31 +-
.../realtek/dts/rtl8380_netgear_gigabit.dtsi | 11 +-
.../realtek/dts/rtl8380_zyxel_gs1900-10hp.dts | 4 +-
.../realtek/dts/rtl8380_zyxel_gs1900-8.dts | 12 -
.../realtek/dts/rtl8380_zyxel_gs1900.dtsi | 1 +
.../dts/rtl8382_allnet_all-sg8208m.dts | 1 +
.../dts/rtl8382_d-link_dgs-1210-10p.dts | 1 +
.../realtek/dts/rtl8382_d-link_dgs-1210.dtsi | 1 +
.../dts/rtl8392_edgecore_ecs4100-12ph.dts | 301 ++
target/linux/realtek/dts/rtl839x.dtsi | 197 ++
.../include/asm/mach-rtl838x/mach-rtl83xx.h | 30 +-
.../files-5.4/drivers/gpio/gpio-rtl838x.c | 119 +-
.../files-5.4/drivers/net/dsa/rtl83xx/Kconfig | 2 +-
.../drivers/net/dsa/rtl83xx/Makefile | 2 +-
.../drivers/net/dsa/rtl83xx/common.c | 1065 +++++-
.../drivers/net/dsa/rtl83xx/debugfs.c | 392 ++-
.../files-5.4/drivers/net/dsa/rtl83xx/dsa.c | 1363 +++++--
.../drivers/net/dsa/rtl83xx/rtl838x.c | 1749 ++++++++-
.../drivers/net/dsa/rtl83xx/rtl838x.h | 472 ---
.../drivers/net/dsa/rtl83xx/rtl839x.c | 1617 ++++++++-
.../drivers/net/dsa/rtl83xx/rtl83xx.h | 16 +-
.../drivers/net/dsa/rtl83xx/rtl930x.c | 2065 ++++++++++-
.../drivers/net/dsa/rtl83xx/rtl931x.c | 191 +-
.../files-5.4/drivers/net/dsa/rtl83xx/tc.c | 406 +++
.../drivers/net/ethernet/rtl838x_eth.c | 352 +-
.../drivers/net/ethernet/rtl838x_eth.h | 3 +-
.../files-5.4/drivers/net/phy/rtl83xx-phy.c | 1078 ++++--
.../realtek/files-5.4/include/linux/rtl838x.h | 1072 ++++++
.../realtek/files-5.4/net/dsa/tag_rtl83xx.c | 119 +
target/linux/realtek/image/Makefile | 44 +-
.../realtek/patches-5.4/100-dsa-lag.patch | 3123 +++++++++++++++++
.../realtek/patches-5.4/101-brflood-api.patch | 817 +++++
...0-gpio-Add-Realtek-Otto-GPIO-support.patch | 405 +++
...nclude-linux-add-phy-ops-for-rtl838x.patch | 2 +-
...04-include-linux-add-phy-hsgmii-mode.patch | 19 +
.../realtek/patches-5.4/706-sysled.patch | 288 ++
.../patches-5.4/707-dsa-trailer-hack.patch | 44 +
.../realtek/patches-5.4/707-reboot.patch | 7 +
.../patches-5.4/708-dsa-backports.patch | 239 ++
.../realtek/patches-5.4/710-adt7470.patch | 20 +
.../realtek/patches-5.4/711-ec4100.patch | 150 +
.../linux/realtek/patches-5.4/712-fixes.patch | 23 +
46 files changed, 16263 insertions(+), 1623 deletions(-)
delete mode 100644 target/linux/realtek/base-files/lib/preinit/05_set_preinit_iface_realtek
delete mode 100644 target/linux/realtek/base-files/lib/preinit/98_remove_preinit_realtek
delete mode 100644 target/linux/realtek/dts/rtl8380_zyxel_gs1900-8.dts
create mode 100644 target/linux/realtek/dts/rtl8392_edgecore_ecs4100-12ph.dts
create mode 100644 target/linux/realtek/dts/rtl839x.dtsi
delete mode 100644 target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.h
create mode 100644 target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/tc.c
create mode 100644 target/linux/realtek/files-5.4/include/linux/rtl838x.h
create mode 100644 target/linux/realtek/files-5.4/net/dsa/tag_rtl83xx.c
create mode 100644 target/linux/realtek/patches-5.4/100-dsa-lag.patch
create mode 100644 target/linux/realtek/patches-5.4/101-brflood-api.patch
create mode 100644 target/linux/realtek/patches-5.4/500-gpio-Add-Realtek-Otto-GPIO-support.patch
create mode 100644 target/linux/realtek/patches-5.4/704-include-linux-add-phy-hsgmii-mode.patch
create mode 100644 target/linux/realtek/patches-5.4/706-sysled.patch
create mode 100644 target/linux/realtek/patches-5.4/707-dsa-trailer-hack.patch
create mode 100644 target/linux/realtek/patches-5.4/707-reboot.patch
create mode 100644 target/linux/realtek/patches-5.4/708-dsa-backports.patch
create mode 100644 target/linux/realtek/patches-5.4/710-adt7470.patch
create mode 100644 target/linux/realtek/patches-5.4/711-ec4100.patch
create mode 100644 target/linux/realtek/patches-5.4/712-fixes.patch
diff --git a/target/linux/realtek/base-files/etc/board.d/01_leds b/target/linux/realtek/base-files/etc/board.d/01_leds
index 699ab817dd..36ca01a696 100755
--- a/target/linux/realtek/base-files/etc/board.d/01_leds
+++ b/target/linux/realtek/base-files/etc/board.d/01_leds
@@ -1,5 +1,4 @@
#!/bin/sh
-
. /lib/functions/uci-defaults.sh
board=$(board_name)
diff --git a/target/linux/realtek/base-files/etc/board.d/02_network b/target/linux/realtek/base-files/etc/board.d/02_network
index 45ab84ee9e..7a53e2ca9e 100755
--- a/target/linux/realtek/base-files/etc/board.d/02_network
+++ b/target/linux/realtek/base-files/etc/board.d/02_network
@@ -19,12 +19,11 @@ board=$(board_name)
board_config_update
lan_list=""
-for lan in /sys/class/net/lan*; do
- lan_list="$lan_list $(basename $lan)"
+for lan in $(ls -d /sys/class/net/lan* | cut -dn -f3 |sort -n); do
+ lan_list="$lan_list lan$lan"
done
ucidef_set_bridge_device switch
ucidef_set_interface_wan "$lan_list"
-ucidef_set_interface "lan" device "lan1:t" protocol "static" vlan 100
lan_mac=""
wan_mac=""
@@ -32,13 +31,11 @@ label_mac=""
case $board in
*)
wan_mac=$(mtd_get_mac_ascii u-boot-env ethaddr)
+ lan_mac=$(macaddr_add $wan_mac 1)
label_mac=$lan_mac
;;
esac
-lan_mac=$(macaddr_setbit_la $wan_mac)
-
-ucidef_set_interface_macaddr "lan" $lan_mac
ucidef_set_interface_macaddr "wan" $wan_mac
ucidef_set_bridge_mac "$wan_mac"
ucidef_set_network_device_mac eth0 $wan_mac
@@ -49,6 +46,9 @@ done
[ -n "$label_mac" ] && ucidef_set_label_macaddr $label_mac
case $board in
+edgecore,ecs4100-12ph)
+ ucidef_set_poe 130 "lan1 lan2 lan3 lan4 lan5 lan6 lan7 lan8"
+ ;;
netgear,gs110tpp-v1)
ucidef_set_poe 130 "$lan_list"
;;
diff --git a/target/linux/realtek/base-files/lib/preinit/05_set_preinit_iface_realtek b/target/linux/realtek/base-files/lib/preinit/05_set_preinit_iface_realtek
deleted file mode 100644
index e2a7cf5a69..0000000000
--- a/target/linux/realtek/base-files/lib/preinit/05_set_preinit_iface_realtek
+++ /dev/null
@@ -1,13 +0,0 @@
-set_preinit_iface() {
-
- # Create a switch on lan1 to configure the VLAN 1.
- # Without configuring VLAN ID 1 RTL8380 - RTL9300 will not
- # forward packets.
- ip link add name switch type bridge vlan_filtering 1
- ip link set dev lan1 master switch
- ip link set lan1 up
-
- pi_ifname=switch
-}
-
-boot_hook_add preinit_main set_preinit_iface
diff --git a/target/linux/realtek/base-files/lib/preinit/98_remove_preinit_realtek b/target/linux/realtek/base-files/lib/preinit/98_remove_preinit_realtek
deleted file mode 100644
index dc5fdb059b..0000000000
--- a/target/linux/realtek/base-files/lib/preinit/98_remove_preinit_realtek
+++ /dev/null
@@ -1,6 +0,0 @@
-remove_switch() {
- # delete switch created in 05_set_preinit_iface_realtek again
- ip link del name switch
-}
-
-boot_hook_add preinit_main remove_switch
diff --git a/target/linux/realtek/config-5.4 b/target/linux/realtek/config-5.4
index 51025d5d89..cba448acb6 100644
--- a/target/linux/realtek/config-5.4
+++ b/target/linux/realtek/config-5.4
@@ -2,17 +2,18 @@ CONFIG_ARCH_32BIT_OFF_T=y
CONFIG_ARCH_CLOCKSOURCE_DATA=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_MMAP_RND_BITS_MAX=15
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_CEVT_R4K=y
-CONFIG_CLONE_BACKWARDS=y
-CONFIG_COMPAT_32BIT_TIME=y
-CONFIG_HAVE_CLK=y
CONFIG_CLKDEV_LOOKUP=y
+CONFIG_CLKSRC_MMIO=y
+CONFIG_CLONE_BACKWARDS=y
CONFIG_COMMON_CLK=y
CONFIG_COMMON_CLK_BOSTON=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_CPU_GENERIC_DUMP_TLB=y
@@ -40,14 +41,11 @@ CONFIG_DMA_NONCOHERENT_CACHE_SYNC=y
CONFIG_DTC=y
CONFIG_EARLY_PRINTK=y
CONFIG_EARLY_PRINTK_8250=y
-CONFIG_EFI_EARLYCON=y
CONFIG_ETHERNET_PACKET_MANGLE=y
CONFIG_EXTRA_FIRMWARE="rtl838x_phy/rtl838x_8214fc.fw rtl838x_phy/rtl838x_8218b.fw rtl838x_phy/rtl838x_8380.fw"
CONFIG_EXTRA_FIRMWARE_DIR="firmware"
CONFIG_FIXED_PHY=y
-CONFIG_FONT_8x16=y
-CONFIG_FONT_AUTOSELECT=y
-CONFIG_FONT_SUPPORT=y
+CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_FW_LOADER_PAGED_BUF=y
CONFIG_GENERIC_ATOMIC64=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -74,7 +72,9 @@ CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_RTL8231=y
CONFIG_GPIO_RTL838X=y
-CONFIG_REALTEK_SOC_PHY=y
+CONFIG_GPIO_REALTEK_OTTO=y
+CONFIG_GPIO_WATCHDOG=y
+# CONFIG_GPIO_WATCHDOG_ARCH_INITCALL is not set
CONFIG_GRO_CELLS=y
CONFIG_HANDLE_DOMAIN_IRQ=y
CONFIG_HARDWARE_WATCHPOINTS=y
@@ -89,6 +89,7 @@ CONFIG_HZ_PERIODIC=y
CONFIG_I2C=y
CONFIG_I2C_ALGOBIT=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
CONFIG_I2C_GPIO=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_IRQCHIP=y
@@ -107,6 +108,8 @@ CONFIG_MDIO_BUS=y
CONFIG_MDIO_DEVICE=y
CONFIG_MDIO_I2C=y
CONFIG_MEMFD_CREATE=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_REALTEK_EIO=y
CONFIG_MFD_SYSCON=y
CONFIG_MIGRATION=y
CONFIG_MIPS=y
@@ -138,7 +141,7 @@ CONFIG_NEED_PER_CPU_KM=y
CONFIG_NET_DEVLINK=y
CONFIG_NET_DSA=y
CONFIG_NET_DSA_RTL83XX=y
-CONFIG_NET_DSA_TAG_TRAILER=y
+CONFIG_NET_DSA_TAG_RTL83XX=y
CONFIG_NET_RTL838X=y
CONFIG_NET_SWITCHDEV=y
CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y
@@ -161,12 +164,17 @@ CONFIG_PINCTRL=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_PSB6970_PHY=y
+CONFIG_RATIONAL=y
CONFIG_REALTEK_PHY=y
+CONFIG_REALTEK_SOC_PHY=y
CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
CONFIG_REGMAP_MMIO=y
CONFIG_RESET_CONTROLLER=y
CONFIG_RTL838X=y
CONFIG_RTL9300_TIMER=y
+CONFIG_SENSORS_GPIO_FAN=y
+CONFIG_SENSORS_LM75=y
CONFIG_SERIAL_MCTRL_GPIO=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SFP=y
@@ -175,7 +183,7 @@ CONFIG_SPI_MASTER=y
CONFIG_SPI_MEM=y
CONFIG_SPI_RTL838X=y
CONFIG_SRCU=y
-CONFIG_SWAP_IO_SPACE=y
+CONFIG_SWCONFIG=y
CONFIG_SWPHY=y
CONFIG_SYSCTL_EXCEPTION_TRACE=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
@@ -187,8 +195,11 @@ CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
CONFIG_SYS_SUPPORTS_MIPS16=y
CONFIG_TARGET_ISA_REV=2
CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
CONFIG_TINY_SRCU=y
CONFIG_USE_GENERIC_EARLY_PRINTK_8250=y
CONFIG_USE_OF=y
+CONFIG_WATCHDOG_CORE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_ZLIB_INFLATE=y
diff --git a/target/linux/realtek/dts/rtl8380_netgear_gigabit.dtsi b/target/linux/realtek/dts/rtl8380_netgear_gigabit.dtsi
index 0d34ca5dc2..2cda3c15a3 100644
--- a/target/linux/realtek/dts/rtl8380_netgear_gigabit.dtsi
+++ b/target/linux/realtek/dts/rtl8380_netgear_gigabit.dtsi
@@ -47,30 +47,31 @@
#size-cells = <1>;
partition@0 {
- label = "u-boot";
+ label = "loader";
reg = <0x0000000 0x00e0000>;
read-only;
};
partition@e0000 {
- label = "u-boot-env";
+ label = "bdinfo";
reg = <0x00e0000 0x0010000>;
read-only;
};
partition@f0000 {
- label = "u-boot-env2";
+ label = "sysinfo";
reg = <0x00f0000 0x0010000>;
+ read-only;
};
partition@100000 {
- label = "jffs";
+ label = "jffs2_cfg";
reg = <0x0100000 0x0100000>;
read-only;
};
partition@200000 {
- label = "jffs2";
+ label = "jffs2_log";
reg = <0x0200000 0x0100000>;
read-only;
};
diff --git a/target/linux/realtek/dts/rtl8380_zyxel_gs1900-10hp.dts b/target/linux/realtek/dts/rtl8380_zyxel_gs1900-10hp.dts
index c16028788e..a590450055 100644
--- a/target/linux/realtek/dts/rtl8380_zyxel_gs1900-10hp.dts
+++ b/target/linux/realtek/dts/rtl8380_zyxel_gs1900-10hp.dts
@@ -17,7 +17,7 @@
};
sfp0: sfp-p9 {
- compatible = "sff,sfp";
+ compatible = "_sff,sfp";
i2c-bus = <&i2c0>;
los-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
tx-fault-gpio = <&gpio1 22 GPIO_ACTIVE_HIGH>;
@@ -36,7 +36,7 @@
};
sfp1: sfp-p10 {
- compatible = "sff,sfp";
+ compatible = "_sff,sfp";
i2c-bus = <&i2c1>;
los-gpio = <&gpio1 33 GPIO_ACTIVE_HIGH>;
tx-fault-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
diff --git a/target/linux/realtek/dts/rtl8380_zyxel_gs1900-8.dts b/target/linux/realtek/dts/rtl8380_zyxel_gs1900-8.dts
deleted file mode 100644
index e9c5efe603..0000000000
--- a/target/linux/realtek/dts/rtl8380_zyxel_gs1900-8.dts
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "rtl8380_zyxel_gs1900.dtsi"
-
-/ {
- compatible = "zyxel,gs1900-8", "realtek,rtl838x-soc";
- model = "ZyXEL GS1900-8 Switch";
-};
-
-&gpio1 {
- /delete-node/ poe_enable;
-};
diff --git a/target/linux/realtek/dts/rtl8380_zyxel_gs1900.dtsi b/target/linux/realtek/dts/rtl8380_zyxel_gs1900.dtsi
index d61ac3b2b8..c4441ec30e 100644
--- a/target/linux/realtek/dts/rtl8380_zyxel_gs1900.dtsi
+++ b/target/linux/realtek/dts/rtl8380_zyxel_gs1900.dtsi
@@ -79,6 +79,7 @@
partition@50000 {
label = "u-boot-env2";
reg = <0x50000 0x10000>;
+ read-only;
};
partition@60000 {
label = "jffs";
diff --git a/target/linux/realtek/dts/rtl8382_allnet_all-sg8208m.dts b/target/linux/realtek/dts/rtl8382_allnet_all-sg8208m.dts
index fdcc01fdac..681d699e8a 100644
--- a/target/linux/realtek/dts/rtl8382_allnet_all-sg8208m.dts
+++ b/target/linux/realtek/dts/rtl8382_allnet_all-sg8208m.dts
@@ -79,6 +79,7 @@
partition@90000 {
label = "u-boot-env2";
reg = <0x90000 0x10000>;
+ read-only;
};
partition@a0000 {
diff --git a/target/linux/realtek/dts/rtl8382_d-link_dgs-1210-10p.dts b/target/linux/realtek/dts/rtl8382_d-link_dgs-1210-10p.dts
index e2f5e7a4c0..a2ebdad11b 100644
--- a/target/linux/realtek/dts/rtl8382_d-link_dgs-1210-10p.dts
+++ b/target/linux/realtek/dts/rtl8382_d-link_dgs-1210-10p.dts
@@ -77,6 +77,7 @@
partition@c0000 {
label = "u-boot-env2";
reg = <0x000c0000 0x40000>;
+ read-only;
};
partition@280000 {
label = "firmware";
diff --git a/target/linux/realtek/dts/rtl8382_d-link_dgs-1210.dtsi b/target/linux/realtek/dts/rtl8382_d-link_dgs-1210.dtsi
index a14738c8a9..e41c6f9f22 100644
--- a/target/linux/realtek/dts/rtl8382_d-link_dgs-1210.dtsi
+++ b/target/linux/realtek/dts/rtl8382_d-link_dgs-1210.dtsi
@@ -61,6 +61,7 @@
partition@c0000 {
label = "u-boot-env2";
reg = <0x000c0000 0x40000>;
+ read-only;
};
partition@280000 {
label = "firmware";
diff --git a/target/linux/realtek/dts/rtl8392_edgecore_ecs4100-12ph.dts b/target/linux/realtek/dts/rtl8392_edgecore_ecs4100-12ph.dts
new file mode 100644
index 0000000000..7de5c5f053
--- /dev/null
+++ b/target/linux/realtek/dts/rtl8392_edgecore_ecs4100-12ph.dts
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "rtl839x.dtsi"
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "edgecore,ecs4100-12ph", "realtek,rtl838x-soc";
+ model = "Edgecore ECS4100-12PH Switch";
+
+ aliases {
+ led-boot = &led_sys;
+ led-failsafe = &led_sys;
+ led-running = &led_sys;
+ led-upgrade = &led_sys;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x10000000>;
+ };
+
+ /* i2c of the left SFP cage: port 9 */
+ i2c0: i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 7 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ sfp0: sfp-p9 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c0>;
+ los-gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ };
+
+ i2c1: i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio1 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ sfp1: sfp-p10 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c1>;
+ los-gpio = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ };
+
+ i2c2: i2c-gpio-2 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio1 22 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 23 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ sfp2: sfp-p11 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c2>;
+ los-gpio = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpio1 24 GPIO_ACTIVE_LOW>;
+ };
+
+ i2c3: i2c-gpio-3 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio1 11 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ sfp3: sfp-p12 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c3>;
+ los-gpio = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpio = <&gpio1 13 GPIO_ACTIVE_LOW>;
+ };
+
+ i2c4: i2c-gpio-4 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio1 29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adt7470@2f {
+ compatible = "adi,adt7470";
+ reg = <0x2f>;
+ };
+
+ lm75b@48 {
+ compatible = "nxp,lm75a";
+ reg = <0x48>;
+ };
+
+ eeprom@506 {
+ compatible = "atmel,24c32";
+ reg = <0x56>;
+ };
+ };
+
+ watchdog {
+ compatible = "linux,wdt-gpio";
+ gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
+ hw_algo = "toggle";
+ hw_margin_ms = <1200>;
+ };
+
+ reboot@0 {
+ compatible = "edgecore,reboot";
+ gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan0: gpio-fan {
+ #cooling-cells = <2>;
+ compatible = "gpio-fan";
+ gpio-fan,speed-map = <0 0 3000 1>;
+ gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&gpio0 {
+ poe_enable {
+ gpio-hog;
+ gpios = <16 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+
+ poe_reset {
+ gpio-hog;
+ gpios = <18 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+};
+
+&spi0 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+ partition@100000 {
+ label = "u-boot-env";
+ reg = <0x100000 0x100000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "firmware";
+ reg = <0x200000 0xdf0000>;
+ compatible = "openwrt,uimage", "denx,uimage";
+ };
+ partition@ff0000 {
+ label = "certificates";
+ reg = <0xff0000 0x10000>;
+ };
+ };
+ };
+};
+
+&ethernet0 {
+ mdio: mdio-bus {
+ compatible = "realtek,rtl838x-mdio";
+ regmap = <&ethernet0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ INTERNAL_PHY(0)
+ INTERNAL_PHY(1)
+ INTERNAL_PHY(2)
+ INTERNAL_PHY(3)
+ INTERNAL_PHY(4)
+ INTERNAL_PHY(5)
+ INTERNAL_PHY(6)
+ INTERNAL_PHY(7)
+
+ phy48: ethernet-phy@48 {
+ reg = <48>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ sfp = <&sfp0>;
+ };
+
+ phy49: ethernet-phy@49 {
+ reg = <49>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ sfp = <&sfp1>;
+ };
+
+ phy50: ethernet-phy@50 {
+ reg = <50>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ sfp = <&sfp2>;
+ };
+
+ phy51: ethernet-phy@51 {
+ reg = <51>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ sfp = <&sfp3>;
+ };
+ };
+};
+
+&switch0 {
+ ext_io: ext-io@e4 {
+ compatible = "realtek,rtl8390-eio", "syscon";
+ reg = <0xe4 0x17c>;
+
+ led_sys: sys-led {
+ active-low;
+ label = "green:status";
+ linux,default-trigger = "default-on";
+ };
+ };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ SWITCH_PORT(0, 1, qsgmii)
+ SWITCH_PORT(1, 2, qsgmii)
+ SWITCH_PORT(2, 3, qsgmii)
+ SWITCH_PORT(3, 4, qsgmii)
+ SWITCH_PORT(4, 5, qsgmii)
+ SWITCH_PORT(5, 6, qsgmii)
+ SWITCH_PORT(6, 7, qsgmii)
+ SWITCH_PORT(7, 8, qsgmii)
+
+ port@48 {
+ reg = <48>;
+ label = "lan9";
+ phy-mode = "sgmii";
+ phy-handle = <&phy48>;
+ managed = "in-band-status";
+ };
+
+ port@49 {
+ reg = <49>;
+ label = "lan10";
+ phy-mode = "sgmii";
+ phy-handle = <&phy49>;
+ managed = "in-band-status";
+ };
+
+ port@50 {
+ reg = <50>;
+ label = "lan11";
+ phy-mode = "sgmii";
+ phy-handle = <&phy50>;
+ managed = "in-band-status";
+ };
+
+ port@51 {
+ reg = <51>;
+ label = "lan12";
+ phy-mode = "sgmii";
+ phy-handle = <&phy51>;
+ managed = "in-band-status";
+ };
+
+ port@52 {
+ ethernet = <&ethernet0>;
+ reg = <52>;
+ phy-mode = "qsgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+};
diff --git a/target/linux/realtek/dts/rtl839x.dtsi b/target/linux/realtek/dts/rtl839x.dtsi
new file mode 100644
index 0000000000..9816b2a09b
--- /dev/null
+++ b/target/linux/realtek/dts/rtl839x.dtsi
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+/dts-v1/;
+
+#define STRINGIZE(s) #s
+#define LAN_LABEL(p, s) STRINGIZE(p ## s)
+#define SWITCH_PORT_LABEL(n) LAN_LABEL(lan, n)
+
+#define INTERNAL_PHY(n) \
+ phy##n: ethernet-phy@##n { \
+ reg = <##n>; \
+ compatible = "ethernet-phy-ieee802.3-c22"; \
+ phy-is-integrated; \
+ };
+
+#define EXTERNAL_PHY(n) \
+ phy##n: ethernet-phy@##n { \
+ reg = <##n>; \
+ compatible = "ethernet-phy-ieee802.3-c22"; \
+ };
+
+#define EXTERNAL_SFP_PHY(n) \
+ phy##n: ethernet-phy@##n { \
+ compatible = "ethernet-phy-ieee802.3-c22"; \
+ sfp; \
+ media = "fibre"; \
+ reg = <##n>; \
+ };
+
+#define SWITCH_PORT(n, s, m) \
+ port@##n { \
+ reg = <##n>; \
+ label = SWITCH_PORT_LABEL(s) ; \
+ phy-handle = <&phy##n>; \
+ phy-mode = #m ; \
+ };
+
+#define SWITCH_SFP_PORT(n, s, m) \
+ port@##n { \
+ reg = <##n>; \
+ label = SWITCH_PORT_LABEL(s) ; \
+ phy-handle = <&phy##n>; \
+ phy-mode = #m ; \
+ fixed-link { \
+ speed = <1000>; \
+ full-duplex; \
+ }; \
+ };
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "realtek,rtl838x-soc";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ frequency = <700000000>;
+
+ cpu@0 {
+ compatible = "mips,mips34Kc";
+ reg = <0>;
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,38400";
+ };
+
+
+ cpuintc: cpuintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: rtlintc {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "realtek,rt8380-intc";
+ reg = <0xb8003000 0x20>;
+ };
+
+ spi0: spi@b8001200 {
+ status = "okay";
+
+ compatible = "realtek,rtl838x-nor";
+ reg = <0xb8001200 0x100>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ uart0: uart@b8002000 {
+ status = "okay";
+
+ compatible = "ns16550a";
+ reg = <0xb8002000 0x100>;
+
+ clock-frequency = <200000000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <31>;
+
+ reg-io-width = <1>;
+ reg-shift = <2>;
+ fifo-size = <1>;
+ no-loopback-test;
+ };
+
+ uart1: uart@b8002100 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&enable_uart1>;
+
+ status = "okay";
+
+ compatible = "ns16550a";
+ reg = <0xb8002100 0x100>;
+
+ clock-frequency = <200000000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <30>;
+
+ reg-io-width = <1>;
+ reg-shift = <2>;
+ fifo-size = <1>;
+ no-loopback-test;
+ };
+
+ gpio0: gpio-controller@b8003500 {
+ compatible = "realtek,rtl8380-gpio", "realtek,otto-gpio";
+ reg = <0xb8003500 0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <24>;
+ };
+
+ gpio1: rtl8231-gpio {
+ status = "disabled";
+ compatible = "realtek,rtl8231-gpio";
+ #gpio-cells = <2>;
+ indirect-access-bus-id = <3>;
+ gpio-controller;
+ };
+
+ pinmux: pinmux@bb001000 {
+ compatible = "pinctrl-single";
+ reg = <0xbb000004 0x4>;
+
+ pinctrl-single,bit-per-mux;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x1>;
+ #pinctrl-cells = <2>;
+
+ enable_uart1: pinmux_enable_uart1 {
+ pinctrl-single,bits = <0x0 0x01 0x03>;
+ };
+ };
+
+ ethernet0: ethernet@bb00a300 {
+ status = "okay";
+
+ compatible = "realtek,rtl838x-eth";
+ reg = <0xbb00a300 0x100>;
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+ #interrupt-cells = <1>;
+ phy-mode = "internal";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ switch0: switch@bb000000 {
+ status = "okay";
+
+ interrupt-parent = <&intc>;
+ interrupts = <20>;
+
+ compatible = "realtek,rtl83xx-switch", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xbb000000 0x10000>;
+
+ };
+};
diff --git a/target/linux/realtek/files-5.4/arch/mips/include/asm/mach-rtl838x/mach-rtl83xx.h b/target/linux/realtek/files-5.4/arch/mips/include/asm/mach-rtl838x/mach-rtl83xx.h
index fc401e5481..fa45401182 100644
--- a/target/linux/realtek/files-5.4/arch/mips/include/asm/mach-rtl838x/mach-rtl83xx.h
+++ b/target/linux/realtek/files-5.4/arch/mips/include/asm/mach-rtl838x/mach-rtl83xx.h
@@ -214,6 +214,13 @@
#define RTL838X_GPIO_PAB_IMR (GPIO_CTRL_REG_BASE + 0x14)
#define RTL838X_GPIO_PC_IMR (GPIO_CTRL_REG_BASE + 0x18)
+#define RTL930X_GPIO_CTRL_REG_BASE ((volatile void *) 0xb8003300)
+#define RTL930X_GPIO_PABCD_DIR (RTL930X_GPIO_CTRL_REG_BASE + 0x8)
+#define RTL930X_GPIO_PABCD_DAT (RTL930X_GPIO_CTRL_REG_BASE + 0xc)
+#define RTL930X_GPIO_PABCD_ISR (RTL930X_GPIO_CTRL_REG_BASE + 0x10)
+#define RTL930X_GPIO_PAB_IMR (RTL930X_GPIO_CTRL_REG_BASE + 0x14)
+#define RTL930X_GPIO_PCD_IMR (RTL930X_GPIO_CTRL_REG_BASE + 0x18)
+
#define RTL838X_MODEL_NAME_INFO (0x00D4)
#define RTL839X_MODEL_NAME_INFO (0x0FF0)
#define RTL93XX_MODEL_NAME_INFO (0x0004)
@@ -313,8 +320,29 @@
#define RTL839X_SMI_PORT_POLLING_CTRL (0x03fc)
#define RTL839X_PHYREG_ACCESS_CTRL (0x03DC)
#define RTL839X_PHYREG_CTRL (0x03E0)
-#define RTL839X_PHYREG_PORT_CTRL(p) (0x03E4 + ((p >> 5) << 2))
+#define RTL839X_PHYREG_PORT_CTRL (0x03E4)
#define RTL839X_PHYREG_DATA_CTRL (0x03F0)
+#define RTL839X_PHYREG_MMD_CTRL (0x3F4)
+
+#define RTL930X_SMI_GLB_CTRL (0xCA00)
+#define RTL930X_SMI_POLL_CTRL (0xca90)
+#define RTL930X_SMI_PORT0_15_POLLING_SEL (0xCA08)
+#define RTL930X_SMI_PORT16_27_POLLING_SEL (0xCA0C)
+#define RTL930X_SMI_PORT0_5_ADDR (0xCB80)
+#define RTL930X_SMI_ACCESS_PHY_CTRL_0 (0xCB70)
+#define RTL930X_SMI_ACCESS_PHY_CTRL_1 (0xCB74)
+#define RTL930X_SMI_ACCESS_PHY_CTRL_2 (0xCB78)
+#define RTL930X_SMI_ACCESS_PHY_CTRL_3 (0xCB7C)
+
+#define RTL931X_SMI_GLB_CTRL1 (0x0CBC)
+#define RTL931X_SMI_GLB_CTRL0 (0x0CC0)
+#define RTL931X_SMI_PORT_POLLING_CTRL (0x0CCC)
+#define RTL931X_SMI_INDRT_ACCESS_CTRL_0 (0x0C00)
+#define RTL931X_SMI_INDRT_ACCESS_CTRL_1 (0x0C04)
+#define RTL931X_SMI_INDRT_ACCESS_CTRL_2 (0x0C08)
+#define RTL931X_SMI_INDRT_ACCESS_CTRL_3 (0x0C10)
+#define RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL (0x0C14)
+#define RTL931X_SMI_INDRT_ACCESS_MMD_CTRL (0xC18)
#define RTL930X_SMI_GLB_CTRL (0xCA00)
#define RTL930X_SMI_POLL_CTRL (0xca90)
diff --git a/target/linux/realtek/files-5.4/drivers/gpio/gpio-rtl838x.c b/target/linux/realtek/files-5.4/drivers/gpio/gpio-rtl838x.c
index 8207e4bb73..2009ebcce9 100644
--- a/target/linux/realtek/files-5.4/drivers/gpio/gpio-rtl838x.c
+++ b/target/linux/realtek/files-5.4/drivers/gpio/gpio-rtl838x.c
@@ -126,6 +126,16 @@ void rtl838x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
__asm__ volatile ("sync");
}
+void rtl930x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ pr_debug("rtl838x_set: %d, value: %d\n", offset, value);
+ /* Internal GPIO of the RTL9300 */
+ if (value)
+ rtl83xx_w32_mask(0, BIT(offset), RTL930X_GPIO_PABCD_DAT);
+ else
+ rtl83xx_w32_mask(BIT(offset), 0, RTL930X_GPIO_PABCD_DAT);
+}
+
static int rtl838x_direction_input(struct gpio_chip *gc, unsigned int offset)
{
pr_debug("%s: %d\n", __func__, offset);
@@ -139,12 +149,30 @@ static int rtl838x_direction_input(struct gpio_chip *gc, unsigned int offset)
return -ENOTSUPP;
}
+static int rtl930x_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ pr_debug("%s: %d\n", __func__, offset);
+
+ rtl83xx_w32_mask(BIT(offset), 0, RTL930X_GPIO_PABCD_DIR);
+ return 0;
+}
+
static int rtl838x_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
pr_debug("%s: %d\n", __func__, offset);
if (offset < 32)
rtl83xx_w32_mask(0, BIT(offset), RTL838X_GPIO_PABC_DIR);
- rtl838x_gpio_set(gc, offset, value);
+ rtl930x_gpio_set(gc, offset, value);
+
+ /* LED for PWR and SYS driver is direction output by default */
+ return 0;
+}
+
+static int rtl930x_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ pr_debug("%s: %d\n", __func__, offset);
+ rtl83xx_w32_mask(0, BIT(offset), RTL930X_GPIO_PABCD_DIR);
+ rtl930x_gpio_set(gc, offset, value);
/* LED for PWR and SYS driver is direction output by default */
return 0;
@@ -169,6 +197,16 @@ static int rtl838x_get_direction(struct gpio_chip *gc, unsigned int offset)
return 0;
}
+static int rtl930x_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ u32 v = 0;
+
+ v = rtl83xx_r32(RTL930X_GPIO_PABCD_DIR);
+ if (v & BIT(offset))
+ return 0;
+ return 1;
+}
+
static int rtl838x_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
u32 v;
@@ -192,24 +230,14 @@ static int rtl838x_gpio_get(struct gpio_chip *gc, unsigned int offset)
return 0;
}
-/* BUG:
- bit = (offset - 64) % 32;
- if (offset >= 64 && offset < 96) {
- if (sw_r32(RTL838X_LED1_SW_P_EN_CTRL) & BIT(bit))
- return 1;
- return 0;
- }
- if (offset >= 96 && offset < 128) {
- if (sw_r32(RTL838X_LED1_SW_P_EN_CTRL) & BIT(bit))
- return 1;
- return 0;
- }
- if (offset >= 128 && offset < 160) {
- if (sw_r32(RTL838X_LED1_SW_P_EN_CTRL) & BIT(bit))
- return 1;
- return 0;
- }
- */
+ return 0;
+}
+
+static int rtl930x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ u32 v = rtl83xx_r32(RTL930X_GPIO_PABCD_DAT);
+ if (v & BIT(offset))
+ return 1;
return 0;
}
@@ -319,7 +347,7 @@ static int rtl838x_gpio_probe(struct platform_device *pdev)
struct rtl838x_gpios *gpios;
int err;
- pr_info("Probing RTL838X GPIOs\n");
+ pr_info("Probing RTL838X GPIOs for %08x\n", soc_info.id);
if (!np) {
dev_err(&pdev->dev, "No DT found\n");
@@ -351,6 +379,9 @@ static int rtl838x_gpio_probe(struct platform_device *pdev)
case 0x8393:
pr_debug("Found RTL8393 GPIO\n");
break;
+ case 0x9302:
+ pr_info("Found RTL9302 GPIO\n");
+ break;
default:
pr_err("Unknown GPIO chip id (%04x)\n", gpios->id);
return -ENODEV;
@@ -363,6 +394,7 @@ static int rtl838x_gpio_probe(struct platform_device *pdev)
gpios->led_sw_p_en_ctrl = rtl838x_led_sw_p_en_ctrl;
gpios->ext_gpio_dir = rtl838x_ext_gpio_dir;
gpios->ext_gpio_data = rtl838x_ext_gpio_data;
+ gpios->irq = 31;
}
if (soc_info.family == RTL8390_FAMILY_ID) {
@@ -372,30 +404,46 @@ static int rtl838x_gpio_probe(struct platform_device *pdev)
gpios->led_sw_p_en_ctrl = rtl839x_led_sw_p_en_ctrl;
gpios->ext_gpio_dir = rtl839x_ext_gpio_dir;
gpios->ext_gpio_data = rtl839x_ext_gpio_data;
+ gpios->irq = 31;
+ }
+
+ if (soc_info.family == RTL9300_FAMILY_ID) {
+ gpios->irq = 13;
}
- gpios->dev = dev;
- gpios->gc.base = 0;
- /* 0-31: internal
- * 32-63, LED control register
- * 64-95: PORT-LED 0
- * 96-127: PORT-LED 1
- * 128-159: PORT-LED 2
- */
- gpios->gc.ngpio = 160;
gpios->gc.label = "rtl838x";
gpios->gc.parent = dev;
gpios->gc.owner = THIS_MODULE;
gpios->gc.can_sleep = true;
- gpios->irq = 31;
+ gpios->dev = dev;
+ gpios->gc.base = 0;
- gpios->gc.direction_input = rtl838x_direction_input;
- gpios->gc.direction_output = rtl838x_direction_output;
- gpios->gc.set = rtl838x_gpio_set;
- gpios->gc.get = rtl838x_gpio_get;
- gpios->gc.get_direction = rtl838x_get_direction;
+ if (soc_info.family != RTL9300_FAMILY_ID) {
+ /* 0-31: internal
+ * 32-63, LED control register
+ * 64-95: PORT-LED 0
+ * 96-127: PORT-LED 1
+ * 128-159: PORT-LED 2
+ */
+ gpios->gc.ngpio = 160;
+
+ gpios->gc.direction_input = rtl838x_direction_input;
+ gpios->gc.direction_output = rtl838x_direction_output;
+ gpios->gc.set = rtl838x_gpio_set;
+ gpios->gc.get = rtl838x_gpio_get;
+ gpios->gc.get_direction = rtl838x_get_direction;
+ } else {
+ gpios->gc.ngpio = 32;
+
+ gpios->gc.direction_input = rtl930x_direction_input;
+ gpios->gc.direction_output = rtl930x_direction_output;
+ gpios->gc.set = rtl930x_gpio_set;
+ gpios->gc.get = rtl930x_gpio_get;
+ gpios->gc.get_direction = rtl930x_get_direction;
+ }
if (of_property_read_bool(np, "take-port-leds")) {
+ pr_info("A1\n");
if (of_property_read_u32(np, "leds-per-port", &gpios->leds_per_port))
gpios->leds_per_port = 2;
if (of_property_read_u32(np, "led-mode", &gpios->led_mode))
@@ -408,6 +456,7 @@ static int rtl838x_gpio_probe(struct platform_device *pdev)
}
err = devm_gpiochip_add_data(dev, &gpios->gc, gpios);
+
return err;
}
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Kconfig b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Kconfig
index f293832eb5..05d12bace9 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Kconfig
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Kconfig
@@ -2,7 +2,7 @@
config NET_DSA_RTL83XX
tristate "Realtek RTL838x/RTL839x switch support"
depends on RTL838X
- select NET_DSA_TAG_TRAILER
+ select NET_DSA_TAG_RTL83XX
---help---
This driver adds support for Realtek RTL83xx series switching.
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Makefile b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Makefile
index 016184c3d9..8752c79700 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Makefile
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_RTL83XX) += common.o dsa.o \
- rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o
+ rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o tc.o
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/common.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/common.c
index 6940afa7f2..f16bdb998e 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/common.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/common.c
@@ -2,6 +2,11 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <net/arp.h>
+#include <net/nexthop.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <linux/inetdevice.h>
#include <asm/mach-rtl838x/mach-rtl83xx.h>
#include "rtl83xx.h"
@@ -18,27 +23,6 @@ extern const struct dsa_switch_ops rtl930x_switch_ops;
DEFINE_MUTEX(smi_lock);
-// TODO: unused
-static void dump_fdb(struct rtl838x_switch_priv *priv)
-{
- struct rtl838x_l2_entry e;
- int i;
-
- mutex_lock(&priv->reg_mutex);
-
- for (i = 0; i < priv->fib_entries; i++) {
- priv->r->read_l2_entry_using_hash(i >> 2, i & 0x3, &e);
-
- if (!e.valid) /* Check for invalid entry */
- continue;
-
- pr_debug("-> port %02d: mac %pM, vid: %d, rvid: %d, MC: %d, %d\n",
- e.port, &e.mac[0], e.vid, e.rvid, e.is_ip_mc, e.is_ipv6_mc);
- }
-
- mutex_unlock(&priv->reg_mutex);
-}
-
int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
{
u32 msti = 0;
@@ -144,7 +128,6 @@ void rtl_table_read(struct table_reg *r, int idx)
cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
sw_w32(cmd, r->addr);
- pr_debug("Writing %08x to %x for read\n", cmd, r->addr);
do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
}
@@ -156,8 +139,6 @@ void rtl_table_write(struct table_reg *r, int idx)
u32 cmd = r->rmode ? 0 : BIT(r->c_bit);
cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
- pr_debug("Writing %08x to %x for write, value %08x\n",
- cmd, r->addr, sw_r32(0xb344));
sw_w32(cmd, r->addr);
do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
}
@@ -202,23 +183,27 @@ u64 rtl838x_get_port_reg(int reg)
/* Port register accessor functions for the RTL839x and RTL931X SoCs */
void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
{
- sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
- sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
+ u32 *setmask = (u32*)&set;
+ u32 *clearmask = (u32*)&clear;
+
+ sw_w32_mask(clearmask[0], setmask[0], reg);
+ sw_w32_mask(clearmask[1], setmask[1], reg + 4);
}
u64 rtl839x_get_port_reg_be(int reg)
{
- u64 v = sw_r32(reg);
-
- v <<= 32;
- v |= sw_r32(reg + 4);
+ u64 v;
+ u32 *get = (u32*)&v;
+ get[0]=sw_r32(reg);
+ get[1]=sw_r32(reg + 4);
return v;
}
void rtl839x_set_port_reg_be(u64 set, int reg)
{
- sw_w32(set >> 32, reg);
- sw_w32(set & 0xffffffff, reg + 4);
+ u32 *setmask = (u32*)&set;
+ sw_w32(setmask[0], reg);
+ sw_w32(setmask[1], reg + 4);
}
void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
@@ -326,8 +311,6 @@ static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
if (of_property_read_u32(dn, "reg", &pn))
continue;
- priv->ports[pn].dp = dsa_to_port(priv->ds, pn);
-
// Check for the integrated SerDes of the RTL8380M first
if (of_property_read_bool(dn, "phy-is-integrated")
&& priv->id == 0x8380 && pn >= 24) {
@@ -355,12 +338,10 @@ static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
}
}
- // TODO: Do this needs to come from the .dts, at least the SerDes number
+ // TODO: Do this needs to come from the .dts
if (priv->family_id == RTL9300_FAMILY_ID) {
priv->ports[24].is2G5 = true;
priv->ports[25].is2G5 = true;
- priv->ports[24].sds_num = 1;
- priv->ports[24].sds_num = 2;
}
/* Disable MAC polling the PHY so that we can start configuration */
@@ -370,27 +351,20 @@ static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
if (priv->family_id == RTL8380_FAMILY_ID) {
/* Enable SerDes NWAY and PHY control via SoC */
sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
- } else {
+ } else if (priv->family_id == RTL8390_FAMILY_ID) {
/* Disable PHY polling via SoC */
sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
}
- /* Power on fibre ports and reset them if necessary */
+ /* Power on fibre ports and reset them if necessary
+ * TODO: Put this in rtl83xx_phylink_mac_config ? see rtl93xx_phylink_mac_config
+ */
if (priv->ports[24].phy == PHY_RTL838X_SDS) {
pr_debug("Powering on fibre ports & reset\n");
rtl8380_sds_power(24, 1);
rtl8380_sds_power(26, 1);
}
- // TODO: Only power on SerDes with external PHYs connected
- if (priv->family_id == RTL9300_FAMILY_ID) {
- pr_info("RTL9300 Powering on SerDes ports\n");
- rtl9300_sds_power(24, 1);
- rtl9300_sds_power(25, 1);
- rtl9300_sds_power(26, 1);
- rtl9300_sds_power(27, 1);
- }
-
pr_debug("%s done\n", __func__);
return 0;
}
@@ -412,11 +386,15 @@ static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
}
/* Caller must hold priv->reg_mutex */
-int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port)
+int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port,struct netdev_lag_upper_info *info)
{
struct rtl838x_switch_priv *priv = ds->priv;
int i;
-
+ u32 algomsk = 0;
+ u32 algoidx = 0;
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ return -EINVAL;
+ }
pr_info("%s: Adding port %d to LA-group %d\n", __func__, port, group);
if (group >= priv->n_lags) {
pr_err("Link Agrregation group too large.\n");
@@ -429,18 +407,40 @@ int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port)
}
for (i = 0; i < priv->n_lags; i++) {
- if (priv->lags_port_members[i] & BIT_ULL(i))
+ if (priv->lags_port_members[i] & BIT_ULL(port))
break;
}
if (i != priv->n_lags) {
pr_err("%s: Port already member of LAG: %d\n", __func__, i);
return -ENOSPC;
}
-
+ switch(info->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT; //DMAC
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT; //DMAC
+ break;
+ case NETDEV_LAG_HASH_L23:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT; //DMAC
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT; //DMAC
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algoidx = 1;
+ break;
+ case NETDEV_LAG_HASH_L34:
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algoidx = 2;
+ break;
+ default:
+ algomsk |= 0x7f;
+ }
+ priv->r->set_distribution_algorithm(group, algoidx, algomsk);
priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
priv->lags_port_members[group] |= BIT_ULL(port);
- pr_info("lags_port_members %d now %016llx\n", group, priv->lags_port_members[group]);
+ pr_debug("lags_port_members %d now %016llx\n", group, priv->lags_port_members[group]);
return 0;
}
@@ -450,7 +450,7 @@ int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
struct rtl838x_switch_priv *priv = ds->priv;
pr_info("%s: Removing port %d from LA-group %d\n", __func__, port, group);
-
+
if (group >= priv->n_lags) {
pr_err("Link Agrregation group too large.\n");
return -EINVAL;
@@ -463,11 +463,10 @@ int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
- pr_err("%s: Port not member of LAG: %d\n", __func__, group
- );
+ pr_err("%s: Port not member of LAG: %d\n", __func__, group);
return -ENOSPC;
}
-
+ // 0x7f algo mask all
priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
priv->lags_port_members[group] &= ~BIT_ULL(port);
@@ -475,6 +474,163 @@ int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
return 0;
}
+/*
+ * Allocate a 64 bit octet counter located in the LOG HW table
+ */
+static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
+{
+ int idx;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
+ if (idx >= priv->n_counters) {
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+
+ set_bit(idx, priv->octet_cntr_use_bm);
+ mutex_unlock(&priv->reg_mutex);
+
+ return idx;
+}
+
+/*
+ * Allocate a 32-bit packet counter
+ * 2 32-bit packet counters share the location of a 64-bit octet counter
+ * Initially there are no free packet counters and 2 new ones need to be freed
+ * by allocating the corresponding octet counter
+ */
+int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
+{
+ int idx, j;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Because initially no packet counters are free, the logic is reversed:
+ * a 0-bit means the counter is already allocated (for octets)
+ */
+ idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
+ pr_info("%s got bit %d\n", __func__, idx);
+ if (idx >= priv->n_counters * 2) {
+ j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
+ if (j >= priv->n_counters) {
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+ set_bit(j, priv->octet_cntr_use_bm);
+ idx = j * 2;
+ set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
+
+ } else {
+ clear_bit(idx, priv->packet_cntr_use_bm);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return idx;
+}
+
+
+/*
+ * Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
+ * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
+ * or mark an existing entry as a nexthop by setting it's nexthop bit
+ * Called from the L3 layer
+ * The index in the L2 hash table is filled into nh->l2_id;
+ */
+int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
+{
+ struct rtl838x_l2_entry e;
+ u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
+ u32 key = priv->r->l2_hash_key(priv, seed);
+ int i, idx = -1;
+ u64 entry;
+
+ pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
+ __func__, nh->mac, nh->rvid, key, seed);
+
+ e.type = L2_UNICAST;
+ u64_to_ether_addr(nh->mac, &e.mac[0]);
+ e.port = nh->port;
+
+ // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ for (i = 0; i < priv->l2_bucket_size; i++) {
+ entry = priv->r->read_l2_entry_using_hash(key, i, &e);
+
+ if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
+ idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
+ : ((key << 2) | i) & 0xffff;
+ break;
+ }
+ }
+
+ if (idx < 0) {
+ pr_err("%s: No more L2 forwarding entries available\n", __func__);
+ return -1;
+ }
+
+ // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
+ nh->l2_id = idx;
+ if (e.valid) {
+ nh->port = e.port;
+ nh->vid = e.vid; // Save VID
+ nh->rvid = e.rvid;
+ nh->dev_id = e.stack_dev;
+ // If the entry is already a valid next hop entry, don't change it
+ if (e.next_hop)
+ return 0;
+ } else {
+ e.valid = true;
+ e.is_static = true;
+ e.rvid = nh->rvid;
+ e.is_ip_mc = false;
+ e.is_ipv6_mc = false;
+ e.block_da = false;
+ e.block_sa = false;
+ e.suspended = false;
+ e.age = 0; // With port-ignore
+ e.port = priv->port_ignore;
+ u64_to_ether_addr(nh->mac, &e.mac[0]);
+ }
+ e.next_hop = true;
+ e.nh_route_id = nh->id; // NH route ID takes place of VID
+ e.nh_vlan_target = false;
+
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+
+ return 0;
+}
+
+/*
+ * Removes a Layer 2 next hop entry in the forwarding database
+ * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
+ * and we wait until the entry ages out
+ */
+int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
+{
+ struct rtl838x_l2_entry e;
+ u32 key = nh->l2_id >> 2;
+ int i = nh->l2_id & 0x3;
+ u64 entry = entry = priv->r->read_l2_entry_using_hash(key, i, &e);
+
+ pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
+ if (!e.valid) {
+ dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
+ return -1;
+ }
+
+ if (e.is_static)
+ e.valid = false;
+ e.next_hop = false;
+ e.vid = nh->vid; // Restore VID
+ e.rvid = nh->rvid;
+
+ priv->r->write_l2_entry_using_hash(key, i, &e);
+
+ return 0;
+}
+#if 0
static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
struct net_device *ndev,
struct netdev_notifier_changeupper_info *info)
@@ -524,6 +680,7 @@ out:
mutex_unlock(&priv->reg_mutex);
return 0;
}
+#endif
static int rtl83xx_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
@@ -540,7 +697,7 @@ static int rtl83xx_netdevice_event(struct notifier_block *this,
priv = container_of(this, struct rtl838x_switch_priv, nb);
switch (event) {
case NETDEV_CHANGEUPPER:
- err = rtl83xx_handle_changeupper(priv, ndev, ptr);
+// err = rtl83xx_handle_changeupper(priv, ndev, ptr);
break;
}
@@ -550,6 +707,707 @@ static int rtl83xx_netdevice_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+const static struct rhashtable_params route_ht_params = {
+ .key_len = sizeof(u32),
+ .key_offset = offsetof(struct rtl83xx_route, gw_ip),
+ .head_offset = offsetof(struct rtl83xx_route, linkage),
+};
+
+/*
+ * Updates an L3 next hop entry in the ROUTING table
+ */
+static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
+{
+ struct rtl83xx_route *r;
+ struct rhlist_head *tmp, *list;
+
+ rcu_read_lock();
+ list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
+ if (!list) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ rhl_for_each_entry_rcu(r, tmp, list, linkage) {
+ pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
+ __func__, &ip_addr, mac);
+
+ // Reads the ROUTING table entry associated with the route
+ priv->r->route_read(r->id, r);
+ pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
+
+ r->nh.mac = r->nh.gw = mac;
+ r->nh.port = priv->port_ignore;
+ r->nh.id = r->id;
+
+ // Do we need to explicitly add a DMAC entry with the route's nh index?
+ if (priv->r->set_l3_egress_mac)
+ priv->r->set_l3_egress_mac(r->id, mac);
+
+ // Update ROUTING table: map gateway-mac and switch-mac id to route id
+ rtl83xx_l2_nexthop_add(priv, &r->nh);
+
+ r->attr.valid = true;
+ r->attr.action = ROUTE_ACT_FORWARD;
+ r->attr.type = 0;
+ r->attr.hit = false; // Reset route-used indicator
+
+ // Add PIE entry with dst_ip and prefix_len
+ r->pr.dip = r->dst_ip;
+ r->pr.dip_m = inet_make_mask(r->prefix_len);
+
+ if (r->is_host_route) {
+ int slot = priv->r->find_l3_slot(r, false);
+
+ pr_info("%s: Got slot for route: %d\n", __func__, slot);
+ priv->r->host_route_write(slot, r);
+ } else {
+ priv->r->route_write(r->id, r);
+ r->pr.fwd_sel = true;
+ r->pr.fwd_data = r->nh.l2_id;
+ r->pr.fwd_act = PIE_ACT_ROUTE_UC;
+ }
+
+ if (priv->r->set_l3_nexthop)
+ priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
+
+ if (r->pr.id < 0) {
+ r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
+ if (r->pr.packet_cntr >= 0) {
+ pr_info("Using packet counter %d\n", r->pr.packet_cntr);
+ r->pr.log_sel = true;
+ r->pr.log_data = r->pr.packet_cntr;
+ }
+ priv->r->pie_rule_add(priv, &r->pr);
+ } else {
+ int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
+ pr_info("%s: total packets: %d\n", __func__, pkts);
+
+ priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
+ struct net_device *dev, __be32 ip_addr)
+{
+ struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
+ int err = 0;
+ u64 mac;
+
+ if (!n) {
+ n = neigh_create(&arp_tbl, &ip_addr, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ }
+
+ /* If the neigh is already resolved, then go ahead and
+ * install the entry, otherwise start the ARP process to
+ * resolve the neigh.
+ */
+ if (n->nud_state & NUD_VALID) {
+ mac = ether_addr_to_u64(n->ha);
+ pr_info("%s: resolved mac: %016llx\n", __func__, mac);
+ rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
+ } else {
+ pr_info("%s: need to wait\n", __func__);
+ neigh_event_send(n, NULL);
+ }
+
+ neigh_release(n);
+ return err;
+}
+
+/*
+ * Is the lower network device a DSA slave network device of our RTL930X-switch?
+ * Unfortunately we cannot just follow dev->dsa_prt as this is only set for the
+ * DSA master device.
+ */
+int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+// TODO: On 5.12:
+// if(!dsa_slave_dev_check(dev)) {
+// netdev_info(dev, "%s: not a DSA device.\n", __func__);
+// return -EINVAL;
+// }
+
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (!priv->ports[i].dp)
+ continue;
+ pr_debug("dp-port: %08x, dev: %08x\n", (u32)(priv->ports[i].dp->slave), (u32)dev);
+ if (priv->ports[i].dp->slave == dev)
+ return i;
+ }
+ return -EINVAL;
+}
+
+struct rtl83xx_walk_data {
+ struct rtl838x_switch_priv *priv;
+ int port;
+};
+
+static int rtl83xx_port_lower_walk(struct net_device *lower, void *_data)
+{
+ struct rtl83xx_walk_data *data = _data;
+ struct rtl838x_switch_priv *priv = data->priv;
+ int ret = 0;
+ int index;
+
+ index = rtl83xx_port_is_under(lower, priv);
+ data->port = index;
+ if (index >= 0) {
+ pr_debug("Found DSA-port, index %d\n", index);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
+{
+ struct rtl83xx_walk_data data;
+
+ data.priv = priv;
+ data.port = 0;
+
+ netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &data);
+
+ return data.port;
+}
+
+static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
+{
+ struct rtl83xx_route *r;
+ int idx = 0, err;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
+ pr_info("%s id: %d, ip %pI4\n", __func__, idx, &ip);
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ mutex_unlock(&priv->reg_mutex);
+ return r;
+ }
+
+ r->id = idx;
+ r->gw_ip = ip;
+ r->pr.id = -1; // We still need to allocate a rule in HW
+ r->is_host_route = false;
+
+ err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
+ if (err) {
+ pr_err("Could not insert new rule\n");
+ mutex_unlock(&priv->reg_mutex);
+ goto out_free;
+ }
+
+ set_bit(idx, priv->route_use_bm);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return r;
+
+out_free:
+ kfree(r);
+ return NULL;
+}
+
+
+static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
+{
+ struct rtl83xx_route *r;
+ int idx = 0, err;
+
+ mutex_lock(&priv->reg_mutex);
+
+ idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
+ pr_info("%s id: %d, ip %pI4\n", __func__, idx, &ip);
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ mutex_unlock(&priv->reg_mutex);
+ return r;
+ }
+
+ /* We require a unique route ID irrespective of whether it is a prefix or host
+ * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry */
+ r->id = idx + MAX_ROUTES;
+
+ r->gw_ip = ip;
+ r->pr.id = -1; // We still need to allocate a rule in HW
+ r->is_host_route = true;
+
+ err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
+ if (err) {
+ pr_err("Could not insert new rule\n");
+ mutex_unlock(&priv->reg_mutex);
+ goto out_free;
+ }
+
+ set_bit(idx, priv->host_route_use_bm);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return r;
+
+out_free:
+ kfree(r);
+ return NULL;
+}
+
+
+
+static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
+{
+ int id;
+
+ if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
+ dev_warn(priv->dev, "Could not remove route\n");
+
+ if (r->is_host_route) {
+ id = priv->r->find_l3_slot(r, false);
+ pr_info("%s: Got id for host route: %d\n", __func__, id);
+ r->attr.valid = false;
+ priv->r->host_route_write(id, r);
+ clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
+ } else {
+ // If there is a HW representation of the route, delete it
+ if (priv->r->route_lookup_hw) {
+ id = priv->r->route_lookup_hw(r);
+ pr_info("%s: Got id for prefix route: %d\n", __func__, id);
+ r->attr.valid = false;
+ priv->r->route_write(id, r);
+ }
+ clear_bit(r->id, priv->route_use_bm);
+ }
+
+ kfree(r);
+}
+
+static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
+ struct fib_entry_notifier_info *info)
+{
+ struct fib_nh *nh = fib_info_nh(info->fi, 0);
+ struct rtl83xx_route *r = NULL;
+ struct rhlist_head *tmp, *list;
+
+ pr_info("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
+ rcu_read_lock();
+ list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
+ if (!list) {
+ rcu_read_unlock();
+ pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
+ return -ENOENT;
+ }
+ rhl_for_each_entry_rcu(r, tmp, list, linkage) {
+ if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
+ pr_info("%s: found a route with id %d, nh-id %d\n",
+ __func__, r->id, r->nh.id);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ /* if there is no route has been found, this is NULL here */
+ if (r) {
+ rtl83xx_l2_nexthop_rm(priv, &r->nh);
+ pr_info("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
+ set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
+ priv->r->pie_rule_rm(priv, &r->pr);
+ rtl83xx_route_rm(priv, r);
+ }
+
+ nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+
+ return 0;
+}
+
+/*
+ * On the RTL93xx, an L3 termination endpoint MAC address on which the router waits
+ * for packets to be routed needs to be allocated.
+ */
+static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
+{
+ int i, free_mac = -1;
+ struct rtl93xx_rt_mac m;
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < MAX_ROUTER_MACS; i++) {
+ priv->r->get_l3_router_mac(i, &m);
+ if (free_mac < 0 && !m.valid) {
+ free_mac = i;
+ continue;
+ }
+ if (m.valid && m.mac == mac) {
+ free_mac = i;
+ break;
+ }
+ }
+
+ if (free_mac < 0) {
+ pr_err("No free router MACs, cannot offload\n");
+ mutex_unlock(&priv->reg_mutex);
+ return -1;
+ }
+
+ m.valid = true;
+ m.mac = mac;
+ m.p_type = 0; // An individual port, not a trunk port
+ m.p_id = 0x3f; // Listen on any port
+ m.p_id_mask = 0;
+ m.vid = 0; // Listen on any VLAN...
+ m.vid_mask = 0; // ... so mask needs to be 0
+ m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
+ m.action = L3_FORWARD; // Route the packet
+ priv->r->set_l3_router_mac(free_mac, &m);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
+{
+ int i, free_mac = -1;
+ struct rtl838x_l3_intf intf;
+ u64 m;
+
+ mutex_lock(&priv->reg_mutex);
+ for (i = 0; i < MAX_SMACS; i++) {
+ m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
+ if (free_mac < 0 && !m) {
+ free_mac = i;
+ continue;
+ }
+ if (m == mac) {
+ mutex_unlock(&priv->reg_mutex);
+ return i;
+ }
+ }
+
+ if (free_mac < 0) {
+ pr_err("No free egress interface, cannot offload\n");
+ return -1;
+ }
+
+ // Set up default egress interface 1
+ intf.vid = vlan;
+ intf.smac_idx = free_mac;
+ intf.ip4_mtu_id = 1;
+ intf.ip6_mtu_id = 1;
+ intf.ttl_scope = 1; // TTL
+ intf.hl_scope = 1; // Hop Limit
+ intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
+ intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
+ priv->r->set_l3_egress_intf(free_mac, &intf);
+
+ priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return free_mac;
+}
+
+static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
+ struct fib_entry_notifier_info *info)
+{
+ struct fib_nh *nh = fib_info_nh(info->fi, 0);
+ struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
+ int port;
+ struct rtl83xx_route *r;
+ bool to_localhost;
+ int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
+
+ pr_info("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
+ if (!info->dst) {
+ pr_info("Not offloading default route for now\n");
+ return 0;
+ }
+
+ pr_info("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
+ ether_addr_to_u64(dev->dev_addr), vlan
+ );
+
+ port = rtl83xx_port_dev_lower_find(dev, priv);
+ if (port < 0)
+ return -1;
+
+ // For now we only work with routes that have a gateway and are not ourself
+// if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
+// return 0;
+
+ if ((info->dst & 0xff) == 0xff)
+ return 0;
+
+ // Do not offload routes to 192.168.100.x
+// if ((info->dst & 0xffffff00) == 0xc0a86400)
+// return 0;
+
+ // Do not offload routes to 127.x.x.x
+ if ((info->dst & 0xff000000) == 0x7f000000)
+ return 0;
+
+	// Allocate a route or host-route entry (if the hardware supports host routes)
+ if (info->dst_len == 32 && priv->r->host_route_write)
+ r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
+ else
+ r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
+
+ if (!r) {
+ pr_err("%s: No more free route entries\n", __func__);
+ return -1;
+ }
+
+ r->dst_ip = info->dst;
+ r->prefix_len = info->dst_len;
+ r->nh.rvid = vlan;
+ to_localhost = !nh->fib_nh_gw4;
+
+ if (priv->r->set_l3_router_mac) {
+ u64 mac = ether_addr_to_u64(dev->dev_addr);
+
+ pr_info("Local route and router mac %016llx\n", mac);
+
+ if (rtl83xx_alloc_router_mac(priv, mac))
+ goto out_free_rt;
+
+ // vid = 0: Do not care about VID
+ r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
+ if (r->nh.if_id < 0)
+ goto out_free_rmac;
+
+ if (to_localhost) {
+ int slot;
+
+ r->nh.mac = mac;
+ r->nh.port = priv->port_ignore;
+ r->attr.valid = true;
+ r->attr.action = ROUTE_ACT_TRAP2CPU;
+ r->attr.type = 0;
+
+ slot = priv->r->find_l3_slot(r, false);
+ pr_info("%s: Got slot for route: %d\n", __func__, slot);
+ priv->r->host_route_write(slot, r);
+ }
+ }
+
+ // We need to resolve the mac address of the GW
+ if (!to_localhost)
+ rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
+
+ nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+
+ return 0;
+
+out_free_rmac:
+out_free_rt:
+ return 0;
+}
+
+static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
+ struct fib6_entry_notifier_info *info)
+{
+ pr_info("In %s\n", __func__);
+// nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+ return 0;
+}
+
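+/*
+ * Neighbour updates arrive in atomic context, so the actual nexthop update
+ * is deferred to process context through a small work item.
+ */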
+struct net_event_work {
+ struct work_struct work;
+ struct rtl838x_switch_priv *priv;
+ u64 mac;
+ u32 gw_addr;
+};
+
+static void rtl83xx_net_event_work_do(struct work_struct *work)
+{
+ struct net_event_work *net_work =
+ container_of(work, struct net_event_work, work);
+ struct rtl838x_switch_priv *priv = net_work->priv;
+
+	rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
+
+	kfree(net_work);
+}
+
+static int rtl83xx_netevent_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct rtl838x_switch_priv *priv;
+ struct net_device *dev;
+ struct neighbour *n = ptr;
+	int port;
+ struct net_event_work *net_work;
+
+ priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
+ net_work->priv = priv;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+		if (n->tbl != &arp_tbl) {
+			kfree(net_work);
+			return NOTIFY_DONE;
+		}
+ dev = n->dev;
+ port = rtl83xx_port_dev_lower_find(dev, priv);
+ if (port < 0 || !(n->nud_state & NUD_VALID)) {
+			pr_debug("%s: Neighbour invalid, not updating\n", __func__);
+ kfree(net_work);
+ return NOTIFY_DONE;
+ }
+
+ net_work->mac = ether_addr_to_u64(n->ha);
+ net_work->gw_addr = *(__be32 *) n->primary_key;
+
+ pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
+ __func__, port, net_work->mac);
+ schedule_work(&net_work->work);
+		break;
+	default:
+		kfree(net_work);
+	}
+
+ return NOTIFY_DONE;
+}
+
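+/*
+ * FIB notifications are likewise deferred to a work item; the handler below
+ * runs under rtnl_lock() so the switch tables are updated consistently with
+ * the kernel's view of the FIB.
+ */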
+struct rtl83xx_fib_event_work {
+ struct work_struct work;
+ union {
+ struct fib_entry_notifier_info fen_info;
+ struct fib6_entry_notifier_info fen6_info;
+ struct fib_rule_notifier_info fr_info;
+ };
+ struct rtl838x_switch_priv *priv;
+ bool is_fib6;
+ unsigned long event;
+};
+
+static void rtl83xx_fib_event_work_do(struct work_struct *work)
+{
+ struct rtl83xx_fib_event_work *fib_work =
+ container_of(work, struct rtl83xx_fib_event_work, work);
+ struct rtl838x_switch_priv *priv = fib_work->priv;
+ struct fib_rule *rule;
+ int err;
+
+ /* Protect internal structures from changes */
+ rtnl_lock();
+ pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ if (fib_work->is_fib6) {
+ err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
+ } else {
+ err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
+ }
+ if (err)
+			pr_err("%s: FIB add failed\n", __func__);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ if (!fib_work->is_fib6) {
+ rtl83xx_fib4_del(priv, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
+ }
+ break;
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+ if (!fib_work->is_fib6) {
+ rule = fib_work->fr_info.rule;
+ if (!fib4_rule_default(rule))
+ pr_err("%s: FIB4 default rule failed\n", __func__);
+ fib_rule_put(rule);
+ }
+ break;
+ }
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct fib_notifier_info *info = ptr;
+ struct rtl838x_switch_priv *priv;
+ struct rtl83xx_fib_event_work *fib_work;
+
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
+ info->family != RTNL_FAMILY_IPMR &&
+ info->family != RTNL_FAMILY_IP6MR))
+ return NOTIFY_DONE;
+
+ priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
+ fib_work->priv = priv;
+ fib_work->event = event;
+ fib_work->is_fib6 = false;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ case FIB_EVENT_ENTRY_DEL:
+		pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "IPv6 gateway with IPv4 route is not supported");
+ kfree(fib_work);
+ return notifier_from_errno(-EINVAL);
+ }
+
+ memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+			/* Take a reference on fib_info to prevent it from being
+ * freed while work is queued. Release it afterwards.
+ */
+ fib_info_hold(fib_work->fen_info.fi);
+
+ } else if (info->family == AF_INET6) {
+ struct fib6_entry_notifier_info *fen6_info;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "IPv6 route with nexthop objects is not supported");
+ kfree(fib_work);
+ return notifier_from_errno(-EINVAL);
+ }
+			pr_debug("%s: FIB_ENTRY ADD/DEL for IPv6\n", __func__);
+ fib_work->is_fib6 = true;
+ memcpy(&fib_work->fen6_info, fen6_info, sizeof(fib_work->fen6_info));
+ }
+ break;
+
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+		pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
+ memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
+ fib_rule_get(fib_work->fr_info.rule);
+ break;
+ }
+
+ schedule_work(&fib_work->work);
+
+ return NOTIFY_DONE;
+}
+
static int __init rtl83xx_sw_probe(struct platform_device *pdev)
{
int err = 0, i;
@@ -570,19 +1428,24 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
-
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+
+ priv->ds->num_ports = DSA_MAX_PORTS;
priv->ds->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &rtl83xx_switch_ops;
priv->dev = dev;
+ mutex_init(&priv->reg_mutex);
+
priv->family_id = soc_info.family;
priv->id = soc_info.id;
+ pr_info("SOC ID %X, FAMILY ID %X\n", priv->id, priv->family_id);
switch(soc_info.family) {
case RTL8380_FAMILY_ID:
+ rtl8380_get_version(priv); // TODO: Make this a function pointer call
priv->ds->ops = &rtl83xx_switch_ops;
priv->cpu_port = RTL838X_CPU_PORT;
priv->port_mask = 0x1f;
@@ -591,10 +1454,14 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
priv->r = &rtl838x_reg;
priv->ds->num_ports = 29;
priv->fib_entries = 8192;
- rtl8380_get_version(priv);
priv->n_lags = 8;
+ priv->l2_bucket_size = 4;
+ priv->n_pie_blocks = 12;
+ priv->port_ignore = 0x1f;
+ priv->n_counters = 128;
break;
case RTL8390_FAMILY_ID:
+ rtl8390_get_version(priv);
priv->ds->ops = &rtl83xx_switch_ops;
priv->cpu_port = RTL839X_CPU_PORT;
priv->port_mask = 0x3f;
@@ -603,10 +1470,14 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
priv->r = &rtl839x_reg;
priv->ds->num_ports = 53;
priv->fib_entries = 16384;
- rtl8390_get_version(priv);
priv->n_lags = 16;
+ priv->l2_bucket_size = 4;
+ priv->n_pie_blocks = 18;
+ priv->port_ignore = 0x3f;
+ priv->n_counters = 1024;
break;
case RTL9300_FAMILY_ID:
+ priv->version = RTL8390_VERSION_A; // TODO: Understand RTL9300 versions
priv->ds->ops = &rtl930x_switch_ops;
priv->cpu_port = RTL930X_CPU_PORT;
priv->port_mask = 0x1f;
@@ -615,11 +1486,15 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
priv->r = &rtl930x_reg;
priv->ds->num_ports = 29;
priv->fib_entries = 16384;
- priv->version = RTL8390_VERSION_A;
priv->n_lags = 16;
sw_w32(1, RTL930X_ST_CTRL);
+ priv->l2_bucket_size = 8;
+ priv->n_pie_blocks = 16;
+ priv->port_ignore = 0x3f;
+ priv->n_counters = 2048;
break;
case RTL9310_FAMILY_ID:
+ priv->version = RTL8390_VERSION_A; // TODO: Fix me
priv->ds->ops = &rtl930x_switch_ops;
priv->cpu_port = RTL931X_CPU_PORT;
priv->port_mask = 0x3f;
@@ -628,16 +1503,24 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
priv->r = &rtl931x_reg;
priv->ds->num_ports = 57;
priv->fib_entries = 16384;
- priv->version = RTL8390_VERSION_A;
priv->n_lags = 16;
+ priv->l2_bucket_size = 8;
+ priv->n_pie_blocks = 32;
+ priv->port_ignore = 0x3f;
+ priv->n_counters = 0; // TODO: Figure out logs on RTL9310
break;
}
+ memset(priv->mc_group_saves, -1, sizeof(priv->mc_group_saves));
+ memset(priv->lag_primary, -1, sizeof(priv->lag_primary));
+
+ priv->ds->num_lag_ids = priv->n_lags;
+
pr_debug("Chip version %c\n", priv->version);
err = rtl83xx_mdio_probe(priv);
if (err) {
/* Probing fails the 1st time because of missing ethernet driver
- * initialization. Use this to disable traffic in case the bootloader left if on
+ * initialization. Use this to disable traffic in case the bootloader left it on
*/
return err;
}
@@ -647,6 +1530,9 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
return err;
}
+ for (i = 0; i <= priv->cpu_port; i++)
+ priv->ports[i].dp = dsa_to_port(priv->ds, i);
+
/* Enable link and media change interrupts. Are the SERDES masks needed? */
sw_w32_mask(0, 3, priv->r->isr_glb_src);
@@ -686,16 +1572,50 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
rtl83xx_setup_qos(priv);
+ priv->r->l3_setup(priv);
+
/* Clear all destination ports for mirror groups */
for (i = 0; i < 4; i++)
priv->mirror_group_ports[i] = -1;
+ /*
+ * Register netdevice event callback to catch changes in link aggregation groups
+ */
priv->nb.notifier_call = rtl83xx_netdevice_event;
- if (register_netdevice_notifier(&priv->nb)) {
- priv->nb.notifier_call = NULL;
- dev_err(dev, "Failed to register LAG netdev notifier\n");
+ if (register_netdevice_notifier(&priv->nb)) {
+ priv->nb.notifier_call = NULL;
+ dev_err(dev, "Failed to register LAG netdev notifier\n");
+ goto err_register_nb;
+ }
+
+ // Initialize hash table for L3 routing
+ rhltable_init(&priv->routes, &route_ht_params);
+
+ /*
+ * Register netevent notifier callback to catch notifications about neighboring
+ * changes to update nexthop entries for L3 routing.
+ */
+ priv->ne_nb.notifier_call = rtl83xx_netevent_event;
+ if (register_netevent_notifier(&priv->ne_nb)) {
+ priv->ne_nb.notifier_call = NULL;
+ dev_err(dev, "Failed to register netevent notifier\n");
+ goto err_register_ne_nb;
}
+ priv->fib_nb.notifier_call = rtl83xx_fib_event;
+
+ /*
+	 * Register Forwarding Information Base notifier to offload routes
+	 * where possible.
+	 * Only FIBs pointing to our own netdevs are programmed into
+ * the device, so no need to pass a callback.
+ */
+ // TODO 5.9: err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
+ err = register_fib_notifier(&priv->fib_nb, NULL);
+ if (err)
+ goto err_register_fib_nb;
+
+ // TODO: put this into l2_setup()
// Flood BPDUs to all ports including cpu-port
if (soc_info.family != RTL9300_FAMILY_ID) { // TODO: Port this functionality
bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
@@ -705,8 +1625,17 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
rtl838x_dbgfs_init(priv);
+ } else {
+ rtl930x_dbgfs_init(priv);
}
+ return 0;
+
+err_register_fib_nb:
+ unregister_netevent_notifier(&priv->ne_nb);
+err_register_ne_nb:
+ unregister_netdevice_notifier(&priv->nb);
+err_register_nb:
return err;
}
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/debugfs.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/debugfs.c
index 4f81408453..0425bae4ab 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/debugfs.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/debugfs.c
@@ -40,7 +40,10 @@
#define RTL839X_MIR_RSPAN_TX_CTRL (0x69b0)
#define RTL839X_MIR_RSPAN_TX_TAG_RM_CTRL (0x2550)
#define RTL839X_MIR_RSPAN_TX_TAG_EN_CTRL (0x2554)
-#define RTL839X_MIR_SAMPLE_RATE_CTRL (0x2558)
+
+#define RTL838X_STAT_PRVTE_DROP_COUNTERS (0x6A00)
+#define RTL839X_STAT_PRVTE_DROP_COUNTERS (0x3E00)
+#define RTL930X_STAT_PRVTE_DROP_COUNTERS (0xB5B8)
int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port);
void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
@@ -50,6 +53,58 @@ u32 rtl839x_get_egress_rate(struct rtl838x_switch_priv *priv, int port);
int rtl838x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate);
int rtl839x_set_egress_rate(struct rtl838x_switch_priv *priv, int port, u32 rate);
+
+const char *rtl838x_drop_cntr[] = {
+ "ALE_TX_GOOD_PKTS", "MAC_RX_DROP", "ACL_FWD_DROP", "HW_ATTACK_PREVENTION_DROP",
+ "RMA_DROP", "VLAN_IGR_FLTR_DROP", "INNER_OUTER_CFI_EQUAL_1_DROP", "PORT_MOVE_DROP",
+ "NEW_SA_DROP", "MAC_LIMIT_SYS_DROP", "MAC_LIMIT_VLAN_DROP", "MAC_LIMIT_PORT_DROP",
+ "SWITCH_MAC_DROP", "ROUTING_EXCEPTION_DROP", "DA_LKMISS_DROP", "RSPAN_DROP",
+ "ACL_LKMISS_DROP", "ACL_DROP", "INBW_DROP", "IGR_METER_DROP",
+ "ACCEPT_FRAME_TYPE_DROP", "STP_IGR_DROP", "INVALID_SA_DROP", "SA_BLOCKING_DROP",
+ "DA_BLOCKING_DROP", "L2_INVALID_DPM_DROP", "MCST_INVALID_DPM_DROP", "RX_FLOW_CONTROL_DROP",
+ "STORM_SPPRS_DROP", "LALS_DROP", "VLAN_EGR_FILTER_DROP", "STP_EGR_DROP",
+ "SRC_PORT_FILTER_DROP", "PORT_ISOLATION_DROP", "ACL_FLTR_DROP", "MIRROR_FLTR_DROP",
+ "TX_MAX_DROP", "LINK_DOWN_DROP", "FLOW_CONTROL_DROP", "BRIDGE .1d discards"
+};
+
+const char *rtl839x_drop_cntr[] = {
+ "ALE_TX_GOOD_PKTS", "ERROR_PKTS", "EGR_ACL_DROP", "EGR_METER_DROP",
+	"OAM", "CFM", "VLAN_IGR_FLTR", "VLAN_ERR",
+ "INNER_OUTER_CFI_EQUAL_1", "VLAN_TAG_FORMAT", "SRC_PORT_SPENDING_TREE", "INBW",
+ "RMA", "HW_ATTACK_PREVENTION", "PROTO_STORM", "MCAST_SA",
+ "IGR_ACL_DROP", "IGR_METER_DROP", "DFLT_ACTION_FOR_MISS_ACL_AND_C2SC", "NEW_SA",
+ "PORT_MOVE", "SA_BLOCKING", "ROUTING_EXCEPTION", "SRC_PORT_SPENDING_TREE_NON_FWDING",
+ "MAC_LIMIT", "UNKNOW_STORM", "MISS_DROP", "CPU_MAC_DROP",
+ "DA_BLOCKING", "SRC_PORT_FILTER_BEFORE_EGR_ACL", "VLAN_EGR_FILTER", "SPANNING_TRE",
+ "PORT_ISOLATION", "OAM_EGRESS_DROP", "MIRROR_ISOLATION", "MAX_LEN_BEFORE_EGR_ACL",
+ "SRC_PORT_FILTER_BEFORE_MIRROR", "MAX_LEN_BEFORE_MIRROR", "SPECIAL_CONGEST_BEFORE_MIRROR",
+ "LINK_STATUS_BEFORE_MIRROR",
+ "WRED_BEFORE_MIRROR", "MAX_LEN_AFTER_MIRROR", "SPECIAL_CONGEST_AFTER_MIRROR",
+ "LINK_STATUS_AFTER_MIRROR",
+ "WRED_AFTER_MIRROR"
+};
+
+const char *rtl930x_drop_cntr[] = {
+ "OAM_PARSER", "UC_RPF", "DEI_CFI", "MAC_IP_SUBNET_BASED_VLAN", "VLAN_IGR_FILTER",
+ "L2_UC_MC", "IPV_IP6_MC_BRIDGE", "PTP", "USER_DEF_0_3", "RESERVED",
+ "RESERVED1", "RESERVED2", "BPDU_RMA", "LACP", "LLDP",
+ "EAPOL", "XX_RMA", "L3_IPUC_NON_IP", "IP4_IP6_HEADER_ERROR", "L3_BAD_IP",
+ "L3_DIP_DMAC_MISMATCH", "IP4_IP_OPTION", "IP_UC_MC_ROUTING_LOOK_UP_MISS", "L3_DST_NULL_INTF",
+ "L3_PBR_NULL_INTF",
+ "HOST_NULL_INTF", "ROUTE_NULL_INTF", "BRIDGING_ACTION", "ROUTING_ACTION", "IPMC_RPF",
+ "L2_NEXTHOP_AGE_OUT", "L3_UC_TTL_FAIL", "L3_MC_TTL_FAIL", "L3_UC_MTU_FAIL", "L3_MC_MTU_FAIL",
+ "L3_UC_ICMP_REDIR", "IP6_MLD_OTHER_ACT", "ND", "IP_MC_RESERVED", "IP6_HBH",
+ "INVALID_SA", "L2_HASH_FULL", "NEW_SA", "PORT_MOVE_FORBID", "STATIC_PORT_MOVING",
+ "DYNMIC_PORT_MOVING", "L3_CRC", "MAC_LIMIT", "ATTACK_PREVENT", "ACL_FWD_ACTION",
+ "OAMPDU", "OAM_MUX", "TRUNK_FILTER", "ACL_DROP", "IGR_BW",
+ "ACL_METER", "VLAN_ACCEPT_FRAME_TYPE", "MSTP_SRC_DROP_DISABLED_BLOCKING", "SA_BLOCK", "DA_BLOCK",
+ "STORM_CONTROL", "VLAN_EGR_FILTER", "MSTP_DESTINATION_DROP", "SRC_PORT_FILTER", "PORT_ISOLATION",
+ "TX_MAX_FRAME_SIZE", "EGR_LINK_STATUS", "MAC_TX_DISABLE", "MAC_PAUSE_FRAME", "MAC_RX_DROP",
+ "MIRROR_ISOLATE", "RX_FC", "EGR_QUEUE", "HSM_RUNOUT", "ROUTING_DISABLE", "INVALID_L2_NEXTHOP_ENTRY",
+ "L3_MC_SRC_FLT", "CPUTAG_FLT", "FWD_PMSK_NULL", "IPUC_ROUTING_LOOKUP_MISS", "MY_DEV_DROP",
+ "STACK_NONUC_BLOCKING_PMSK", "STACK_PORT_NOT_FOUND", "ACL_LOOPBACK_DROP", "IP6_ROUTING_EXT_HEADER"
+};
+
static ssize_t rtl838x_common_read(char __user *buffer, size_t count,
loff_t *ppos, unsigned int value)
{
@@ -134,6 +189,61 @@ static const struct file_operations stp_state_fops = {
.write = stp_state_write,
};
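+/*
+ * The drop_counters debugfs file prints the private drop counters named in
+ * the tables above; each counter is a 16 bit value read from consecutive
+ * 32 bit registers starting at the family's STAT_PRVTE_DROP_COUNTERS base.
+ */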
+static ssize_t drop_counter_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_switch_priv *priv = filp->private_data;
+ int i;
+ const char **d;
+ u32 v;
+ char *buf;
+ int n = 0, len, offset;
+ int num;
+
+ switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ d = rtl838x_drop_cntr;
+ offset = RTL838X_STAT_PRVTE_DROP_COUNTERS;
+ num = 40;
+ break;
+ case RTL8390_FAMILY_ID:
+ d = rtl839x_drop_cntr;
+ offset = RTL839X_STAT_PRVTE_DROP_COUNTERS;
+ num = 45;
+ break;
+ case RTL9300_FAMILY_ID:
+ d = rtl930x_drop_cntr;
+ offset = RTL930X_STAT_PRVTE_DROP_COUNTERS;
+ num = 85;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+ buf = kmalloc(30 * num, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ v = sw_r32(offset + (i << 2)) & 0xffff;
+ n += sprintf(buf + n, "%s: %d\n", d[i], v);
+ }
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static const struct file_operations drop_counter_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = drop_counter_read,
+};
+
static ssize_t age_out_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
@@ -213,11 +323,159 @@ static const struct file_operations port_egress_fops = {
.write = port_egress_rate_write,
};
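+/*
+ * The RMA action registers pack one 2 bit action per port; rma_*_ctrl_div
+ * ports share one 32 bit register, so a port's action sits at bit offset
+ * 2 * (port % div) in register rma_*_ctrl + 4 * (port / div).
+ */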
+static ssize_t port_838x_bpdu_action_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 port = p->dp->index;
+	u32 value = (sw_r32(priv->r->rma_bpdu_ctrl + ((port / priv->r->rma_bpdu_ctrl_div) << 2)) >> ((port % priv->r->rma_bpdu_ctrl_div) << 1)) & 0x3;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t port_838x_bpdu_action_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 value;
+ u32 port = p->dp->index;
+ size_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ sw_w32_mask(3 << ((port % priv->r->rma_bpdu_ctrl_div) << 1), (value & 0x3) << ((port % priv->r->rma_bpdu_ctrl_div) << 1), priv->r->rma_bpdu_ctrl + ((port / priv->r->rma_bpdu_ctrl_div) << 2));
+ return res;
+}
+
+static const struct file_operations port_838x_action_bpdu_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = port_838x_bpdu_action_read,
+ .write = port_838x_bpdu_action_write,
+};
+
+
+static ssize_t port_838x_ptp_action_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 port = p->dp->index;
+	u32 value = (sw_r32(priv->r->rma_ptp_ctrl + ((port / priv->r->rma_ptp_ctrl_div) << 2)) >> ((port % priv->r->rma_ptp_ctrl_div) << 1)) & 0x3;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t port_838x_ptp_action_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 value;
+ u32 port = p->dp->index;
+ size_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ sw_w32_mask(3 << ((port % priv->r->rma_ptp_ctrl_div) << 1), (value & 0x3) << ((port % priv->r->rma_ptp_ctrl_div) << 1), priv->r->rma_ptp_ctrl + ((port / priv->r->rma_ptp_ctrl_div) << 2));
+ return res;
+}
+
+static const struct file_operations port_838x_action_ptp_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = port_838x_ptp_action_read,
+ .write = port_838x_ptp_action_write,
+};
+
+static ssize_t port_838x_lltp_action_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 port = p->dp->index;
+	u32 value = (sw_r32(priv->r->rma_lltp_ctrl + ((port / priv->r->rma_lltp_ctrl_div) << 2)) >> ((port % priv->r->rma_lltp_ctrl_div) << 1)) & 0x3;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t port_838x_lltp_action_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 value;
+ u32 port = p->dp->index;
+ size_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ sw_w32_mask(3 << ((port% priv->r->rma_lltp_ctrl_div) << 1), (value & 0x3) << ((port % priv->r->rma_lltp_ctrl_div) << 1), priv->r->rma_lltp_ctrl + ((port / priv->r->rma_lltp_ctrl_div) << 2));
+ return res;
+}
+
+static const struct file_operations port_838x_action_lltp_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = port_838x_lltp_action_read,
+ .write = port_838x_lltp_action_write,
+};
+
+
+static ssize_t port_838x_eapol_action_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 port = p->dp->index;
+	u32 value = (sw_r32(priv->r->rma_eapol_ctrl + ((port / priv->r->rma_eapol_ctrl_div) << 2)) >> ((port % priv->r->rma_eapol_ctrl_div) << 1)) & 0x3;
+
+ return rtl838x_common_read(buffer, count, ppos, (u32)value);
+}
+
+static ssize_t port_838x_eapol_action_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rtl838x_port *p = filp->private_data;
+ struct dsa_switch *ds = p->dp->ds;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u32 value;
+ u32 port = p->dp->index;
+ size_t res = rtl838x_common_write(buffer, count, ppos, &value);
+ if (res < 0)
+ return res;
+
+ sw_w32_mask(3 << ((port % priv->r->rma_eapol_ctrl_div) << 1), (value & 0x3) << ((port % priv->r->rma_eapol_ctrl_div) << 1), priv->r->rma_eapol_ctrl + ((port / priv->r->rma_eapol_ctrl_div) << 2));
+ return res;
+}
+
+static const struct file_operations port_838x_action_eapol_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = port_838x_eapol_action_read,
+ .write = port_838x_eapol_action_write,
+};
+
+
+
static const struct debugfs_reg32 port_ctrl_regs[] = {
{ .name = "port_isolation", .offset = RTL838X_PORT_ISO_CTRL(0), },
{ .name = "mac_force_mode", .offset = RTL838X_MAC_FORCE_MODE_CTRL, },
};
+static const struct debugfs_reg32 port_ctrl_regs_839x[] = {
+ { .name = "port_isolation", .offset = RTL839X_PORT_ISO_CTRL(0), },
+ { .name = "mac_force_mode", .offset = RTL839X_MAC_FORCE_MODE_CTRL, },
+};
void rtl838x_dbgfs_cleanup(struct rtl838x_switch_priv *priv)
{
@@ -234,33 +492,26 @@ static int rtl838x_dbgfs_port_init(struct dentry *parent, struct rtl838x_switch_
port_dir = debugfs_create_dir(priv->ports[port].dp->name, parent);
- if (priv->family_id == RTL8380_FAMILY_ID) {
- debugfs_create_x32("storm_rate_uc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_UC(port)));
-
- debugfs_create_x32("storm_rate_mc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_MC(port)));
+ debugfs_create_x32("sflow_port_rate", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->sflow_port_rate_ctrl + (port << 2)));
+ debugfs_create_x32("storm_rate_uc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->storm_ctrl_port_uc + (port << priv->r->storm_ctrl_port_uc_shift)));
- debugfs_create_x32("storm_rate_bc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL838X_STORM_CTRL_PORT_BC(port)));
+ debugfs_create_x32("storm_rate_mc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->storm_ctrl_port_mc + (port << priv->r->storm_ctrl_port_mc_shift)));
- debugfs_create_x32("vlan_port_tag_sts_ctrl", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL838X_VLAN_PORT_TAG_STS_CTRL
- + (port << 2)));
- } else {
- debugfs_create_x32("storm_rate_uc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_UC_0(port)));
+ debugfs_create_x32("storm_rate_bc", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->storm_ctrl_port_bc + (port << priv->r->storm_ctrl_port_bc_shift)));
- debugfs_create_x32("storm_rate_mc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_MC_0(port)));
+ debugfs_create_x32("vlan_port_tag_sts_ctrl", 0644, port_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->vlan_port_tag_sts_ctrl + (port << 2)));
- debugfs_create_x32("storm_rate_bc", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL839X_STORM_CTRL_PORT_BC_0(port)));
- debugfs_create_x32("vlan_port_tag_sts_ctrl", 0644, port_dir,
- (u32 *)(RTL838X_SW_BASE + RTL839X_VLAN_PORT_TAG_STS_CTRL
- + (port << 2)));
- }
+	debugfs_create_file("action_bpdu", 0600, port_dir, &priv->ports[port], &port_838x_action_bpdu_fops);
+	debugfs_create_file("action_ptp", 0600, port_dir, &priv->ports[port], &port_838x_action_ptp_fops);
+	debugfs_create_file("action_lltp", 0600, port_dir, &priv->ports[port], &port_838x_action_lltp_fops);
+	if (priv->r->rma_eapol_ctrl)
+		debugfs_create_file("action_eapol", 0600, port_dir, &priv->ports[port], &port_838x_action_eapol_fops);
debugfs_create_u32("id", 0444, port_dir, (u32 *)&priv->ports[port].dp->index);
@@ -268,7 +519,11 @@ static int rtl838x_dbgfs_port_init(struct dentry *parent, struct rtl838x_switch_
if (!port_ctrl_regset)
return -ENOMEM;
- port_ctrl_regset->regs = port_ctrl_regs;
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ port_ctrl_regset->regs = port_ctrl_regs;
+ else
+ port_ctrl_regset->regs = port_ctrl_regs_839x;
+
port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs);
port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (port << 2));
debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset);
@@ -362,7 +617,7 @@ void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv)
struct dentry *mirror_dir;
struct debugfs_regset32 *port_ctrl_regset;
int ret, i;
- char lag_name[10];
+ char lag_name[32];
char mirror_name[10];
pr_info("%s called\n", __func__);
@@ -392,7 +647,11 @@ void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv)
goto err;
}
- port_ctrl_regset->regs = port_ctrl_regs;
+ if (priv->family_id == RTL8380_FAMILY_ID)
+ port_ctrl_regset->regs = port_ctrl_regs;
+ else
+ port_ctrl_regset->regs = port_ctrl_regs_839x;
+
port_ctrl_regset->nregs = ARRAY_SIZE(port_ctrl_regs);
port_ctrl_regset->base = (void *)(RTL838X_SW_BASE + (priv->cpu_port << 2));
debugfs_create_regset32("port_ctrl", 0400, port_dir, port_ctrl_regset);
@@ -401,14 +660,22 @@ void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv)
/* Create entries for LAGs */
for (i = 0; i < priv->n_lags; i++) {
snprintf(lag_name, sizeof(lag_name), "lag.%02d", i);
- if (priv->family_id == RTL8380_FAMILY_ID)
- debugfs_create_x32(lag_name, 0644, rtl838x_dir,
- (u32 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i)));
- else
- debugfs_create_x64(lag_name, 0644, rtl838x_dir,
- (u64 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i)));
+ debugfs_create_x64(lag_name, 0644, rtl838x_dir, (u64 *)(RTL838X_SW_BASE + priv->r->trk_mbr_ctr(i)));
+ }
+ if (priv->r->trk_hash_ctrl) {
+ for (i = 0; i < 4; i++) {
+ snprintf(lag_name, sizeof(lag_name), "lag.hash_algo.%02d", i);
+ debugfs_create_x64(lag_name, 0644, rtl838x_dir, (u64 *)(RTL838X_SW_BASE + priv->r->trk_hash_ctrl + (i << 2)));
+ }
+ }
+ if (priv->r->trk_hash_idx_ctrl) {
+ for (i = 0; i < priv->n_lags; i++) {
+ if (priv->family_id == RTL8390_FAMILY_ID) {
+ snprintf(lag_name, sizeof(lag_name), "lag.hash_algo_idx.%02d", i);
+ debugfs_create_x64(lag_name, 0644, rtl838x_dir, (u64 *)(RTL838X_SW_BASE + priv->r->trk_hash_idx_ctrl + ((i >> 4) << 2)));
+ }
+ }
}
-
/* Create directories for mirror groups */
for (i = 0; i < 4; i++) {
snprintf(mirror_name, sizeof(mirror_name), "mirror.%1d", i);
@@ -452,25 +719,58 @@ void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv)
}
}
- if (priv->family_id == RTL8380_FAMILY_ID)
- debugfs_create_x32("bpdu_flood_mask", 0644, rtl838x_dir,
- (u32 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask));
- else
- debugfs_create_x64("bpdu_flood_mask", 0644, rtl838x_dir,
- (u64 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask));
-
- if (priv->family_id == RTL8380_FAMILY_ID)
- debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir,
- (u32 *)(RTL838X_SW_BASE + RTL838X_VLAN_CTRL));
- else
- debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir,
- (u32 *)(RTL838X_SW_BASE + RTL839X_VLAN_CTRL));
+ debugfs_create_x32("bpdu_flood_mask", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->rma_bpdu_fld_pmask));
+ debugfs_create_x32("vlan_ctrl", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->vlan_ctrl));
+ debugfs_create_x32("sflow_ctrl", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->sflow_ctrl));
+ if (priv->r->spcl_trap_eapol_ctrl)
+ debugfs_create_x32("trap_eapol", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_eapol_ctrl));
+ if (priv->r->spcl_trap_arp_ctrl)
+ debugfs_create_x32("trap_arp", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_arp_ctrl));
+ if (priv->r->spcl_trap_igmp_ctrl)
+ debugfs_create_x32("trap_igmp", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_igmp_ctrl));
+ if (priv->r->spcl_trap_ipv6_ctrl)
+ debugfs_create_x32("trap_ipv6", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_ipv6_ctrl));
+ if (priv->r->spcl_trap_switch_mac_ctrl)
+ debugfs_create_x32("trap_switch_mac", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_switch_mac_ctrl));
+ if (priv->r->spcl_trap_switch_ipv4_addr_ctrl)
+ debugfs_create_x32("trap_switch_ipv4_addr", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_switch_ipv4_addr_ctrl));
+ if (priv->r->spcl_trap_crc_ctrl)
+ debugfs_create_x32("trap_crc", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_crc_ctrl));
+ if (priv->r->spcl_trap_ctrl)
+ debugfs_create_x32("trap", 0644, rtl838x_dir,
+ (u32 *)(RTL838X_SW_BASE + priv->r->spcl_trap_ctrl));
ret = rtl838x_dbgfs_leds(rtl838x_dir, priv);
if (ret)
goto err;
+ debugfs_create_file("drop_counters", 0400, rtl838x_dir, priv, &drop_counter_fops);
+
return;
err:
rtl838x_dbgfs_cleanup(priv);
}
+
+void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv)
+{
+ struct dentry *dbg_dir;
+
+ pr_info("%s called\n", __func__);
+ dbg_dir = debugfs_lookup(RTL838X_DRIVER_NAME, NULL);
+ if (!dbg_dir)
+ dbg_dir = debugfs_create_dir(RTL838X_DRIVER_NAME, NULL);
+
+ priv->dbgfs_dir = dbg_dir;
+
+ debugfs_create_file("drop_counters", 0400, dbg_dir, priv, &drop_counter_fops);
+}
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/dsa.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/dsa.c
index 987b47dc8f..a17208b72b 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/dsa.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/dsa.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0-only
#include <net/dsa.h>
#include <linux/if_bridge.h>
@@ -6,7 +5,8 @@
#include <asm/mach-rtl838x/mach-rtl83xx.h>
#include "rtl83xx.h"
-
+extern int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg);
+extern int rtl930x_write_sds_phy(int phy_addr, int page, int phy_reg, u16 v);
extern struct rtl83xx_soc_info soc_info;
@@ -26,50 +26,6 @@ static void rtl83xx_init_stats(struct rtl838x_switch_priv *priv)
mutex_unlock(&priv->reg_mutex);
}
-static void rtl83xx_write_cam(int idx, u32 *r)
-{
- u32 cmd = BIT(16) /* Execute cmd */
- | BIT(15) /* Read */
- | BIT(13) /* Table type 0b01 */
- | (idx & 0x3f);
-
- sw_w32(r[0], RTL838X_TBL_ACCESS_L2_DATA(0));
- sw_w32(r[1], RTL838X_TBL_ACCESS_L2_DATA(1));
- sw_w32(r[2], RTL838X_TBL_ACCESS_L2_DATA(2));
-
- sw_w32(cmd, RTL838X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL838X_TBL_ACCESS_L2_CTRL) & BIT(16));
-}
-
-static u64 rtl83xx_hash_key(struct rtl838x_switch_priv *priv, u64 mac, u32 vid)
-{
- switch (priv->family_id) {
- case RTL8380_FAMILY_ID:
- return rtl838x_hash(priv, mac << 12 | vid);
- case RTL8390_FAMILY_ID:
- return rtl839x_hash(priv, mac << 12 | vid);
- case RTL9300_FAMILY_ID:
- return rtl930x_hash(priv, ((u64)vid) << 48 | mac);
- default:
- pr_err("Hash not implemented\n");
- }
- return 0;
-}
-
-static void rtl83xx_write_hash(int idx, u32 *r)
-{
- u32 cmd = BIT(16) /* Execute cmd */
- | 0 << 15 /* Write */
- | 0 << 13 /* Table type 0b00 */
- | (idx & 0x1fff);
-
- sw_w32(0, RTL838X_TBL_ACCESS_L2_DATA(0));
- sw_w32(0, RTL838X_TBL_ACCESS_L2_DATA(1));
- sw_w32(0, RTL838X_TBL_ACCESS_L2_DATA(2));
- sw_w32(cmd, RTL838X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL838X_TBL_ACCESS_L2_CTRL) & BIT(16));
-}
-
static void rtl83xx_enable_phy_polling(struct rtl838x_switch_priv *priv)
{
int i;
@@ -79,7 +35,7 @@ static void rtl83xx_enable_phy_polling(struct rtl838x_switch_priv *priv)
/* Enable all ports with a PHY, including the SFP-ports */
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
- v |= BIT(i);
+ v |= BIT_ULL(i);
}
pr_debug("%s: %16llx\n", __func__, v);
@@ -150,7 +106,48 @@ static enum dsa_tag_protocol rtl83xx_get_tag_protocol(struct dsa_switch *ds, int
/* The switch does not tag the frames, instead internally the header
* structure for each packet is tagged accordingly.
*/
- return DSA_TAG_PROTO_TRAILER;
+ return DSA_TAG_PROTO_RTL83XX;
+}
+
+/*
+ * Initialize all VLANS
+ */
+static void rtl83xx_vlan_setup(struct rtl838x_switch_priv *priv)
+{
+ struct rtl838x_vlan_info info;
+ int i;
+
+ pr_debug("In %s\n", __func__);
+
+ priv->r->vlan_profile_setup(0);
+ priv->r->vlan_profile_setup(1);
+ pr_info("UNKNOWN_MC_PMASK: %016llx\n", priv->r->read_mcast_pmask(UNKNOWN_MC_PMASK));
+ priv->r->vlan_profile_dump(0);
+
+ info.fid = 0; // Default Forwarding ID / MSTI
+ info.hash_uc_fid = false; // Do not build the L2 lookup hash with FID, but VID
+ info.hash_mc_fid = false; // Do the same for Multicast packets
+ info.profile_id = 0; // Use default Vlan Profile 0
+ info.tagged_ports = 0; // Initially no port members
+
+ // Initialize all vlans 0-4095
+ for (i = 0; i < MAX_VLANS; i ++)
+ priv->r->vlan_set_tagged(i, &info);
+
+ // reset PVIDs; defaults to 1 on reset
+ for (i = 0; i <= priv->ds->num_ports; i++)
+ sw_w32(0, priv->r->vlan_port_pb + (i << 2));
+
+ // Set forwarding action based on inner VLAN tag
+ for (i = 0; i < priv->cpu_port; i++)
+ priv->r->vlan_fwd_on_inner(i, true);
+}
+
+static void rtl83xx_setup_bpdu_traps(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ for (i = 0; i < priv->cpu_port; i++)
+ priv->r->set_receive_management_action(i, BPDU, TRAP2CPU);
}
static int rtl83xx_setup(struct dsa_switch *ds)
@@ -174,7 +171,7 @@ static int rtl83xx_setup(struct dsa_switch *ds)
*/
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
- priv->r->set_port_reg_be(BIT_ULL(priv->cpu_port) | BIT(i),
+ priv->r->set_port_reg_be(BIT_ULL(priv->cpu_port) | BIT_ULL(i),
priv->r->port_iso_ctrl(i));
port_bitmap |= BIT_ULL(i);
}
@@ -188,12 +185,21 @@ static int rtl83xx_setup(struct dsa_switch *ds)
rtl83xx_init_stats(priv);
+ rtl83xx_vlan_setup(priv);
+
+ rtl83xx_setup_bpdu_traps(priv);
+
ds->configure_vlan_while_not_filtering = true;
+ priv->r->l2_learning_setup();
+
/* Enable MAC Polling PHY again */
rtl83xx_enable_phy_polling(priv);
pr_debug("Please wait until PHY is settled\n");
msleep(1000);
+
+ priv->r->pie_init(priv);
+
return 0;
}
@@ -203,7 +209,7 @@ static int rtl930x_setup(struct dsa_switch *ds)
struct rtl838x_switch_priv *priv = ds->priv;
u32 port_bitmap = BIT(priv->cpu_port);
- pr_info("%s called\n", __func__);
+ pr_debug("%s called\n", __func__);
// Enable CSTI STP mode
// sw_w32(1, RTL930X_ST_CTRL);
@@ -218,8 +224,8 @@ static int rtl930x_setup(struct dsa_switch *ds)
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
- priv->r->traffic_set(i, BIT(priv->cpu_port) | BIT(i));
- port_bitmap |= 1ULL << i;
+ priv->r->traffic_set(i, BIT_ULL(priv->cpu_port) | BIT_ULL(i));
+ port_bitmap |= BIT_ULL(i);
}
}
priv->r->traffic_set(priv->cpu_port, port_bitmap);
@@ -228,10 +234,14 @@ static int rtl930x_setup(struct dsa_switch *ds)
// TODO: Initialize statistics
+ rtl83xx_vlan_setup(priv);
+
ds->configure_vlan_while_not_filtering = true;
rtl83xx_enable_phy_polling(priv);
+ priv->r->pie_init(priv);
+
return 0;
}
@@ -242,7 +252,7 @@ static void rtl83xx_phylink_validate(struct dsa_switch *ds, int port,
struct rtl838x_switch_priv *priv = ds->priv;
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- pr_debug("In %s port %d", __func__, port);
+ pr_debug("In %s port %d, state is %d", __func__, port, state->interface);
if (!phy_interface_mode_is_rgmii(state->interface) &&
state->interface != PHY_INTERFACE_MODE_NA &&
@@ -279,6 +289,68 @@ static void rtl83xx_phylink_validate(struct dsa_switch *ds, int port,
if (port >= 24 && port <= 27 && priv->family_id == RTL8380_FAMILY_ID)
phylink_set(mask, 1000baseX_Full);
+ /* On the RTL839x family of SoCs, ports 48 to 51 are SFP ports */
+ if (port >= 48 && port <= 51 && priv->family_id == RTL8390_FAMILY_ID)
+ phylink_set(mask, 1000baseX_Full);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void rtl93xx_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ pr_info("In %s port %d, state is %d (%s)", __func__, port, state->interface,
+ phy_modes(state->interface));
+
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_XGMII &&
+ state->interface != PHY_INTERFACE_MODE_HSGMII &&
+ state->interface != PHY_INTERFACE_MODE_10GKR &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev,
+ "Unsupported interface: %d for port %d\n",
+ state->interface, port);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* With the exclusion of MII and Reverse MII, we support Gigabit,
+ * including Half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+	/* On the RTL9300 family of SoCs, ports 26 to 27 may be SFP ports. TODO: take this from the .dts */
+ if (port >= 26 && port <= 27)
+ phylink_set(mask, 1000baseX_Full);
+
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
@@ -300,18 +372,11 @@ static int rtl83xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
if (port < 0 || port > priv->cpu_port)
return -EINVAL;
- /*
- * On the RTL9300 for at least the RTL8226B PHY, the MAC-side link
- * state needs to be read twice in order to read a correct result.
- * This would not be necessary for ports connected e.g. to RTL8218D
- * PHYs.
- */
state->link = 0;
link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
- link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
if (link & BIT_ULL(port))
state->link = 1;
- pr_debug("%s: link state: %llx\n", __func__, link & BIT_ULL(port));
+ pr_debug("%s: link state port %d: %llx\n", __func__, port, link & BIT_ULL(port));
state->duplex = 0;
if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port))
@@ -330,7 +395,8 @@ static int rtl83xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->speed = SPEED_1000;
break;
case 3:
- if (port == 24 || port == 26) /* Internal serdes */
+ if (priv->family_id == RTL9300_FAMILY_ID
+ && (port == 24 || port == 26)) /* Internal serdes */
state->speed = SPEED_2500;
else
state->speed = SPEED_100; /* Is in fact 500Mbit */
@@ -344,6 +410,69 @@ static int rtl83xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
return 1;
}
+static int rtl93xx_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 speed;
+ u64 link;
+
+ if (port < 0 || port > priv->cpu_port)
+ return -EINVAL;
+
+ /*
+ * On the RTL9300 for at least the RTL8226B PHY, the MAC-side link
+ * state needs to be read twice in order to read a correct result.
+ * This would not be necessary for ports connected e.g. to RTL8218D
+ * PHYs.
+ */
+ state->link = 0;
+ link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
+ link = priv->r->get_port_reg_le(priv->r->mac_link_sts);
+ if (link & BIT_ULL(port))
+ state->link = 1;
+ pr_info("%s: link state port %d: %llx, media %08x\n", __func__, port,
+ link & BIT_ULL(port), sw_r32(RTL930X_MAC_LINK_MEDIA_STS));
+
+ state->duplex = 0;
+ if (priv->r->get_port_reg_le(priv->r->mac_link_dup_sts) & BIT_ULL(port))
+ state->duplex = 1;
+
+ speed = priv->r->get_port_reg_le(priv->r->mac_link_spd_sts(port));
+ speed >>= (port % 8) << 2;
+ switch (speed & 0xf) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ case 7:
+ state->speed = SPEED_1000;
+ break;
+ case 4:
+ state->speed = SPEED_10000;
+ break;
+ case 5:
+ case 8:
+ state->speed = SPEED_2500;
+ break;
+ case 6:
+ state->speed = SPEED_5000;
+ break;
+ default:
+ pr_err("%s: unknown speed: %d\n", __func__, (u32)speed & 0xf);
+ }
+
+ pr_info("%s: speed is: %d %d\n", __func__, (u32)speed & 0xf, state->speed);
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (priv->r->get_port_reg_le(priv->r->mac_rx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_RX;
+ if (priv->r->get_port_reg_le(priv->r->mac_tx_pause_sts) & BIT_ULL(port))
+ state->pause |= MLO_PAUSE_TX;
+ return 1;
+}
static void rtl83xx_config_interface(int port, phy_interface_t interface)
{
@@ -389,12 +518,10 @@ static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port,
struct rtl838x_switch_priv *priv = ds->priv;
u32 reg;
int speed_bit = priv->family_id == RTL8380_FAMILY_ID ? 4 : 3;
+// int force_fc = priv->family_id == RTL8380_FAMILY_ID ? RTL838X_MAC_FORCE_FC_EN : RTL839X_MAC_FORCE_FC_EN;
- pr_debug("%s port %d, mode %x\n", __func__, port, mode);
-
- // BUG: Make this work on RTL93XX
- if (priv->family_id >= RTL9300_FAMILY_ID)
- return;
+ pr_info("%s port %d, mode %x, phy-mode: %s, speed %d, link %d\n", __func__,
+ port, mode, phy_modes(state->interface), state->speed, state->link);
if (port == priv->cpu_port) {
/* Set Speed, duplex, flow control
@@ -417,7 +544,7 @@ static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port,
if (priv->family_id == RTL8380_FAMILY_ID) {
if (mode == MLO_AN_PHY || phylink_autoneg_inband(mode)) {
pr_debug("PHY autonegotiates\n");
- reg |= BIT(2);
+ reg |= RTL830X_NWAY_EN;
sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
rtl83xx_config_interface(port, state->interface);
return;
@@ -427,20 +554,46 @@ static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port,
if (mode != MLO_AN_FIXED)
pr_debug("Fixed state.\n");
- if (priv->family_id == RTL8380_FAMILY_ID) {
- /* Clear id_mode_dis bit, and the existing port mode, let
- * RGMII_MODE_EN bet set by mac_link_{up,down}
- */
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
+ /* Clear id_mode_dis bit, and the existing port mode, let
+	 * RGMII_MODE_EN be set by mac_link_{up,down}
+ */
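+	/*
+	 * The pause, duplex and force-link bits sit at family specific
+	 * positions in the per-port MAC force mode register, hence the
+	 * per-family handling below.
+	 */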
+	switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ reg &= ~(RTL830X_RX_PAUSE_EN | RTL830X_TX_PAUSE_EN);
if (state->pause & MLO_PAUSE_TXRX_MASK) {
if (state->pause & MLO_PAUSE_TX)
- reg |= TX_PAUSE_EN;
- reg |= RX_PAUSE_EN;
+ reg |= RTL830X_TX_PAUSE_EN;
+ reg |= RTL830X_RX_PAUSE_EN;
}
+ break;
+ case RTL8390_FAMILY_ID:
+ reg &= ~(RTL839X_RX_PAUSE_EN | RTL839X_TX_PAUSE_EN);
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
+ reg |= RTL839X_TX_PAUSE_EN;
+ reg |= RTL839X_RX_PAUSE_EN;
+ }
+ break;
+ case RTL9300_FAMILY_ID:
+ reg &= ~(RTL930X_RX_PAUSE_EN | RTL930X_TX_PAUSE_EN);
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
+ reg |= RTL930X_TX_PAUSE_EN;
+ reg |= RTL930X_RX_PAUSE_EN;
+ }
+ break;
+ case RTL9310_FAMILY_ID:
+ reg &= ~(RTL931X_RX_PAUSE_EN | RTL931X_TX_PAUSE_EN);
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
+ reg |= RTL931X_TX_PAUSE_EN;
+ reg |= RTL931X_RX_PAUSE_EN;
+ }
+ break;
}
-
reg &= ~(3 << speed_bit);
+// reg &= ~(1 << force_fc);
switch (state->speed) {
case SPEED_1000:
reg |= 2 << speed_bit;
@@ -448,29 +601,191 @@ static void rtl83xx_phylink_mac_config(struct dsa_switch *ds, int port,
case SPEED_100:
reg |= 1 << speed_bit;
break;
+ case SPEED_10:
+ reg = 0;
+ break;
+ case SPEED_2500:
+ reg = 5 << speed_bit;
+ break;
+ case SPEED_5000:
+ reg = 6 << speed_bit;
+ break;
+ case SPEED_10000:
+ reg = 4 << speed_bit;
+ }
+	switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ reg &= ~(RTL830X_DUPLEX_MODE | RTL830X_FORCE_LINK_EN);
+ break;
+ case RTL8390_FAMILY_ID:
+ reg &= ~(RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN);
+ break;
+ case RTL9300_FAMILY_ID:
+ reg &= ~(RTL930X_DUPLEX_MODE | RTL930X_FORCE_LINK_EN);
+ break;
+ case RTL9310_FAMILY_ID:
+ reg &= ~(RTL931X_DUPLEX_MODE | RTL931X_FORCE_LINK_EN);
+ break;
+ }
+ if (priv->lagmembers & (1ULL << port)) {
+		switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ reg |= (RTL830X_DUPLEX_MODE | RTL830X_FORCE_LINK_EN);
+ break;
+ case RTL8390_FAMILY_ID:
+ reg |= (RTL839X_DUPLEX_MODE | RTL839X_FORCE_LINK_EN);
+ break;
+ case RTL9300_FAMILY_ID:
+ reg |= (RTL930X_DUPLEX_MODE | RTL930X_FORCE_LINK_EN);
+ break;
+ case RTL9310_FAMILY_ID:
+ reg |= (RTL931X_DUPLEX_MODE | RTL931X_FORCE_LINK_EN);
+ break;
+ }
}
- if (priv->family_id == RTL8380_FAMILY_ID) {
- reg &= ~(DUPLEX_FULL | FORCE_LINK_EN);
+	switch (priv->family_id) {
+ case RTL8380_FAMILY_ID:
+ if (state->link)
+ reg |= RTL830X_FORCE_LINK_EN;
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL830X_DUPLEX_MODE;
+ break;
+ case RTL8390_FAMILY_ID:
+ if (state->link)
+ reg |= RTL839X_FORCE_LINK_EN;
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL839X_DUPLEX_MODE;
+ break;
+ case RTL9300_FAMILY_ID:
+ if (state->link)
+ reg |= RTL930X_FORCE_LINK_EN;
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL930X_DUPLEX_MODE;
+ break;
+ case RTL9310_FAMILY_ID:
if (state->link)
- reg |= FORCE_LINK_EN;
+ reg |= RTL931X_FORCE_LINK_EN;
if (state->duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ reg |= RTL931X_DUPLEX_MODE;
+ break;
}
-
// Disable AN
if (priv->family_id == RTL8380_FAMILY_ID)
- reg &= ~BIT(2);
+ reg &= ~RTL830X_NWAY_EN;
+ sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
+}
+
+static void rtl93xx_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int sds_num, sds_mode;
+ u32 reg, v;
+	u32 *p1 = (u32 *)0xb8003308, *p2 = (u32 *)0xb800330c;
+
+ pr_info("%s port %d, mode %x, phy-mode: %s, speed %d, link %d\n", __func__,
+ port, mode, phy_modes(state->interface), state->speed, state->link);
+ pr_info("%s: %08x %08x\n", __func__, *p1, *p2);
+ *p1 |= BIT(15);
+ *p2 &= ~BIT(15);
+
+	// BUG: Make this work on RTL931X
+ if (priv->family_id >= RTL9310_FAMILY_ID)
+ return;
+
+ // Nothing to be done for the CPU-port
+ if (port == priv->cpu_port)
+ return;
+
+ // On the RTL930X, ports 24 to 27 are using an internal SerDes
+ if (port >=24 && port <= 27) {
+ sds_num = port - 18; // Port 24 mapped to SerDes 6, 25 to 7 ...
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_HSGMII:
+ sds_mode = 0x12;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ sds_mode = 0x04;
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ sds_mode = 0x10;
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ sds_mode = 0x1b;
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ sds_mode = 0x0d;
+ break;
+ default:
+ pr_err("%s: unknown serdes mode: %s\n",
+ __func__, phy_modes(state->interface));
+ return;
+ }
+ rtl9300_sds_rst(sds_num, sds_mode);
+ }
+
+ reg = sw_r32(priv->r->mac_force_mode_ctrl(port));
+ reg &= ~(0xf << 3);
+ switch (state->speed) {
+ case SPEED_10000:
+ reg |= 4 << 3;
+ break;
+ case SPEED_2500:
+ reg |= 5 << 3;
+ break;
+ case SPEED_5000:
+		reg |= 6 << 3;
+ break;
+ case SPEED_1000:
+ pr_info("Setting PHY speed to 1000M\n");
+ // BUG: SDS-num is hard-coded!
+ v = rtl930x_read_sds_phy(8, 2, 0);
+ v &= ~(BIT(6) | BIT(13));
+ v |= BIT(6);
+ rtl930x_write_sds_phy(8, 2, 0, v);
+
+ reg |= 2 << 3;
+ break;
+ default:
+ reg |= 2 << 3;
+ break;
+ }
+
+ if (state->link)
+ reg |= RTL930X_FORCE_LINK_EN;
+
+ if (state->duplex == DUPLEX_FULL)
+ reg |= RTL930X_DUPLEX_MODE;
+
+ reg |= 1; // Force Link up
sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
}
static void rtl83xx_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ /* Stop TX/RX to port */
+ pr_debug("%s: port %d down\n", __func__, port);
+ sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
+}
+
+static void rtl93xx_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
{
struct rtl838x_switch_priv *priv = ds->priv;
/* Stop TX/RX to port */
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
+
+ // No longer force link
+ sw_w32_mask(3, 0, priv->r->mac_force_mode_ctrl(port));
}
static void rtl83xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -479,6 +794,18 @@ static void rtl83xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct rtl838x_switch_priv *priv = ds->priv;
+
+ /* Restart TX/RX to port */
+ sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
+}
+
+static void rtl93xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
/* Restart TX/RX to port */
sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
}
@@ -523,13 +850,40 @@ static int rtl83xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(rtl83xx_mib);
}
-static int rtl83xx_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static u64 rtl83xx_mc_group_del_port(struct rtl838x_switch_priv *priv, int mc_group, int port);
+static u64 rtl83xx_mc_group_add_port(struct rtl838x_switch_priv *priv, int mc_group, int port);
+
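+/*
+ * When a port goes down its multicast group memberships are removed from
+ * the hardware tables and remembered in mc_group_saves[], so they can be
+ * restored once the port comes up or joins a bridge again.
+ */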
+static void store_mcgroups(struct rtl838x_switch_priv *priv, int port)
{
- struct rtl838x_switch_priv *priv = ds->priv;
+ int mc_group;
+ for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+ if (portmask & BIT_ULL(port)) {
+ priv->mc_group_saves[mc_group] = port;
+ rtl83xx_mc_group_del_port(priv, mc_group, port);
+ }
+ }
+}
+static void load_mcgroups(struct rtl838x_switch_priv *priv, int port)
+{
+ int mc_group;
+ for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ if (priv->mc_group_saves[mc_group] == port) {
+ rtl83xx_mc_group_add_port(priv, mc_group, port);
+ priv->mc_group_saves[mc_group] = -1;
+ }
+ }
+}
+
+static int rtl83xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
u64 v;
pr_debug("%s: %x %d", __func__, (u32) priv, port);
+
+
priv->ports[port].enable = true;
/* enable inner tagging on egress, do not keep any tags */
@@ -541,20 +895,31 @@ static int rtl83xx_port_enable(struct dsa_switch *ds, int port,
if (dsa_is_cpu_port(ds, port))
return 0;
+
/* add port to switch mask of CPU_PORT */
priv->r->traffic_enable(priv->cpu_port, port);
-
+ load_mcgroups(priv, port);
+
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
/* add all other ports in the same bridge to switch mask of port */
v = priv->r->traffic_get(port);
v |= priv->ports[port].pm;
priv->r->traffic_set(port, v);
- sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_SABLK_CTRL);
- sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_DABLK_CTRL);
+
+ // TODO: Figure out if this is necessary
+ if (priv->family_id == RTL9300_FAMILY_ID) {
+ sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_SABLK_CTRL);
+ sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_DABLK_CTRL);
+ }
return 0;
}
+
static void rtl83xx_port_disable(struct dsa_switch *ds, int port)
{
struct rtl838x_switch_priv *priv = ds->priv;
@@ -568,6 +933,7 @@ static void rtl83xx_port_disable(struct dsa_switch *ds, int port)
// BUG: This does not work on RTL931X
/* remove port from switch mask of CPU_PORT */
priv->r->traffic_disable(priv->cpu_port, port);
+ store_mcgroups(priv, port);
/* remove all other ports in the same bridge from switch mask of port */
v = priv->r->traffic_get(port);
@@ -577,58 +943,57 @@ static void rtl83xx_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
}
+static int rtl83xx_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (e->eee_enabled && !priv->eee_enabled) {
+ pr_info("Globally enabling EEE\n");
+ priv->r->init_eee(priv, true);
+ }
+
+ priv->r->port_eee_set(priv, port, e->eee_enabled);
+
+ if (e->eee_enabled)
+ pr_info("Enabled EEE for port %d\n", port);
+ else
+ pr_info("Disabled EEE for port %d\n", port);
+ return 0;
+}
+
static int rtl83xx_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
struct rtl838x_switch_priv *priv = ds->priv;
- pr_debug("%s: port %d", __func__, port);
+ pr_debug("%s: %d\n", __func__, port);
e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full;
- if (sw_r32(priv->r->mac_force_mode_ctrl(port)) & BIT(9))
- e->advertised |= ADVERTISED_100baseT_Full;
- if (sw_r32(priv->r->mac_force_mode_ctrl(port)) & BIT(10))
- e->advertised |= ADVERTISED_1000baseT_Full;
+ priv->r->eee_port_ability(priv, e, port);
e->eee_enabled = priv->ports[port].eee_enabled;
- pr_debug("enabled: %d, active %x\n", e->eee_enabled, e->advertised);
-
- if (sw_r32(RTL838X_MAC_EEE_ABLTY) & BIT(port)) {
- e->lp_advertised = ADVERTISED_100baseT_Full;
- e->lp_advertised |= ADVERTISED_1000baseT_Full;
- }
e->eee_active = !!(e->advertised & e->lp_advertised);
- pr_debug("active: %d, lp %x\n", e->eee_active, e->lp_advertised);
return 0;
}
-static int rtl83xx_set_mac_eee(struct dsa_switch *ds, int port,
+static int rtl93xx_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_eee *e)
{
struct rtl838x_switch_priv *priv = ds->priv;
- pr_debug("%s: port %d", __func__, port);
- if (e->eee_enabled) {
- pr_debug("Globally enabling EEE\n");
- sw_w32_mask(0x4, 0, RTL838X_SMI_GLB_CTRL);
- }
- if (e->eee_enabled) {
- pr_debug("Enabling EEE for MAC %d\n", port);
- sw_w32_mask(0, 3 << 9, priv->r->mac_force_mode_ctrl(port));
- sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_TX_EN);
- sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_RX_EN);
- priv->ports[port].eee_enabled = true;
- e->eee_enabled = true;
- } else {
- pr_debug("Disabling EEE for MAC %d\n", port);
- sw_w32_mask(3 << 9, 0, priv->r->mac_force_mode_ctrl(port));
- sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_TX_EN);
- sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_RX_EN);
- priv->ports[port].eee_enabled = false;
- e->eee_enabled = false;
- }
+ e->supported = SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full
+ | SUPPORTED_2500baseX_Full;
+
+ priv->r->eee_port_ability(priv, e, port);
+
+ e->eee_enabled = priv->ports[port].eee_enabled;
+
+ e->eee_active = !!(e->advertised & e->lp_advertised);
+
return 0;
}
@@ -658,26 +1023,32 @@ static int rtl83xx_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct rtl838x_switch_priv *priv = ds->priv;
- u64 port_bitmap = 1ULL << priv->cpu_port, v;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
int i;
pr_debug("%s %x: %d %llx", __func__, (u32)priv, port, port_bitmap);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
mutex_lock(&priv->reg_mutex);
for (i = 0; i < ds->num_ports; i++) {
/* Add this port to the port matrix of the other ports in the
* same bridge. If the port is disabled, port matrix is kept
* and not being setup until the port becomes enabled.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_is_user_port(ds, i) && i != port && !priv->is_lagmember[i]) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
priv->r->traffic_enable(i, port);
- priv->ports[i].pm |= 1ULL << port;
- port_bitmap |= 1ULL << i;
+ priv->ports[i].pm |= BIT_ULL(port);
+ port_bitmap |= BIT_ULL(i);
}
}
+ load_mcgroups(priv, port);
/* Add all other ports to this port matrix. */
if (priv->ports[port].enable) {
@@ -696,7 +1067,7 @@ static void rtl83xx_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct rtl838x_switch_priv *priv = ds->priv;
- u64 port_bitmap = 1ULL << priv->cpu_port, v;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
int i;
pr_debug("%s %x: %d", __func__, (u32)priv, port);
@@ -711,13 +1082,14 @@ static void rtl83xx_port_bridge_leave(struct dsa_switch *ds, int port,
if (dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
- if (priv->ports[i].enable)
+ if (priv->ports[i].enable) {
priv->r->traffic_disable(i, port);
-
- priv->ports[i].pm |= 1ULL << port;
+ }
+ priv->ports[i].pm |= BIT_ULL(port);
port_bitmap &= ~BIT_ULL(i);
}
}
+ store_mcgroups(priv, port);
/* Add all other ports to this port matrix. */
if (priv->ports[port].enable) {
@@ -814,6 +1186,7 @@ void rtl930x_fast_age(struct dsa_switch *ds, int port)
{
struct rtl838x_switch_priv *priv = ds->priv;
+
pr_debug("FAST AGE port %d\n", port);
mutex_lock(&priv->reg_mutex);
sw_w32(port << 11, RTL930X_L2_TBL_FLUSH_CTRL + 4);
@@ -829,8 +1202,7 @@ static int rtl83xx_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
struct rtl838x_switch_priv *priv = ds->priv;
-
- pr_debug("%s: port %d\n", __func__, port);
+ pr_debug("%s: port %d state %d\n", __func__, port, vlan_filtering);
mutex_lock(&priv->reg_mutex);
if (vlan_filtering) {
@@ -844,14 +1216,15 @@ static int rtl83xx_vlan_filtering(struct dsa_switch *ds, int port,
*/
if (port != priv->cpu_port)
sw_w32_mask(0b10 << ((port % 16) << 1), 0b01 << ((port % 16) << 1),
- priv->r->vlan_port_igr_filter + ((port >> 5) << 2));
- sw_w32_mask(0, BIT(port % 32), priv->r->vlan_port_egr_filter + ((port >> 4) << 2));
+ priv->r->vlan_port_igr_filter + ((port >> 4) << 2));
+ sw_w32_mask(0, BIT(port % 32), priv->r->vlan_port_egr_filter + ((port >> 5) << 2));
+ //sw_w32_mask(BIT(port % 32), 0, priv->r->vlan_port_egr_filter + ((port >> 5) << 2)); //BUG
} else {
/* Disable ingress and egress filtering */
if (port != priv->cpu_port)
sw_w32_mask(0b11 << ((port % 16) << 1), 0,
- priv->r->vlan_port_igr_filter + ((port >> 5) << 2));
- sw_w32_mask(BIT(port % 32), 0, priv->r->vlan_port_egr_filter + ((port >> 4) << 2));
+ priv->r->vlan_port_igr_filter + ((port >> 4) << 2));
+ sw_w32_mask(BIT(port % 32), 0, priv->r->vlan_port_egr_filter + ((port >> 5) << 2));
}
/* Do we need to do something to the CPU-Port, too? */
@@ -866,17 +1239,17 @@ static int rtl83xx_vlan_prepare(struct dsa_switch *ds, int port,
struct rtl838x_vlan_info info;
struct rtl838x_switch_priv *priv = ds->priv;
- pr_info("%s: port %d\n", __func__, port);
-
mutex_lock(&priv->reg_mutex);
+ priv->r->vlan_tables_read(0, &info);
- priv->r->vlan_profile_dump(1);
- priv->r->vlan_tables_read(1, &info);
-
- pr_info("Tagged ports %llx, untag %llx, prof %x, MC# %d, UC# %d, FID %x\n",
+ pr_debug("VLAN 0: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n",
info.tagged_ports, info.untagged_ports, info.profile_id,
info.hash_mc_fid, info.hash_uc_fid, info.fid);
+ priv->r->vlan_tables_read(1, &info);
+ pr_debug("VLAN 1: Tagged ports %llx, untag %llx, profile %d, MC# %d, UC# %d, FID %x\n",
+ info.tagged_ports, info.untagged_ports, info.profile_id,
+ info.hash_mc_fid, info.hash_uc_fid, info.fid);
priv->r->vlan_set_untagged(1, info.untagged_ports);
pr_debug("SET: Untagged ports, VLAN %d: %llx\n", 1, info.untagged_ports);
@@ -894,7 +1267,7 @@ static void rtl83xx_vlan_add(struct dsa_switch *ds, int port,
struct rtl838x_switch_priv *priv = ds->priv;
int v;
- pr_info("%s port %d, vid_end %d, vid_end %d, flags %x\n", __func__,
+ pr_debug("%s port %d, vid_end %d, vid_end %d, flags %x\n", __func__,
port, vlan->vid_begin, vlan->vid_end, vlan->flags);
if (vlan->vid_begin > 4095 || vlan->vid_end > 4095) {
@@ -916,9 +1289,6 @@ static void rtl83xx_vlan_add(struct dsa_switch *ds, int port,
}
for (v = vlan->vid_begin; v <= vlan->vid_end; v++) {
- if (!v)
- continue;
-
/* Get port memberships of this vlan */
priv->r->vlan_tables_read(v, &info);
@@ -939,10 +1309,10 @@ static void rtl83xx_vlan_add(struct dsa_switch *ds, int port,
info.untagged_ports |= BIT_ULL(port);
priv->r->vlan_set_untagged(v, info.untagged_ports);
- pr_info("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports);
+ pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports);
priv->r->vlan_set_tagged(v, &info);
- pr_info("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports);
+ pr_debug("Tagged ports, VLAN %d: %llx\n", v, info.tagged_ports);
}
mutex_unlock(&priv->reg_mutex);
@@ -978,9 +1348,7 @@ static int rtl83xx_vlan_del(struct dsa_switch *ds, int port,
/* remove port from both tables */
info.untagged_ports &= (~BIT_ULL(port));
- /* always leave vid 1 */
- if (v != 1)
- info.tagged_ports &= (~BIT_ULL(port));
+ info.tagged_ports &= (~BIT_ULL(port));
priv->r->vlan_set_untagged(v, info.untagged_ports);
pr_debug("Untagged ports, VLAN %d: %llx\n", v, info.untagged_ports);
@@ -993,59 +1361,140 @@ static int rtl83xx_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int rtl83xx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static void dump_l2_entry(struct rtl838x_l2_entry *e)
{
- struct rtl838x_switch_priv *priv = ds->priv;
- u64 mac = ether_addr_to_u64(addr);
- u32 key = rtl83xx_hash_key(priv, mac, vid);
- struct rtl838x_l2_entry e;
- u32 r[3];
+ pr_info("MAC: %02x:%02x:%02x:%02x:%02x:%02x vid: %d, rvid: %d, port: %d, valid: %d\n",
+ e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5],
+ e->vid, e->rvid, e->port, e->valid);
+
+ if (e->type != L2_MULTICAST) {
+ pr_info("Type: %d, is_static: %d, is_ip_mc: %d, is_ipv6_mc: %d, block_da: %d\n",
+ e->type, e->is_static, e->is_ip_mc, e->is_ipv6_mc, e->block_da);
+ pr_info(" block_sa: %d, susp: %d, nh: %d, age: %d, is_trunk: %d, trunk: %d\n",
+ e->block_sa, e->suspended, e->next_hop, e->age, e->is_trunk, e->trunk);
+ }
+ if (e->type == L2_MULTICAST)
+ pr_info(" L2_MULTICAST mc_portmask_index: %d\n", e->mc_portmask_index);
+ if (e->is_ip_mc || e->is_ipv6_mc)
+ pr_info(" mc_portmask_index: %d, mc_gip: %d, mc_sip: %d\n",
+ e->mc_portmask_index, e->mc_gip, e->mc_sip);
+ pr_info(" stack_dev: %d\n", e->stack_dev);
+ if (e->next_hop)
+ pr_info(" nh_route_id: %d\n", e->nh_route_id);
+}
+
+static void rtl83xx_setup_l2_uc_entry(struct rtl838x_l2_entry *e, int port, int vid, u64 mac)
+{
+ e->is_ip_mc = e->is_ipv6_mc = false;
+ e->valid = true;
+ e->age = 3;
+	e->port = port;
+ e->vid = vid;
+ u64_to_ether_addr(mac, e->mac);
+}
+
+static void rtl83xx_setup_l2_mc_entry(struct rtl838x_switch_priv *priv,
+ struct rtl838x_l2_entry *e, int vid, u64 mac, int mc_group)
+{
+ e->is_ip_mc = e->is_ipv6_mc = false;
+ e->valid = true;
+ e->mc_portmask_index = mc_group;
+ e->type = L2_MULTICAST;
+ e->rvid = e->vid = vid;
+ pr_debug("%s: vid: %d, rvid: %d\n", __func__, e->vid, e->rvid);
+ u64_to_ether_addr(mac, e->mac);
+}
+
+/*
+ * Uses the seed to derive a hash key identifying a bucket in the L2 table, then loops
+ * over the entries in that bucket until either a matching entry or an empty slot is found.
+ * When an entry is found, it is returned in rtl838x_l2_entry together with its index in the bucket.
+ * When an empty slot is found and must_exist is false, the index of that slot is returned.
+ * When no slot is available, -1 is returned.
+ */
+static int rtl83xx_find_l2_hash_entry(struct rtl838x_switch_priv *priv, u64 seed,
+ bool must_exist, struct rtl838x_l2_entry *e)
+{
+ int i, idx = -1;
+ u32 key = priv->r->l2_hash_key(priv, seed);
u64 entry;
- int idx = -1, err = 0, i;
- mutex_lock(&priv->reg_mutex);
- for (i = 0; i < 4; i++) {
- entry = priv->r->read_l2_entry_using_hash(key, i, &e);
- if (!e.valid) {
- idx = (key << 2) | i;
- break;
- }
- if ((entry & 0x0fffffffffffffffULL) == ((mac << 12) | vid)) {
- idx = (key << 2) | i;
+ pr_debug("%s: using key %x, for seed %016llx\n", __func__, key, seed);
+ // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ for (i = 0; i < priv->l2_bucket_size; i++) {
+ entry = priv->r->read_l2_entry_using_hash(key, i, e);
+ pr_debug("valid %d, mac %016llx\n", e->valid, ether_addr_to_u64(&e->mac[0]));
+ if (must_exist && !e->valid)
+ continue;
+ if (!e->valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
+ idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1 : ((key << 2) | i) & 0xffff;
break;
}
}
- if (idx >= 0) {
- r[0] = 3 << 17 | port << 12; // Aging and port
- r[0] |= vid;
- r[1] = mac >> 16;
- r[2] = (mac & 0xffff) << 12; /* rvid = 0 */
- rtl83xx_write_hash(idx, r);
- goto out;
- }
- /* Hash buckets full, try CAM */
+ return idx;
+}
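+
+/*
+ * Illustration (not part of the lookup itself): callers are expected to split the
+ * returned index back into the hash key and the slot within the bucket, e.g.
+ *
+ *	idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
+ *	if (idx >= 0)
+ *		priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ *
+ * i.e. idx >> 2 selects the bucket (hash key) and idx & 0x3 the position in it,
+ * matching how rtl83xx_port_fdb_add() and rtl83xx_port_mdb_add() below use it.
+ */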
+
+/*
+ * Uses the seed to identify an entry in the CAM by looping over all of its entries.
+ * When an entry is found, it is returned in rtl838x_l2_entry together with its index in the CAM.
+ * When an empty slot is found, the index of that slot is returned.
+ * When no slot is available, -1 is returned.
+ */
+static int rtl83xx_find_l2_cam_entry(struct rtl838x_switch_priv *priv, u64 seed,
+ bool must_exist, struct rtl838x_l2_entry *e)
+{
+ int i, idx = -1;
+ u64 entry;
+
for (i = 0; i < 64; i++) {
- entry = priv->r->read_cam(i, &e);
- if (!e.valid) {
+ entry = priv->r->read_cam(i, e);
+ if (!must_exist && !e->valid) {
if (idx < 0) /* First empty entry? */
idx = i;
break;
- } else if ((entry & 0x0fffffffffffffffULL) == ((mac << 12) | vid)) {
+ } else if ((entry & 0x0fffffffffffffffULL) == seed) {
pr_debug("Found entry in CAM\n");
idx = i;
break;
}
}
+ return idx;
+}
+
+static int rtl83xx_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
+
+ // Found an existing or empty entry
if (idx >= 0) {
- r[0] = 3 << 17 | port << 12; // Aging
- r[0] |= vid;
- r[1] = mac >> 16;
- r[2] = (mac & 0xffff) << 12; /* rvid = 0 */
- rtl83xx_write_cam(idx, r);
+ rtl83xx_setup_l2_uc_entry(&e, port, vid, mac);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
goto out;
}
+
+ // Hash buckets full, try CAM
+	idx = rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
+
+ if (idx >= 0) {
+ rtl83xx_setup_l2_uc_entry(&e, port, vid, mac);
+ priv->r->write_cam(idx, &e);
+ goto out;
+ }
+
err = -ENOTSUPP;
out:
mutex_unlock(&priv->reg_mutex);
@@ -1057,41 +1506,29 @@ static int rtl83xx_port_fdb_del(struct dsa_switch *ds, int port,
{
struct rtl838x_switch_priv *priv = ds->priv;
u64 mac = ether_addr_to_u64(addr);
- u32 key = rtl83xx_hash_key(priv, mac, vid);
struct rtl838x_l2_entry e;
- u32 r[3];
- u64 entry;
- int idx = -1, err = 0, i;
+ int err = 0, idx;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
- pr_debug("In %s, mac %llx, vid: %d, key: %x08x\n", __func__, mac, vid, key);
+ pr_debug("In %s, mac %llx, vid: %d\n", __func__, mac, vid);
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < 4; i++) {
- entry = priv->r->read_l2_entry_using_hash(key, i, &e);
- if (!e.valid)
- continue;
- if ((entry & 0x0fffffffffffffffULL) == ((mac << 12) | vid)) {
- idx = (key << 2) | i;
- break;
- }
- }
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e);
+
+ pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3);
if (idx >= 0) {
- r[0] = r[1] = r[2] = 0;
- rtl83xx_write_hash(idx, r);
+ e.valid = false;
+ dump_l2_entry(&e);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
goto out;
}
/* Check CAM for spillover from hash buckets */
- for (i = 0; i < 64; i++) {
- entry = priv->r->read_cam(i, &e);
- if ((entry & 0x0fffffffffffffffULL) == ((mac << 12) | vid)) {
- idx = i;
- break;
- }
- }
+	idx = rtl83xx_find_l2_cam_entry(priv, seed, true, &e);
+
if (idx >= 0) {
- r[0] = r[1] = r[2] = 0;
- rtl83xx_write_cam(idx, r);
+ e.valid = false;
+ priv->r->write_cam(idx, &e);
goto out;
}
err = -ENOENT;
@@ -1106,9 +1543,8 @@ static int rtl83xx_port_fdb_dump(struct dsa_switch *ds, int port,
struct rtl838x_l2_entry e;
struct rtl838x_switch_priv *priv = ds->priv;
int i;
- u32 fid;
- u32 pkey;
- u64 mac;
+ u32 fid, pkey, key;
+ u64 mac, seed;
mutex_lock(&priv->reg_mutex);
@@ -1118,14 +1554,26 @@ static int rtl83xx_port_fdb_dump(struct dsa_switch *ds, int port,
if (!e.valid)
continue;
- if (e.port == port) {
- fid = (i & 0x3ff) | (e.rvid & ~0x3ff);
+ if (e.port == port || e.port == RTL930X_PORT_IGNORE) {
+ fid = ((i >> 2) & 0x3ff) | (e.rvid & ~0x3ff);
mac = ether_addr_to_u64(&e.mac[0]);
- pkey = rtl838x_hash(priv, mac << 12 | fid);
+ pkey = priv->r->l2_hash_key(priv, priv->r->l2_hash_seed(mac, fid));
fid = (pkey & 0x3ff) | (fid & ~0x3ff);
- pr_debug("-> mac %016llx, fid: %d\n", mac, fid);
+ pr_debug("-> index %d, key %x, bucket %d, dmac %016llx, fid: %x rvid: %x\n",
+ i, i >> 2, i & 0x3, mac, fid, e.rvid);
+ dump_l2_entry(&e);
+ seed = priv->r->l2_hash_seed(mac, e.rvid);
+ key = priv->r->l2_hash_key(priv, seed);
+ pr_debug("seed: %016llx, key based on rvid: %08x\n", seed, key);
cb(e.mac, e.vid, e.is_static, data);
}
+ if (e.type == L2_MULTICAST) {
+ u64 portmask = priv->r->read_mcast_pmask(e.mc_portmask_index);
+ if (portmask & BIT_ULL(port)) {
+ dump_l2_entry(&e);
+ pr_debug(" PM: %016llx\n", portmask);
+ }
+ }
}
for (i = 0; i < 64; i++) {
@@ -1138,21 +1586,210 @@ static int rtl83xx_port_fdb_dump(struct dsa_switch *ds, int port,
cb(e.mac, e.vid, e.is_static, data);
}
+
mutex_unlock(&priv->reg_mutex);
return 0;
}
+static int rtl83xx_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ if (priv->id >= 0x9300)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int rtl83xx_mc_group_alloc(struct rtl838x_switch_priv *priv, int port)
+{
+ int mc_group = find_first_zero_bit(priv->mc_group_bm, MAX_MC_GROUPS - 1);
+ u64 portmask;
+
+ if (mc_group >= MAX_MC_GROUPS - 1)
+ return -1;
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+ pr_debug("Using MC group %d\n", mc_group);
+ set_bit(mc_group, priv->mc_group_bm);
+ mc_group++; // We cannot use group 0, as this is used for lookup miss flooding
+ portmask = BIT_ULL(port) | BIT_ULL(priv->cpu_port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+
+ return mc_group;
+}
+
+static u64 rtl83xx_mc_group_add_port(struct rtl838x_switch_priv *priv, int mc_group, int port)
+{
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return portmask;
+ }
+ portmask |= BIT_ULL(port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+
+ return portmask;
+}
+
+static u64 rtl83xx_mc_group_del_port(struct rtl838x_switch_priv *priv, int mc_group, int port)
+{
+ u64 portmask = priv->r->read_mcast_pmask(mc_group);
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return portmask;
+ }
+	portmask &= ~BIT_ULL(port);
+	priv->r->write_mcast_pmask(mc_group, portmask);
+ if (portmask == BIT_ULL(priv->cpu_port)) {
+ portmask &= ~BIT_ULL(priv->cpu_port);
+ priv->r->write_mcast_pmask(mc_group, portmask);
+ clear_bit(mc_group, priv->mc_group_bm);
+ }
+
+ return portmask;
+}
+
+static void rtl83xx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(mdb->addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ int vid = mdb->vid;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+ int mc_group;
+
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return;
+ }
+ pr_debug("In %s port %d, mac %llx, vid: %d\n", __func__, port, mac, vid);
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
+
+ // Found an existing or empty entry
+ if (idx >= 0) {
+ if (e.valid) {
+ pr_debug("Found an existing entry %016llx, mc_group %d\n",
+ ether_addr_to_u64(e.mac), e.mc_portmask_index);
+ rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port);
+ } else {
+ pr_debug("New entry for seed %016llx\n", seed);
+ mc_group = rtl83xx_mc_group_alloc(priv, port);
+ if (mc_group < 0) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+ rtl83xx_setup_l2_mc_entry(priv, &e, vid, mac, mc_group);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ }
+ goto out;
+ }
+
+ // Hash buckets full, try CAM
+	idx = rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
+
+ if (idx >= 0) {
+ if (e.valid) {
+ pr_debug("Found existing CAM entry %016llx, mc_group %d\n",
+ ether_addr_to_u64(e.mac), e.mc_portmask_index);
+ rtl83xx_mc_group_add_port(priv, e.mc_portmask_index, port);
+ } else {
+ pr_debug("New entry\n");
+ mc_group = rtl83xx_mc_group_alloc(priv, port);
+ if (mc_group < 0) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+ rtl83xx_setup_l2_mc_entry(priv, &e, vid, mac, mc_group);
+ priv->r->write_cam(idx, &e);
+ }
+ goto out;
+ }
+
+ err = -ENOTSUPP;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ if (err)
+ dev_err(ds->dev, "failed to add MDB entry\n");
+}
+
+int rtl83xx_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 mac = ether_addr_to_u64(mdb->addr);
+ struct rtl838x_l2_entry e;
+ int err = 0, idx;
+ int vid = mdb->vid;
+ u64 seed = priv->r->l2_hash_seed(mac, vid);
+ u64 portmask;
+ pr_debug("%s: %d\n", __func__, port);
+ if (priv->is_lagmember[port]) {
+ pr_info("%s: %d is lag slave. ignore\n", __func__, port);
+ return 0;
+ }
+
+ pr_debug("In %s, port %d, mac %llx, vid: %d\n", __func__, port, mac, vid);
+ mutex_lock(&priv->reg_mutex);
+
+ idx = rtl83xx_find_l2_hash_entry(priv, seed, true, &e);
+
+ pr_debug("Found entry index %d, key %d and bucket %d\n", idx, idx >> 2, idx & 3);
+ if (idx >= 0) {
+ portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port);
+ if (!portmask) {
+ e.valid = false;
+ // dump_l2_entry(&e);
+ priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
+ }
+ goto out;
+ }
+
+ /* Check CAM for spillover from hash buckets */
+	idx = rtl83xx_find_l2_cam_entry(priv, seed, true, &e);
+
+ if (idx >= 0) {
+ portmask = rtl83xx_mc_group_del_port(priv, e.mc_portmask_index, port);
+ if (!portmask) {
+ e.valid = false;
+ // dump_l2_entry(&e);
+ priv->r->write_cam(idx, &e);
+ }
+ goto out;
+ }
+ // TODO: Re-enable with a newer kernel: err = -ENOENT;
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+}
+
static int rtl83xx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress)
{
/* We support 4 mirror groups, one destination port per group */
- int group;
+ int group, err = 0;
struct rtl838x_switch_priv *priv = ds->priv;
- int ctrl_reg, dpm_reg, spm_reg;
-
+ struct rtl838x_vlan_info info;
+ int ctrl_reg, dpm_reg, spm_reg;
+ int v;
pr_debug("In %s\n", __func__);
+ mutex_lock(&priv->reg_mutex);
+
for (group = 0; group < 4; group++) {
if (priv->mirror_group_ports[group] == mirror->to_local_port)
break;
@@ -1164,41 +1801,42 @@ static int rtl83xx_port_mirror_add(struct dsa_switch *ds, int port,
}
}
- if (group >= 4)
- return -ENOSPC;
-
- ctrl_reg = priv->r->mir_ctrl + group * 4;
- dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width;
- spm_reg = priv->r->mir_spm + group * 4 * priv->port_width;
+ if (group >= 4) {
+ err = -ENOSPC;
+ goto out;
+ }
- pr_debug("Using group %d\n", group);
- mutex_lock(&priv->reg_mutex);
+ ctrl_reg = priv->r->mir_ctrl + (group * 4);
+ dpm_reg = priv->r->mir_dpm + ((group << 2) * priv->port_width);
+ spm_reg = priv->r->mir_spm + ((group << 2) * priv->port_width);
+ pr_debug("Using group %d local port %d, port %d\n", group, mirror->to_local_port, port);
+
if (priv->family_id == RTL8380_FAMILY_ID) {
/* Enable mirroring to port across VLANs (bit 11) */
- sw_w32(1 << 11 | (mirror->to_local_port << 4) | 1, ctrl_reg);
+ sw_w32(1 << 11 | ( mirror->to_local_port << 4) | 1, ctrl_reg);
} else {
/* Enable mirroring to destination port */
- sw_w32((mirror->to_local_port << 4) | 1, ctrl_reg);
+ sw_w32(( mirror->to_local_port << 4) | 1, ctrl_reg);
}
if (ingress && (priv->r->get_port_reg_be(spm_reg) & (1ULL << port))) {
- mutex_unlock(&priv->reg_mutex);
- return -EEXIST;
+ err = -EEXIST;
+ goto out;
}
if ((!ingress) && (priv->r->get_port_reg_be(dpm_reg) & (1ULL << port))) {
- mutex_unlock(&priv->reg_mutex);
- return -EEXIST;
+ err = -EEXIST;
+ goto out;
}
-
if (ingress)
priv->r->mask_port_reg_be(0, 1ULL << port, spm_reg);
else
priv->r->mask_port_reg_be(0, 1ULL << port, dpm_reg);
priv->mirror_group_ports[group] = mirror->to_local_port;
+ out:
mutex_unlock(&priv->reg_mutex);
- return 0;
+ return err;
}
static void rtl83xx_port_mirror_del(struct dsa_switch *ds, int port,
@@ -1209,18 +1847,18 @@ static void rtl83xx_port_mirror_del(struct dsa_switch *ds, int port,
int ctrl_reg, dpm_reg, spm_reg;
pr_debug("In %s\n", __func__);
+ mutex_lock(&priv->reg_mutex);
for (group = 0; group < 4; group++) {
if (priv->mirror_group_ports[group] == mirror->to_local_port)
break;
}
if (group >= 4)
- return;
+ goto out;
ctrl_reg = priv->r->mir_ctrl + group * 4;
- dpm_reg = priv->r->mir_dpm + group * 4 * priv->port_width;
- spm_reg = priv->r->mir_spm + group * 4 * priv->port_width;
+ dpm_reg = priv->r->mir_dpm + (group << 2) * priv->port_width;
+ spm_reg = priv->r->mir_spm + (group << 2) * priv->port_width;
- mutex_lock(&priv->reg_mutex);
if (mirror->ingress) {
/* Ingress, clear source port matrix */
priv->r->mask_port_reg_be(1ULL << port, 0, spm_reg);
@@ -1228,13 +1866,214 @@ static void rtl83xx_port_mirror_del(struct dsa_switch *ds, int port,
/* Egress, clear destination port matrix */
priv->r->mask_port_reg_be(1ULL << port, 0, dpm_reg);
}
+
+ if (!(priv->r->get_port_reg_be(dpm_reg) & (1ULL << port)) && !(priv->r->get_port_reg_be(spm_reg) & (1ULL << port))) {
- if (!(sw_r32(spm_reg) || sw_r32(dpm_reg))) {
priv->mirror_group_ports[group] = -1;
sw_w32(0, ctrl_reg);
}
+ out:
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static bool rtl83xx_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+	int id;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ return false;
+ }
+ if (info->hash_type != NETDEV_LAG_HASH_L2 && info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
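+
+/*
+ * In practice only bonds that the kernel reports with NETDEV_LAG_TX_TYPE_HASH and
+ * an L2 or L2+L3 hash (typically balance-xor or 802.3ad with xmit_hash_policy
+ * layer2 or layer2+3) pass this check; everything else stays in software bonding.
+ */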
+
+static void add_portmatrix(struct dsa_switch *ds, int port)
+{
+ int i;
+ struct rtl838x_switch_priv *priv = ds->priv;
+ u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ /* Add this port to the port matrix of the other ports in the
+ * same bridge. If the port is disabled, port matrix is kept
+ * and not being setup until the port becomes enabled.
+ */
+ if (dsa_is_user_port(ds, i) && i != port && !priv->is_lagmember[i]) {
+ if (priv->ports[i].enable) {
+ priv->r->traffic_enable(i, port);
+ }
+ priv->ports[i].pm |= BIT_ULL(port);
+ port_bitmap |= BIT_ULL(i);
+ }
+ if (priv->is_lagmember[i] || i == port) {
+			priv->ports[i].pm &= ~BIT_ULL(i);
+			port_bitmap &= ~BIT_ULL(i);
+ }
+ }
+ load_mcgroups(priv, port);
+ /* Add all other ports to this port matrix. */
+ if (priv->ports[port].enable) {
+ priv->r->traffic_enable(priv->cpu_port, port);
+ v = priv->r->traffic_get(port);
+ v |= port_bitmap;
+ priv->r->traffic_set(port, v);
+ }
+ priv->ports[port].pm |= port_bitmap;
+}
+/* Currently a no-op: there is nothing to update here when a LAG member changes state */
+static int rtl83xx_port_lag_change(struct dsa_switch *ds, int port)
+{
+	struct rtl838x_switch_priv *priv = ds->priv;
+ pr_debug("%s: %d\n", __func__, port);
+ mutex_lock(&priv->reg_mutex);
mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+void rtl83xx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+
+static int rtl83xx_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ int i, err = 0;
+
+ if (!rtl83xx_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (i = 0; i < priv->n_lags; i++) {
+ if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == lag))
+ break;
+ }
+ if (port >= priv->cpu_port) {
+ err = -EINVAL;
+ goto out;
+ }
+ pr_info("port_lag_join: group %d, port %d\n",i, port);
+ if (!priv->lag_devs[i])
+ priv->lag_devs[i] = lag;
+
+	if (priv->lag_primary[i] == -1)
+		priv->lag_primary[i] = port;
+	else
+		priv->is_lagmember[port] = 1;
+
+ priv->lagmembers |= (1ULL << port);
+
+ pr_debug("lag_members = %llX\n", priv->lagmembers);
+ err = rtl83xx_lag_add(priv->ds, i, port, info);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+ return err;
+
+}
+
+static int rtl83xx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ int i, group = -1, err;
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+	for (i = 0; i < priv->n_lags; i++) {
+ if (priv->lags_port_members[i] & BIT_ULL(port)) {
+ group = i;
+ break;
+ }
+ }
+
+ if (group == -1) {
+ pr_info("port_lag_leave: port %d is not a member\n", port);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (port >= priv->cpu_port) {
+ err = -EINVAL;
+ goto out;
+ }
+ pr_info("port_lag_del: group %d, port %d\n",group, port);
+ priv->lagmembers &=~ (1ULL << port);
+ priv->lag_primary[i] = -1;
+ priv->is_lagmember[port] = 0;
+ pr_debug("lag_members = %llX\n", priv->lagmembers);
+ err = rtl83xx_lag_del(priv->ds, group, port);
+ if (err) {
+ err = -EINVAL;
+ goto out;
+ }
+ if (!priv->lags_port_members[i])
+ priv->lag_devs[i] = NULL;
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+	return err;
+}
+static int rtl83xx_port_pre_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ unsigned long features = 0;
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ if (priv->r->enable_learning)
+ features |= BR_LEARNING;
+ if (priv->r->enable_flood)
+ features |= BR_FLOOD;
+ if (priv->r->enable_mcast_flood)
+ features |= BR_MCAST_FLOOD;
+ if (priv->r->enable_bcast_flood)
+ features |= BR_BCAST_FLOOD;
+ if (flags & ~(features))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtl83xx_port_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ if (priv->r->enable_learning)
+ priv->r->enable_learning(port, !!(flags & BR_LEARNING));
+
+ if (priv->r->enable_flood)
+ priv->r->enable_flood(port, !!(flags & BR_FLOOD));
+
+ if (priv->r->enable_mcast_flood)
+ priv->r->enable_mcast_flood(port, !!(flags & BR_MCAST_FLOOD));
+
+ if (priv->r->enable_bcast_flood)
+ priv->r->enable_bcast_flood(port, !!(flags & BR_BCAST_FLOOD));
+
+ return 0;
+
}
int dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg)
@@ -1243,6 +2082,8 @@ int dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg)
u32 offset = 0;
struct rtl838x_switch_priv *priv = ds->priv;
+ pr_debug("%s: %X:%X\n", __func__, phy_addr, phy_reg);
+ if (priv->family_id == RTL8380_FAMILY_ID) {
if (phy_addr >= 24 && phy_addr <= 27
&& priv->ports[24].phy == PHY_RTL838X_SDS) {
if (phy_addr == 26)
@@ -1250,7 +2091,7 @@ int dsa_phy_read(struct dsa_switch *ds, int phy_addr, int phy_reg)
val = sw_r32(RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2)) & 0xffff;
return val;
}
-
+ }
read_phy(phy_addr, 0, phy_reg, &val);
return val;
}
@@ -1259,7 +2100,9 @@ int dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val)
{
u32 offset = 0;
struct rtl838x_switch_priv *priv = ds->priv;
+ pr_debug("%s: %X:%X\n", __func__, phy_addr, phy_reg);
+ if (priv->family_id == RTL8380_FAMILY_ID) {
if (phy_addr >= 24 && phy_addr <= 27
&& priv->ports[24].phy == PHY_RTL838X_SDS) {
if (phy_addr == 26)
@@ -1267,6 +2110,7 @@ int dsa_phy_write(struct dsa_switch *ds, int phy_addr, int phy_reg, u16 val)
sw_w32(val, RTL838X_SDS4_FIB_REG0 + offset + (phy_reg << 2));
return 0;
}
+ }
return write_phy(phy_addr, 0, phy_reg, val);
}
@@ -1283,6 +2127,7 @@ const struct dsa_switch_ops rtl83xx_switch_ops = {
.phylink_mac_link_down = rtl83xx_phylink_mac_link_down,
.phylink_mac_link_up = rtl83xx_phylink_mac_link_up,
+
.get_strings = rtl83xx_get_strings,
.get_ethtool_stats = rtl83xx_get_ethtool_stats,
.get_sset_count = rtl83xx_get_sset_count,
@@ -1308,8 +2153,18 @@ const struct dsa_switch_ops rtl83xx_switch_ops = {
.port_fdb_del = rtl83xx_port_fdb_del,
.port_fdb_dump = rtl83xx_port_fdb_dump,
+ .port_mdb_prepare = rtl83xx_port_mdb_prepare,
+ .port_mdb_add = rtl83xx_port_mdb_add,
+ .port_mdb_del = rtl83xx_port_mdb_del,
+
.port_mirror_add = rtl83xx_port_mirror_add,
.port_mirror_del = rtl83xx_port_mirror_del,
+
+ .port_lag_change = rtl83xx_port_lag_change,
+ .port_lag_join = rtl83xx_port_lag_join,
+ .port_lag_leave = rtl83xx_port_lag_leave,
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
};
const struct dsa_switch_ops rtl930x_switch_ops = {
@@ -1319,11 +2174,11 @@ const struct dsa_switch_ops rtl930x_switch_ops = {
.phy_read = dsa_phy_read,
.phy_write = dsa_phy_write,
- .phylink_validate = rtl83xx_phylink_validate,
- .phylink_mac_link_state = rtl83xx_phylink_mac_link_state,
- .phylink_mac_config = rtl83xx_phylink_mac_config,
- .phylink_mac_link_down = rtl83xx_phylink_mac_link_down,
- .phylink_mac_link_up = rtl83xx_phylink_mac_link_up,
+ .phylink_validate = rtl93xx_phylink_validate,
+ .phylink_mac_link_state = rtl93xx_phylink_mac_link_state,
+ .phylink_mac_config = rtl93xx_phylink_mac_config,
+ .phylink_mac_link_down = rtl93xx_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl93xx_phylink_mac_link_up,
.get_strings = rtl83xx_get_strings,
.get_ethtool_stats = rtl83xx_get_ethtool_stats,
@@ -1332,6 +2187,9 @@ const struct dsa_switch_ops rtl930x_switch_ops = {
.port_enable = rtl83xx_port_enable,
.port_disable = rtl83xx_port_disable,
+ .get_mac_eee = rtl93xx_get_mac_eee,
+ .set_mac_eee = rtl83xx_set_mac_eee,
+
.set_ageing_time = rtl83xx_set_l2aging,
.port_bridge_join = rtl83xx_port_bridge_join,
.port_bridge_leave = rtl83xx_port_bridge_leave,
@@ -1346,4 +2204,15 @@ const struct dsa_switch_ops rtl930x_switch_ops = {
.port_fdb_add = rtl83xx_port_fdb_add,
.port_fdb_del = rtl83xx_port_fdb_del,
.port_fdb_dump = rtl83xx_port_fdb_dump,
+
+ .port_mdb_prepare = rtl83xx_port_mdb_prepare,
+ .port_mdb_add = rtl83xx_port_mdb_add,
+ .port_mdb_del = rtl83xx_port_mdb_del,
+
+ .port_lag_change = rtl83xx_port_lag_change,
+ .port_lag_join = rtl83xx_port_lag_join,
+ .port_lag_leave = rtl83xx_port_lag_leave,
+
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
};
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.c
index de9e83bb8d..72377fb79c 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.c
@@ -1,10 +1,102 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include <net/nexthop.h>
+
#include "rtl83xx.h"
extern struct mutex smi_lock;
+// See dal_maple_acl_log2PhyTmplteField and src/app/diag_v2/src/diag_acl.c
+/* Definition of the RTL838X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPMMASK = 0,
+ TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-28
+ TEMPLATE_FIELD_RANGE_CHK = 3,
+ TEMPLATE_FIELD_DMAC0 = 4, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 5, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 6, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 7, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 8, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 9, // Source MAC [47:32]
+	TEMPLATE_FIELD_ETHERTYPE = 10,		// Ethernet type
+ TEMPLATE_FIELD_OTAG = 11, // Outer VLAN tag
+ TEMPLATE_FIELD_ITAG = 12, // Inner VLAN tag
+ TEMPLATE_FIELD_SIP0 = 13, // IPv4 or IPv6 source IP[15:0] or ARP/RARP
+ // source protocol address in header
+ TEMPLATE_FIELD_SIP1 = 14, // IPv4 or IPv6 source IP[31:16] or ARP/RARP
+ TEMPLATE_FIELD_DIP0 = 15, // IPv4 or IPv6 destination IP[15:0]
+ TEMPLATE_FIELD_DIP1 = 16, // IPv4 or IPv6 destination IP[31:16]
+ TEMPLATE_FIELD_IP_TOS_PROTO = 17, // IPv4 TOS/IPv6 traffic class and
+ // IPv4 proto/IPv6 next header fields
+ TEMPLATE_FIELD_L34_HEADER = 18, // packet with extra tag and IPv6 with auth, dest,
+ // frag, route, hop-by-hop option header,
+ // IGMP type, TCP flag
+ TEMPLATE_FIELD_L4_SPORT = 19, // TCP/UDP source port
+ TEMPLATE_FIELD_L4_DPORT = 20, // TCP/UDP destination port
+ TEMPLATE_FIELD_ICMP_IGMP = 21,
+ TEMPLATE_FIELD_IP_RANGE = 22,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 23, // Field selector mask
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 24,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 25,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 26,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 27,
+ TEMPLATE_FIELD_SIP2 = 28, // IPv6 source IP[47:32]
+ TEMPLATE_FIELD_SIP3 = 29, // IPv6 source IP[63:48]
+ TEMPLATE_FIELD_SIP4 = 30, // IPv6 source IP[79:64]
+ TEMPLATE_FIELD_SIP5 = 31, // IPv6 source IP[95:80]
+ TEMPLATE_FIELD_SIP6 = 32, // IPv6 source IP[111:96]
+ TEMPLATE_FIELD_SIP7 = 33, // IPv6 source IP[127:112]
+ TEMPLATE_FIELD_DIP2 = 34, // IPv6 destination IP[47:32]
+ TEMPLATE_FIELD_DIP3 = 35, // IPv6 destination IP[63:48]
+ TEMPLATE_FIELD_DIP4 = 36, // IPv6 destination IP[79:64]
+ TEMPLATE_FIELD_DIP5 = 37, // IPv6 destination IP[95:80]
+ TEMPLATE_FIELD_DIP6 = 38, // IPv6 destination IP[111:96]
+ TEMPLATE_FIELD_DIP7 = 39, // IPv6 destination IP[127:112]
+ TEMPLATE_FIELD_FWD_VID = 40, // Forwarding VLAN-ID
+ TEMPLATE_FIELD_FLOW_LABEL = 41,
+};
+
+/*
+ * The RTL838X SoCs use 5 fixed templates with definitions for which data fields are to
+ * be copied from the Ethernet Frame header into the 12 User-definable fields of the Packet
+ * Inspection Engine's buffer. The following defines the field contents for each of the fixed
+ * templates. Additionally, 3 user-definable templates can be set up via the definitions
+ * in RTL838X_ACL_TMPLTE_CTRL control registers.
+ * TODO: See also src/app/diag_v2/src/diag_pie.c
+ */
+#define N_FIXED_TEMPLATES 5
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_OTAG,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_RANGE_CHK
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+		TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0,
+ TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ },
+};
+
void rtl838x_print_matrix(void)
{
unsigned volatile int *ptr8;
@@ -12,10 +104,10 @@ void rtl838x_print_matrix(void)
ptr8 = RTL838X_SW_BASE + RTL838X_PORT_ISO_CTRL(0);
for (i = 0; i < 28; i += 8)
- pr_info("> %8x %8x %8x %8x %8x %8x %8x %8x\n",
+ pr_debug("> %8x %8x %8x %8x %8x %8x %8x %8x\n",
ptr8[i + 0], ptr8[i + 1], ptr8[i + 2], ptr8[i + 3],
ptr8[i + 4], ptr8[i + 5], ptr8[i + 6], ptr8[i + 7]);
- pr_info("CPU_PORT> %8x\n", ptr8[28]);
+ pr_debug("CPU_PORT> %8x\n", ptr8[28]);
}
static inline int rtl838x_port_iso_ctrl(int p)
@@ -42,56 +134,98 @@ static inline int rtl838x_tbl_access_data_0(int i)
static void rtl838x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
- u32 cmd, v;
+ u32 v;
+ // Read VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
+
+ rtl_table_read(r, vlan);
+ info->tagged_ports = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ pr_debug("VLAN_READ %d: %016llx %08x\n", vlan, info->tagged_ports, v);
+ rtl_table_release(r);
- cmd = BIT(15) /* Execute cmd */
- | BIT(14) /* Read */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- rtl838x_exec_tbl0_cmd(cmd);
- info->tagged_ports = sw_r32(RTL838X_TBL_ACCESS_DATA_0(0));
- v = sw_r32(RTL838X_TBL_ACCESS_DATA_0(1));
info->profile_id = v & 0x7;
info->hash_mc_fid = !!(v & 0x8);
info->hash_uc_fid = !!(v & 0x10);
info->fid = (v >> 5) & 0x3f;
-
- cmd = BIT(15) /* Execute cmd */
- | BIT(14) /* Read */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- rtl838x_exec_tbl1_cmd(cmd);
- info->untagged_ports = sw_r32(RTL838X_TBL_ACCESS_DATA_1(0));
+ // Read UNTAG table (0) via table register 1
+ r = rtl_table_get(RTL8380_TBL_1, 0);
+ rtl_table_read(r, vlan);
+ info->untagged_ports = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
}
static void rtl838x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
- u32 cmd = BIT(15) /* Execute cmd */
- | 0 << 14 /* Write */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
u32 v;
+ // Access VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
- sw_w32(info->tagged_ports, RTL838X_TBL_ACCESS_DATA_0(0));
+ sw_w32(info->tagged_ports, rtl_table_data(r, 0));
v = info->profile_id;
v |= info->hash_mc_fid ? 0x8 : 0;
v |= info->hash_uc_fid ? 0x10 : 0;
v |= ((u32)info->fid) << 5;
+ sw_w32(v, rtl_table_data(r, 1));
- sw_w32(v, RTL838X_TBL_ACCESS_DATA_0(1));
- rtl838x_exec_tbl0_cmd(cmd);
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
}
static void rtl838x_vlan_set_untagged(u32 vlan, u64 portmask)
{
- u32 cmd = BIT(15) /* Execute cmd */
- | 0 << 14 /* Write */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- sw_w32(portmask & 0x1fffffff, RTL838X_TBL_ACCESS_DATA_1(0));
- rtl838x_exec_tbl1_cmd(cmd);
+ // Access UNTAG table (0) via register 1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 0);
+
+ sw_w32(portmask & 0x1fffffff, rtl_table_data(r, 0));
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
+}
+
+/* Sets whether L2 forwarding is based on the inner or the outer VLAN tag
+ */
+static void rtl838x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ if (is_set)
+ sw_w32_mask(BIT(port), 0, RTL838X_VLAN_PORT_FWD);
+ else
+ sw_w32_mask(0, BIT(port), RTL838X_VLAN_PORT_FWD);
+}
+
+static u64 rtl838x_l2_hash_seed(u64 mac, u32 vid)
+{
+ return mac << 12 | vid;
+}
+
+/*
+ * Applies the hash algorithm currently configured in the ASIC to the seed
+ * and returns a key into the L2 hash table
+ */
+static u32 rtl838x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 h1, h2, h3, h;
+
+ if (sw_r32(priv->r->l2_ctrl_0) & 1) {
+ h1 = (seed >> 11) & 0x7ff;
+ h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
+
+ h2 = (seed >> 33) & 0x7ff;
+ h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x1f);
+
+ h3 = (seed >> 44) & 0x7ff;
+ h3 = ((h3 & 0x7f) << 4) | ((h3 >> 7) & 0xf);
+
+ h = h1 ^ h2 ^ h3 ^ ((seed >> 55) & 0x1ff);
+ h ^= ((seed >> 22) & 0x7ff) ^ (seed & 0x7ff);
+ } else {
+ h = ((seed >> 55) & 0x1ff) ^ ((seed >> 44) & 0x7ff)
+ ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
+ ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff);
+ }
+
+ return h;
}
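+
+/*
+ * Worked example (illustrative values only): for MAC 00:11:22:33:44:55 in VLAN 10,
+ * rtl838x_l2_hash_seed() returns 0x001122334455 << 12 | 10 = 0x00112233445500a.
+ * rtl838x_l2_hash_key() then folds this seed into (at most) 11-bit slices and XORs
+ * them together (with some slices having their halves swapped when bit 0 of
+ * l2_ctrl_0 is set), so the result is always a bucket index in the range 0-0x7ff.
+ */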
static inline int rtl838x_mac_force_mode_ctrl(int p)
@@ -124,97 +258,300 @@ inline static int rtl838x_trk_mbr_ctr(int group)
return RTL838X_TRK_MBR_CTR + (group << 2);
}
-static u64 rtl838x_read_l2_entry_using_hash(u32 hash, u32 position, struct rtl838x_l2_entry *e)
+/*
+ * Fills an L2 entry structure from the SoC registers
+ */
+static void rtl838x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
+{
+ /* Table contains different entry types, we need to identify the right one:
+ * Check for MC entries, first
+ * In contrast to the RTL93xx SoCs, there is no valid bit, use heuristics to
+ * identify valid entries
+ */
+ e->is_ip_mc = !!(r[0] & BIT(22));
+ e->is_ipv6_mc = !!(r[0] & BIT(21));
+ e->type = L2_INVALID;
+
+ pr_debug("%s: REGISTERS %08x %08x %08x\n", __func__, r[0], r[1], r[2]);
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ e->mac[0] = (r[1] >> 20);
+ e->mac[1] = (r[1] >> 12);
+ e->mac[2] = (r[1] >> 4);
+ e->mac[3] = (r[1] & 0xf) << 4 | (r[2] >> 28);
+ e->mac[4] = (r[2] >> 20);
+ e->mac[5] = (r[2] >> 12);
+
+ e->rvid = r[2] & 0xfff;
+ e->vid = r[0] & 0xfff;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ e->is_static = !!((r[0] >> 19) & 1);
+ e->port = (r[0] >> 12) & 0x1f;
+ e->block_da = !!(r[1] & BIT(30));
+ e->block_sa = !!(r[1] & BIT(31));
+ e->suspended = !!(r[1] & BIT(29));
+ e->next_hop = !!(r[1] & BIT(28));
+ if (e->next_hop) {
+ pr_info("Found next hop entry, need to read extra data\n");
+ e->nh_vlan_target = !!(r[0] & BIT(9));
+ e->nh_route_id = r[0] & 0x1ff;
+ e->vid = e->rvid;
+ }
+ e->age = (r[0] >> 17) & 0x3;
+ e->valid = true;
+
+			/* A valid entry has at least one of the multicast, aging,
+			 * SA/DA-blocking, next-hop or static-entry bits set */
+ if (!(r[0] & 0x007c0000) && !(r[1] & 0xd0000000))
+ e->valid = false;
+ else
+ e->type = L2_UNICAST;
+ } else { // L2 multicast
+ pr_debug("Got L2 MC entry: %08x %08x %08x\n", r[0], r[1], r[2]);
+ e->valid = true;
+ e->type = L2_MULTICAST;
+ e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
+ }
+ } else { // IPv4 and IPv6 multicast
+ e->valid = true;
+ e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
+ e->mc_gip = (r[1] << 20) | (r[2] >> 12);
+ e->rvid = r[2] & 0xfff;
+ }
+ if (e->is_ip_mc)
+ e->type = IP4_MULTICAST;
+ if (e->is_ipv6_mc)
+ e->type = IP6_MULTICAST;
+}
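+
+/*
+ * Decoding summary for the three table words handled above (for reference only):
+ * r[0] bit 22 flags an IPv4 multicast entry and bit 21 an IPv6 multicast entry;
+ * for everything else the multicast bit of the MAC address decides between an
+ * L2 unicast entry (port, aging, blocking and next-hop data) and an L2 multicast
+ * entry that only carries an index into the portmask table.
+ */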
+
+/*
+ * Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry
+ */
+static void rtl838x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ u64 mac = ether_addr_to_u64(e->mac);
+
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[0] = e->is_ip_mc ? BIT(22) : 0;
+ r[0] |= e->is_ipv6_mc ? BIT(21) : 0;
+
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ r[1] = mac >> 20;
+ r[2] = (mac & 0xfffff) << 12;
+
+ /* Is it a unicast entry? check multicast bit */
+ if (!(e->mac[0] & 1)) {
+ r[0] |= e->is_static ? BIT(19) : 0;
+ r[0] |= (e->port & 0x3f) << 12;
+ r[0] |= e->vid;
+ r[1] |= e->block_da ? BIT(30) : 0;
+ r[1] |= e->block_sa ? BIT(31) : 0;
+ r[1] |= e->suspended ? BIT(29) : 0;
+ r[2] |= e->rvid & 0xfff;
+ if (e->next_hop) {
+ r[1] |= BIT(28);
+ r[0] |= e->nh_vlan_target ? BIT(9) : 0;
+ r[0] |= e->nh_route_id & 0x1ff;
+ }
+ r[0] |= (e->age & 0x3) << 17;
+ } else { // L2 Multicast
+ r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
+ r[0] |= e->vid & 0xfff;
+ r[2] |= e->rvid & 0xfff;
+ pr_info("FILL MC: %08x %08x %08x\n", r[0], r[1], r[2]);
+ }
+ } else { // IPv4 and IPv6 multicast
+ r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
+ r[1] = e->mc_gip >> 20;
+ r[2] = e->mc_gip << 12;
+ r[2] |= e->rvid;
+ }
+ pr_info("%s: REGISTERS %08x %08x %08x\n", __func__, r[0], r[1], r[2]);
+}
+
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl838x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
{
u64 entry;
u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); // Access L2 Table 0
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ int i;
+
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
- /* Search in SRAM, with hash and at position in hash bucket (0-3) */
- u32 idx = (0 << 14) | (hash << 2) | position;
-
- u32 cmd = BIT(16) /* Execute cmd */
- | BIT(15) /* Read */
- | 0 << 13 /* Table type 0b00 */
- | (idx & 0x1fff);
-
- sw_w32(cmd, RTL838X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL838X_TBL_ACCESS_L2_CTRL) & BIT(16));
- r[0] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(0));
- r[1] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(1));
- r[2] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(2));
-
- e->mac[0] = (r[1] >> 20);
- e->mac[1] = (r[1] >> 12);
- e->mac[2] = (r[1] >> 4);
- e->mac[3] = (r[1] & 0xf) << 4 | (r[2] >> 28);
- e->mac[4] = (r[2] >> 20);
- e->mac[5] = (r[2] >> 12);
- e->is_static = !!((r[0] >> 19) & 1);
- e->vid = r[0] & 0xfff;
- e->rvid = r[2] & 0xfff;
- e->port = (r[0] >> 12) & 0x1f;
-
- e->valid = true;
- if (!(r[0] >> 17)) /* Check for invalid entry */
- e->valid = false;
-
- if (e->valid)
- pr_debug("Found in Hash: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
-
- entry = (((u64) r[1]) << 32) | (r[2] & 0xfffff000) | (r[0] & 0xfff);
+ rtl_table_release(q);
+
+ rtl838x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ entry = (((u64) r[1]) << 32) | (r[2]); // mac and vid concatenated as hash seed
return entry;
}
+static void rtl838x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
+ int i;
+
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+
+ rtl838x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
static u64 rtl838x_read_cam(int idx, struct rtl838x_l2_entry *e)
{
u64 entry;
u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
- u32 cmd = BIT(16) /* Execute cmd */
- | BIT(15) /* Read */
- | BIT(13) /* Table type 0b01 */
- | (idx & 0x3f);
- sw_w32(cmd, RTL838X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL838X_TBL_ACCESS_L2_CTRL) & BIT(16));
- r[0] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(0));
- r[1] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(1));
- r[2] = sw_r32(RTL838X_TBL_ACCESS_L2_DATA(2));
-
- e->mac[0] = (r[1] >> 20);
- e->mac[1] = (r[1] >> 12);
- e->mac[2] = (r[1] >> 4);
- e->mac[3] = (r[1] & 0xf) << 4 | (r[2] >> 28);
- e->mac[4] = (r[2] >> 20);
- e->mac[5] = (r[2] >> 12);
- e->is_static = !!((r[0] >> 19) & 1);
- e->vid = r[0] & 0xfff;
- e->rvid = r[2] & 0xfff;
- e->port = (r[0] >> 12) & 0x1f;
-
- e->valid = true;
- if (!(r[0] >> 17)) /* Check for invalid entry */
- e->valid = false;
-
- if (e->valid)
- pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
-
- entry = (((u64) r[1]) << 32) | (r[2] & 0xfffff000) | (r[0] & 0xfff);
+ rtl_table_release(q);
+
+ rtl838x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
+
+	// Return the MAC with the VID concatenated, as used for the hash seed
+ entry = (((u64) r[1]) << 32) | r[2];
return entry;
}
-static inline int rtl838x_vlan_profile(int profile)
+static void rtl838x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl838x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+
+{
+ u32 portmask;
+ // Read MC_PMSK (2) via register RTL8380_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ rtl_table_release(q);
+
+ return portmask;
+}
+
+static void rtl838x_write_mcast_pmask(int idx, u64 portmask)
+{
+ // Access MC_PMSK (2) via register RTL8380_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
+
+ sw_w32(((u32)portmask) & 0x1fffffff, rtl_table_data(q, 0));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static void rtl838x_vlan_profile_setup(int profile)
{
- return RTL838X_VLAN_PROFILE(profile);
+ u32 pmask_id = UNKNOWN_MC_PMASK;
+	// Enable L2 learning (bit 0) and use portmask index UNKNOWN_MC_PMASK for unknown MC traffic flooding
+ u32 p = 1 | pmask_id << 1 | pmask_id << 10 | pmask_id << 19;
+
+ sw_w32(p, RTL838X_VLAN_PROFILE(profile));
+
+ /* RTL8380 and RTL8390 use an index into the portmask table to set the
+ * unknown multicast portmask, setup a default at a safe location
+ * On RTL93XX, the portmask is directly set in the profile,
+ * see e.g. rtl9300_vlan_profile_setup
+ */
+ rtl838x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x1fffffff);
}
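+
+/*
+ * For reference (an interpretation of the shifts above, not taken from a datasheet):
+ * the same portmask-table index is written into three 9-bit fields of the profile
+ * word at bit offsets 1, 10 and 19, while the portmask entry itself is set to
+ * 0x1fffffff, i.e. all user ports plus the CPU port, so unknown multicast is
+ * flooded to every port by default.
+ */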
-static inline int rtl838x_vlan_port_egr_filter(int port)
+static void rtl838x_l2_learning_setup(void)
{
- return RTL838X_VLAN_PORT_EGR_FLTR;
+ /* Set portmask for broadcast traffic and unknown unicast address flooding
+ * to the reserved entry in the portmask table used also for
+ * multicast flooding */
+ sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL838X_L2_FLD_PMSK);
+
+ /* Enable learning constraint system-wide (bit 0), per-port (bit 1)
+ * and per vlan (bit 2) */
+ sw_w32(0x7, RTL838X_L2_LRN_CONSTRT_EN);
+
+ // Limit learning to maximum: 16k entries, after that just flood (bits 0-1)
+ sw_w32((0x3fff << 2) | 0, RTL838X_L2_LRN_CONSTRT);
+
+ // Do not trap ARP packets to CPU_PORT
+ sw_w32(0, RTL838X_SPCL_TRAP_ARP_CTRL);
}
-static inline int rtl838x_vlan_port_igr_filter(int port)
+static void rtl838x_enable_learning(int port, bool enable)
{
- return RTL838X_VLAN_PORT_IGR_FLTR(port);
+	// Limit learning to maximum: 16k entries, after that just flood (bits 0-1)
+
+	if (enable) {
+		// Flood after 16k learned entries
+		sw_w32((0x3fff << 2) | 0, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+	} else {
+		// Learning limit of 0: do not learn any addresses on this port
+		sw_w32(0, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+	}
+
+}
+static void rtl838x_enable_flood(int port, bool enable)
+{
+ u32 flood_mask = sw_r32(RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+
+ if (enable) {
+		// Action 0 in bits 1:0: flood
+		flood_mask &= ~3;
+		sw_w32(flood_mask, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+	} else {
+		// Action 1 in bits 1:0: drop
+		flood_mask &= ~3;
+		flood_mask |= 1;
+		sw_w32(flood_mask, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+ }
+
+}
+static void rtl838x_enable_mcast_flood(int port, bool enable)
+{
+
+}
+static void rtl838x_enable_bcast_flood(int port, bool enable)
+{
+
}
static void rtl838x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
@@ -263,6 +600,1056 @@ void rtl838x_traffic_disable(int source, int dest)
rtl838x_mask_port_reg(BIT(dest), 0, rtl838x_port_iso_ctrl(source));
}
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+static void rtl838x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+	// This works only for Ethernet ports; on the RTL838X, ports 24 and above are SFP
+ if (port >= 24)
+ return;
+
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0x3 : 0x0;
+
+ // Set EEE state for 100 (bit 9) & 1000MBit (bit 10)
+ sw_w32_mask(0x3 << 9, v << 9, priv->r->mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ if (enable) {
+ sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_TX_EN);
+ sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_RX_EN);
+ } else {
+ sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_TX_EN);
+ sw_w32_mask(BIT(port), 0, RTL838X_EEE_PORT_RX_EN);
+ }
+ priv->ports[port].eee_enabled = enable;
+}
+
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+static int rtl838x_eee_port_ability(struct rtl838x_switch_priv *priv,
+ struct ethtool_eee *e, int port)
+{
+ u64 link;
+
+ if (port >= 24)
+ return 0;
+
+ link = rtl839x_get_port_reg_le(RTL838X_MAC_LINK_STS);
+ if (!(link & BIT(port)))
+ return 0;
+
+ if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(9))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl838x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ if (sw_r32(RTL838X_MAC_EEE_ABLTY) & BIT(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ return 1;
+ }
+
+ return 0;
+}
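+
+/*
+ * Note: rtl83xx_get_mac_eee() combines the two halves filled in here,
+ * e->advertised (own ability from the force-mode register) and
+ * e->lp_advertised (link-partner ability from RTL838X_MAC_EEE_ABLTY),
+ * into e->eee_active = !!(e->advertised & e->lp_advertised).
+ */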
+
+static void rtl838x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+ sw_w32_mask(0x4, 0, RTL838X_SMI_GLB_CTRL);
+
+ /* Set timers for EEE */
+ sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
+ sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
+
+ // Enable EEE MAC support on ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl838x_port_eee_set(priv, i, enable);
+ }
+ priv->eee_enabled = enable;
+}
+
+static void rtl838x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+ u32 block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
+
+ // Make sure rule-lookup is enabled in the block
+ if (!(block_state & BIT(block)))
+ sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
+}
+
+static void rtl838x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+ int block_from = index_from / PIE_BLOCK_SIZE;
+ int block_to = index_to / PIE_BLOCK_SIZE;
+	u32 v = (index_from << 1) | (index_to << 12) | BIT(0);
+ int block;
+ u32 block_state;
+
+ pr_info("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Remember currently active blocks
+ block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
+
+ // Make sure rule-lookup is disabled in the relevant blocks
+ for (block = block_from; block <= block_to; block++) {
+ if (block_state & BIT(block))
+ sw_w32(block_state & (~BIT(block)), RTL838X_ACL_BLK_LOOKUP_CTRL);
+ }
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL838X_ACL_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL838X_ACL_CLR_CTRL) & BIT(0));
+
+ // Re-enable rule lookup
+ for (block = block_from; block <= block_to; block++) {
+ if (!(block_state & BIT(block)))
+ sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register layout is identical for the RTL8380/90 and RTL9300; the RTL9310,
+ * however, has 2 more registers / fields, and the physical field IDs are
+ * specific to each platform.
+ */
+static void rtl838x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ data = pr->field_range_check;
+ data_m = pr->field_range_check_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ data = pr->icmp_igmp;
+ data_m = pr->icmp_igmp_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ continue;
+ }
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] = data_m;
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 16;
+ }
+ }
+}
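
For reference, the loop above packs field i of the template into one 16-bit half of the raw entry: even-numbered fields land in the lower half of match word r[5 - i/2], odd-numbered fields in the upper half, and the masks mirror this layout in r[12 - i/2]. A minimal sketch of that mapping, purely illustrative and not part of the patch (the helper name is made up):

/* Illustrative only: which raw words/half-words the i-th templated field uses */
static inline void pie_field_slot(int i, int *data_word, int *mask_word, int *shift)
{
	*data_word = 5 - i / 2;		/* match data occupies r[5]..r[0]  */
	*mask_word = 12 - i / 2;	/* match masks occupy r[12]..r[7]  */
	*shift = (i % 2) ? 16 : 0;	/* odd fields use the upper 16 bit */
}
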
+
+/*
+ * Creates the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure by reading the raw data fields in the
+ * raw register space r[].
+ * The register layout is identical for the RTL8380/90 and RTL9300; the RTL9310,
+ * however, has 2 more registers / fields, and the physical field IDs are specific to each platform.
+ */
+static void rtl838x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ if (!(i % 2)) {
+ data = r[5 - i / 2];
+ data_m = r[12 - i / 2];
+ } else {
+ data = r[5 - i / 2] >> 16;
+ data_m = r[12 - i / 2] >> 16;
+ }
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ pr->spm = (pr->spn << 16) | data;
+ pr->spm_m = (pr->spn << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ pr->spm = data;
+ pr->spm_m = data_m;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ pr->otag = data;
+ pr->otag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ pr->smac[4] = data >> 8;
+ pr->smac[5] = data;
+ pr->smac_m[4] = data >> 8;
+ pr->smac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ pr->smac[2] = data >> 8;
+ pr->smac[3] = data;
+ pr->smac_m[2] = data >> 8;
+ pr->smac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ pr->smac[0] = data >> 8;
+ pr->smac[1] = data;
+ pr->smac_m[0] = data >> 8;
+ pr->smac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ pr->dmac[4] = data >> 8;
+ pr->dmac[5] = data;
+ pr->dmac_m[4] = data >> 8;
+ pr->dmac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ pr->dmac[2] = data >> 8;
+ pr->dmac[3] = data;
+ pr->dmac_m[2] = data >> 8;
+ pr->dmac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ pr->dmac[0] = data >> 8;
+ pr->dmac[1] = data;
+ pr->dmac_m[0] = data >> 8;
+ pr->dmac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ pr->ethertype = data;
+ pr->ethertype_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ pr->itag = data;
+ pr->itag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ pr->field_range_check = data;
+ pr->field_range_check_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ pr->sip = data;
+ pr->sip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ pr->sip = (pr->sip << 16) | data;
+			pr->sip_m = (pr->sip_m << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SIP2:
+ pr->is_ipv6 = true;
+			// Make use of limitations on the position of the match values
+ ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ pr->dip = data;
+ pr->dip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_DIP1:
+ pr->dip = (pr->dip << 16) | data;
+			pr->dip_m = (pr->dip_m << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_DIP2:
+ pr->is_ipv6 = true;
+ ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ break;
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ pr->tos_proto = data;
+ pr->tos_proto_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ pr->sport = data;
+ pr->sport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ pr->dport = data;
+ pr->dport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ pr->icmp_igmp = data;
+ pr->icmp_igmp_m = data_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+ }
+}
+
+static void rtl838x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->spmmask_fix = (r[6] >> 22) & 0x3;
+ pr->spn = (r[6] >> 16) & 0x3f;
+ pr->mgnt_vlan = (r[6] >> 15) & 1;
+ pr->dmac_hit_sw = (r[6] >> 14) & 1;
+ pr->not_first_frag = (r[6] >> 13) & 1;
+ pr->frame_type_l4 = (r[6] >> 10) & 7;
+ pr->frame_type = (r[6] >> 8) & 3;
+ pr->otag_fmt = (r[6] >> 7) & 1;
+ pr->itag_fmt = (r[6] >> 6) & 1;
+ pr->otag_exist = (r[6] >> 5) & 1;
+ pr->itag_exist = (r[6] >> 4) & 1;
+ pr->frame_type_l2 = (r[6] >> 2) & 3;
+ pr->tid = r[6] & 3;
+
+ pr->spmmask_fix_m = (r[13] >> 22) & 0x3;
+ pr->spn_m = (r[13] >> 16) & 0x3f;
+ pr->mgnt_vlan_m = (r[13] >> 15) & 1;
+ pr->dmac_hit_sw_m = (r[13] >> 14) & 1;
+ pr->not_first_frag_m = (r[13] >> 13) & 1;
+ pr->frame_type_l4_m = (r[13] >> 10) & 7;
+ pr->frame_type_m = (r[13] >> 8) & 3;
+ pr->otag_fmt_m = (r[13] >> 7) & 1;
+ pr->itag_fmt_m = (r[13] >> 6) & 1;
+ pr->otag_exist_m = (r[13] >> 5) & 1;
+ pr->itag_exist_m = (r[13] >> 4) & 1;
+ pr->frame_type_l2_m = (r[13] >> 2) & 3;
+ pr->tid_m = r[13] & 3;
+
+ pr->valid = r[14] & BIT(31);
+ pr->cond_not = r[14] & BIT(30);
+ pr->cond_and1 = r[14] & BIT(29);
+ pr->cond_and2 = r[14] & BIT(28);
+ pr->ivalid = r[14] & BIT(27);
+
+ pr->drop = (r[17] >> 14) & 3;
+ pr->fwd_sel = r[17] & BIT(13);
+ pr->ovid_sel = r[17] & BIT(12);
+ pr->ivid_sel = r[17] & BIT(11);
+ pr->flt_sel = r[17] & BIT(10);
+ pr->log_sel = r[17] & BIT(9);
+ pr->rmk_sel = r[17] & BIT(8);
+ pr->meter_sel = r[17] & BIT(7);
+ pr->tagst_sel = r[17] & BIT(6);
+ pr->mir_sel = r[17] & BIT(5);
+ pr->nopri_sel = r[17] & BIT(4);
+ pr->cpupri_sel = r[17] & BIT(3);
+ pr->otpid_sel = r[17] & BIT(2);
+ pr->itpid_sel = r[17] & BIT(1);
+ pr->shaper_sel = r[17] & BIT(0);
+}
+
+static void rtl838x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 22;
+ r[6] |= ((u32) (pr->spn & 0x3f)) << 16;
+ r[6] |= pr->mgnt_vlan ? BIT(15) : 0;
+ r[6] |= pr->dmac_hit_sw ? BIT(14) : 0;
+ r[6] |= pr->not_first_frag ? BIT(13) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 10;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 8;
+ r[6] |= pr->otag_fmt ? BIT(7) : 0;
+ r[6] |= pr->itag_fmt ? BIT(6) : 0;
+ r[6] |= pr->otag_exist ? BIT(5) : 0;
+ r[6] |= pr->itag_exist ? BIT(4) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 2;
+ r[6] |= ((u32) (pr->tid & 0x3));
+
+ r[13] = ((u32) (pr->spmmask_fix_m & 0x3)) << 22;
+ r[13] |= ((u32) (pr->spn_m & 0x3f)) << 16;
+ r[13] |= pr->mgnt_vlan_m ? BIT(15) : 0;
+ r[13] |= pr->dmac_hit_sw_m ? BIT(14) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(13) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 10;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 8;
+ r[13] |= pr->otag_fmt_m ? BIT(7) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(6) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(5) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(4) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 2;
+ r[13] |= ((u32) (pr->tid_m & 0x3));
+
+ r[14] = pr->valid ? BIT(31) : 0;
+ r[14] |= pr->cond_not ? BIT(30) : 0;
+ r[14] |= pr->cond_and1 ? BIT(29) : 0;
+ r[14] |= pr->cond_and2 ? BIT(28) : 0;
+ r[14] |= pr->ivalid ? BIT(27) : 0;
+
+ if (pr->drop)
+ r[17] = 0x1 << 14; // Standard drop action
+ else
+ r[17] = 0;
+ r[17] |= pr->fwd_sel ? BIT(13) : 0;
+ r[17] |= pr->ovid_sel ? BIT(12) : 0;
+ r[17] |= pr->ivid_sel ? BIT(11) : 0;
+ r[17] |= pr->flt_sel ? BIT(10) : 0;
+ r[17] |= pr->log_sel ? BIT(9) : 0;
+ r[17] |= pr->rmk_sel ? BIT(8) : 0;
+ r[17] |= pr->meter_sel ? BIT(7) : 0;
+ r[17] |= pr->tagst_sel ? BIT(6) : 0;
+ r[17] |= pr->mir_sel ? BIT(5) : 0;
+ r[17] |= pr->nopri_sel ? BIT(4) : 0;
+ r[17] |= pr->cpupri_sel ? BIT(3) : 0;
+ r[17] |= pr->otpid_sel ? BIT(2) : 0;
+ r[17] |= pr->itpid_sel ? BIT(1) : 0;
+ r[17] |= pr->shaper_sel ? BIT(0) : 0;
+}
+
+static int rtl838x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ u16 *aif = (u16 *)&r[17];
+ u16 data;
+ int fields_used = 0;
+
+ aif--;
+
+ pr_info("%s, at %08x\n", __func__, (u32)aif);
+ /* Multiple actions can be linked to a match of a PIE rule,
+ * they have different precedence depending on their type and this precedence
+ * defines which Action Information Field (0-4) in the IACL table stores
+ * the additional data of the action (like e.g. the port number a packet is
+ * forwarded to) */
+ if (pr->drop)
+ pr_info("%s: Action Drop: %d", __func__, pr->drop);
+
+ // TODO: count bits in selectors to limit to a maximum number of actions
+ if (pr->fwd_sel) { // Forwarding action
+ data = pr->fwd_act << 13;
+ data |= pr->fwd_data;
+ data |= pr->bypass_all ? BIT(12) : 0;
+ data |= pr->bypass_ibc_sc ? BIT(11) : 0;
+ data |= pr->bypass_igr_stp ? BIT(10) : 0;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->ovid_sel) { // Outer VID action
+ data = (pr->ovid_act & 0x3) << 12;
+ data |= pr->ovid_data;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->ivid_sel) { // Inner VID action
+ data = (pr->ivid_act & 0x3) << 12;
+ data |= pr->ivid_data;
+ *aif-- = data;
+ fields_used++;
+ }
+
+ if (pr->flt_sel) { // Filter action
+ *aif-- = pr->flt_data;
+ fields_used++;
+ }
+
+ if (pr->log_sel) { // Log action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->log_data;
+ fields_used++;
+ }
+
+ if (pr->rmk_sel) { // Remark action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->rmk_data;
+ fields_used++;
+ }
+
+ if (pr->meter_sel) { // Meter action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->meter_data;
+ fields_used++;
+ }
+
+ if (pr->tagst_sel) { // Egress Tag Status action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->tagst_data;
+ fields_used++;
+ }
+
+ if (pr->mir_sel) { // Mirror action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->mir_data;
+ fields_used++;
+ }
+
+ if (pr->nopri_sel) { // Normal Priority action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->nopri_data;
+ fields_used++;
+ }
+
+ if (pr->cpupri_sel) { // CPU Priority action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->nopri_data;
+ fields_used++;
+ }
+
+ if (pr->otpid_sel) { // OTPID action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->otpid_data;
+ fields_used++;
+ }
+
+ if (pr->itpid_sel) { // ITPID action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->itpid_data;
+ fields_used++;
+ }
+
+ if (pr->shaper_sel) { // Traffic shaper action
+ if (fields_used >= 4)
+ return -1;
+ *aif-- = pr->shaper_data;
+ fields_used++;
+ }
+
+ return 0;
+}
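
The routine above fills the 16-bit Action Information Fields downwards starting just below r[17], so a single rule can carry at most four data-carrying actions before -1 is returned. A hedged sketch of how a caller might set up a rule that redirects matches to the CPU port and meters them; the field names are those used above, while the fwd_act code and the cpu_port/meter_id variables are assumptions:

struct pie_rule pr = { 0 };

pr.fwd_sel    = true;
pr.fwd_act    = 4;		/* assumed encoding for "redirect to port" */
pr.fwd_data   = cpu_port;	/* assumed: destination port index         */
pr.meter_sel  = true;
pr.meter_data = meter_id;	/* assumed: index into the meter table     */
/* fwd + meter occupy two of the four available AIF half-words */
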
+
+static void rtl838x_read_pie_action(u32 r[], struct pie_rule *pr)
+{
+ u16 *aif = (u16 *)&r[17];
+
+ aif--;
+
+ pr_info("%s, at %08x\n", __func__, (u32)aif);
+ if (pr->drop)
+ pr_info("%s: Action Drop: %d", __func__, pr->drop);
+
+	if (pr->fwd_sel) { // Forwarding action
+ pr->fwd_act = *aif >> 13;
+ pr->fwd_data = *aif--;
+ pr->bypass_all = pr->fwd_data & BIT(12);
+ pr->bypass_ibc_sc = pr->fwd_data & BIT(11);
+ pr->bypass_igr_stp = pr->fwd_data & BIT(10);
+ if (pr->bypass_all || pr->bypass_ibc_sc || pr->bypass_igr_stp)
+ pr->bypass_sel = true;
+ }
+ if (pr->ovid_sel) // Outer VID action
+ pr->ovid_data = *aif--;
+ if (pr->ivid_sel) // Inner VID action
+ pr->ivid_data = *aif--;
+ if (pr->flt_sel) // Filter action
+ pr->flt_data = *aif--;
+ if (pr->log_sel) // Log action
+ pr->log_data = *aif--;
+ if (pr->rmk_sel) // Remark action
+ pr->rmk_data = *aif--;
+ if (pr->meter_sel) // Meter action
+ pr->meter_data = *aif--;
+ if (pr->tagst_sel) // Egress Tag Status action
+ pr->tagst_data = *aif--;
+ if (pr->mir_sel) // Mirror action
+ pr->mir_data = *aif--;
+ if (pr->nopri_sel) // Normal Priority action
+ pr->nopri_data = *aif--;
+ if (pr->cpupri_sel) // CPU Priority action
+ pr->nopri_data = *aif--;
+ if (pr->otpid_sel) // OTPID action
+ pr->otpid_data = *aif--;
+ if (pr->itpid_sel) // ITPID action
+ pr->itpid_data = *aif--;
+ if (pr->shaper_sel) // Traffic shaper action
+ pr->shaper_data = *aif--;
+}
+
+static void rtl838x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %08x\n", r[6]);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n", r[7], r[8], r[9], r[10], r[11], r[12]);
+ pr_info("Fixed M: %08x\n", r[13]);
+ pr_info("AIF : %08x %08x %08x\n", r[14], r[15], r[16]);
+ pr_info("Sel : %08x\n", r[17]);
+}
+
+static void rtl838x_pie_rule_dump(struct pie_rule *pr)
+{
+ pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n",
+		pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->meter_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel,
+ pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel);
+ if (pr->fwd_sel)
+ pr_info("FWD: %08x\n", pr->fwd_data);
+ pr_info("TID: %x, %x\n", pr->tid, pr->tid_m);
+}
+
+static int rtl838x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Read IACL table (1) via register 0
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
+ u32 r[18];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
+
+ memset(pr, 0, sizeof(*pr));
+ rtl_table_read(q, idx);
+ for (i = 0; i < 18; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl838x_read_pie_fixed_fields(r, pr);
+ if (!pr->valid)
+ return 0;
+
+ pr_info("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid);
+ rtl838x_pie_rule_dump_raw(r);
+
+ rtl838x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl838x_read_pie_action(r, pr);
+
+ return 0;
+}
+
+static int rtl838x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (1) via register 0
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
+ u32 r[18];
+ int i, err = 0;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
+
+ pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 18; i++)
+ r[i] = 0;
+
+ if (!pr->valid)
+ goto err_out;
+
+ rtl838x_write_pie_fixed_fields(r, pr);
+
+ pr_info("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7);
+ rtl838x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ if (rtl838x_write_pie_action(r, pr)) {
+ pr_err("Rule actions too complex\n");
+ goto err_out;
+ }
+
+ rtl838x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 18; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+err_out:
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return err;
+}
+
+static bool rtl838x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+static int rtl838x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl838x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+
+ pr_info("In %s\n", __func__);
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = 0; block < priv->n_pie_blocks; block++) {
+ for (j = 0; j < 3; j++) {
+ t = (sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
+ pr_info("Testing block %d, template %d, template id %d\n", block, j, t);
+ idx = rtl838x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 3)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("Using block: %d, index %d, template-id %d\n", block, idx, j);
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x3;
+ pr->id = idx;
+
+ rtl838x_pie_lookup_enable(priv, idx);
+ rtl838x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
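
A hedged usage sketch for the allocator above: the caller only fills in the match fields and the desired action, and rtl838x_pie_rule_add() searches for a block whose active template can express them. Illustrative only; the mac buffer and priv pointer are assumed to exist in the calling context:

struct pie_rule pr = { 0 };

memcpy(pr.dmac, mac, ETH_ALEN);		/* match on this destination MAC ...  */
memset(pr.dmac_m, 0xff, ETH_ALEN);	/* ... with a full (exact-match) mask */
pr.drop = 1;				/* standard drop action, see r[17]    */

if (rtl838x_pie_rule_add(priv, &pr))
	pr_warn("no free PIE entry with a suitable template\n");
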
+
+static void rtl838x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl838x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+/*
+ * Initializes the Packet Inspection Engine:
+ * powers it up, enables default matching templates for all blocks
+ * and clears all rules possibly installed by u-boot
+ */
+static void rtl838x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ // Enable ACL lookup on all ports, including CPU_PORT
+ for (i = 0; i <= priv->cpu_port; i++)
+ sw_w32(1, RTL838X_ACL_PORT_LOOKUP_CTRL(i));
+
+ // Power on all PIE blocks
+ for (i = 0; i < priv->n_pie_blocks; i++)
+ sw_w32_mask(0, BIT(i), RTL838X_ACL_BLK_PWR_CTRL);
+
+ // Include IPG in metering
+ sw_w32(1, RTL838X_METER_GLB_CTRL);
+
+ // Delete all present rules
+ rtl838x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
+
+	// Routing bypasses the source port filter: disable write-protection first
+ sw_w32_mask(0, 3, RTL838X_INT_RW_CTRL);
+ sw_w32_mask(0, 1, RTL838X_DMY_REG27);
+ sw_w32_mask(3, 0, RTL838X_INT_RW_CTRL);
+
+ // Enable predefined templates 0, 1 and 2 for even blocks
+ template_selectors = 0 | (1 << 3) | (2 << 6);
+ for (i = 0; i < 6; i += 2)
+ sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks
+ template_selectors = 0 | (3 << 3) | (4 << 6);
+ for (i = 1; i < priv->n_pie_blocks; i += 2)
+ sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Group each pair of physical blocks together to a logical block
+ sw_w32(0b10101010101, RTL838X_ACL_BLK_GROUP_CTRL);
+}
+
+static void rtl838x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ // Read ROUTING table (2) via register RTL8380_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
+
+ pr_info("In %s, id %d\n", __func__, idx);
+ rtl_table_read(r, idx);
+
+ // The table has a size of 2 registers
+ rt->nh.gw = sw_r32(rtl_table_data(r, 0));
+ rt->nh.gw <<= 32;
+ rt->nh.gw |= sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+}
+
+static void rtl838x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ // Access ROUTING table (2) via register RTL8380_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
+
+ pr_info("In %s, id %d, gw: %016llx\n", __func__, idx, rt->nh.gw);
+ sw_w32(rt->nh.gw >> 32, rtl_table_data(r, 0));
+ sw_w32(rt->nh.gw, rtl_table_data(r, 1));
+ rtl_table_write(r, idx);
+
+ rtl_table_release(r);
+}
+
+static int rtl838x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ // Nothing to be done
+ return 0;
+}
+
+static u32 rtl838x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (3) via register RTL8380_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ pr_info("Registers: %08x %08x\n",
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
+ // The table has a size of 2 registers
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
+
+static void rtl838x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (3) via register RTL8380_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
+void rtl838x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+	algoidx &= 1; // The RTL838X only supports 2 concurrent algorithms
+ sw_w32_mask(1 << (group % 8), algoidx << (group % 8), RTL838X_TRK_HASH_IDX_CTRL + ((group >> 3) << 2));
+ sw_w32(algomsk, RTL838X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
+void rtl838x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ switch(type) {
+ case BPDU:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL838X_RMA_BPDU_CTRL + ((port >> 4) << 2));
+ break;
+ case PTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL838X_RMA_PTP_CTRL + ((port >> 4) << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL838X_RMA_LLTP_CTRL + ((port >> 4) << 2));
+ break;
+ default:
+ break;
+ }
+}
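
The helper above stores two action bits per port, sixteen ports per 32-bit register, separately for each RMA frame class. A sketch of how it might be called to trap BPDUs from all user ports; the numeric action code is an assumption, a real caller would pass one of the action_type_t values:

int i;

for (i = 0; i < priv->cpu_port; i++)
	rtl838x_set_receive_management_action(i, BPDU, 1 /* assumed: trap-to-CPU */);
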
+
const struct rtl838x_reg rtl838x_reg = {
.mask_port_reg_be = rtl838x_mask_port_reg,
.set_port_reg_be = rtl838x_set_port_reg,
@@ -295,6 +1682,8 @@ const struct rtl838x_reg rtl838x_reg = {
.vlan_set_untagged = rtl838x_vlan_set_untagged,
.mac_force_mode_ctrl = rtl838x_mac_force_mode_ctrl,
.vlan_profile_dump = rtl838x_vlan_profile_dump,
+ .vlan_profile_setup = rtl838x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl838x_vlan_fwd_on_inner,
.stp_get = rtl838x_stp_get,
.stp_set = rtl838x_stp_set,
.mac_port_ctrl = rtl838x_mac_port_ctrl,
@@ -309,14 +1698,58 @@ const struct rtl838x_reg rtl838x_reg = {
.mac_rx_pause_sts = RTL838X_MAC_RX_PAUSE_STS,
.mac_tx_pause_sts = RTL838X_MAC_TX_PAUSE_STS,
.read_l2_entry_using_hash = rtl838x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl838x_write_l2_entry_using_hash,
.read_cam = rtl838x_read_cam,
+ .write_cam = rtl838x_write_cam,
.vlan_port_egr_filter = RTL838X_VLAN_PORT_EGR_FLTR,
- .vlan_port_igr_filter = RTL838X_VLAN_PORT_IGR_FLTR(0),
+ .vlan_port_igr_filter = RTL838X_VLAN_PORT_IGR_FLTR,
.vlan_port_pb = RTL838X_VLAN_PORT_PB_VLAN,
.vlan_port_tag_sts_ctrl = RTL838X_VLAN_PORT_TAG_STS_CTRL,
.trk_mbr_ctr = rtl838x_trk_mbr_ctr,
.rma_bpdu_fld_pmask = RTL838X_RMA_BPDU_FLD_PMSK,
+ .init_eee = rtl838x_init_eee,
+ .port_eee_set = rtl838x_port_eee_set,
+ .eee_port_ability = rtl838x_eee_port_ability,
+ .l2_hash_seed = rtl838x_l2_hash_seed,
+ .l2_hash_key = rtl838x_l2_hash_key,
+ .read_mcast_pmask = rtl838x_read_mcast_pmask,
+ .write_mcast_pmask = rtl838x_write_mcast_pmask,
+ .pie_init = rtl838x_pie_init,
+ .pie_rule_read = rtl838x_pie_rule_read,
+ .pie_rule_write = rtl838x_pie_rule_write,
+ .pie_rule_add = rtl838x_pie_rule_add,
+ .pie_rule_rm = rtl838x_pie_rule_rm,
+ .l2_learning_setup = rtl838x_l2_learning_setup,
+ .route_read = rtl838x_route_read,
+ .route_write = rtl838x_route_write,
+ .l3_setup = rtl838x_l3_setup,
+ .packet_cntr_read = rtl838x_packet_cntr_read,
+ .packet_cntr_clear = rtl838x_packet_cntr_clear,
+ .enable_learning = rtl838x_enable_learning,
+ .enable_flood = rtl838x_enable_flood,
+ .rma_bpdu_ctrl = RTL838X_RMA_BPDU_CTRL,
+ .rma_ptp_ctrl = RTL838X_RMA_PTP_CTRL,
+ .rma_lltp_ctrl = RTL838X_RMA_LLTP_CTRL,
+ .rma_bpdu_ctrl_div = 16,
+ .rma_ptp_ctrl_div = 16,
+ .rma_lltp_ctrl_div = 16,
+ .storm_ctrl_port_uc = RTL838X_STORM_CTRL_PORT_UC(0),
+ .storm_ctrl_port_bc = RTL838X_STORM_CTRL_PORT_BC(0),
+ .storm_ctrl_port_mc = RTL838X_STORM_CTRL_PORT_MC(0),
+ .storm_ctrl_port_uc_shift = 2,
+ .storm_ctrl_port_bc_shift = 2,
+ .storm_ctrl_port_mc_shift = 2,
.spcl_trap_eapol_ctrl = RTL838X_SPCL_TRAP_EAPOL_CTRL,
+ .spcl_trap_arp_ctrl = RTL838X_SPCL_TRAP_ARP_CTRL,
+ .spcl_trap_igmp_ctrl = RTL838X_SPCL_TRAP_IGMP_CTRL,
+ .spcl_trap_ipv6_ctrl = RTL838X_SPCL_TRAP_IPV6_CTRL,
+ .spcl_trap_switch_mac_ctrl = RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL,
+ .spcl_trap_ctrl = RTL838X_SPCL_TRAP_CTRL,
+ .vlan_ctrl = RTL838X_VLAN_CTRL,
+ .trk_hash_ctrl = RTL838X_TRK_HASH_CTRL,
+ .trk_hash_idx_ctrl = RTL838X_TRK_HASH_IDX_CTRL,
+ .set_distribution_algorithm = rtl838x_set_distribution_algorithm,
+ .set_receive_management_action = rtl838x_set_receive_management_action,
};
irqreturn_t rtl838x_switch_irq(int irq, void *dev_id)
@@ -432,6 +1865,79 @@ timeout:
return -ETIMEDOUT;
}
+/*
+ * Read an mmd register of a PHY
+ */
+int rtl838x_read_mmd_phy(u32 port, u32 addr, u32 reg, u32 *val)
+{
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ if (rtl838x_smi_wait_op(10000))
+ goto timeout;
+
+ sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
+ mdelay(10);
+
+ sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ v = addr << 16 | reg;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+
+ /* mmd-access | read | cmd-start */
+ v = 1 << 1 | 0 << 2 | 1;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(10000))
+ goto timeout;
+
+ *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Write to an mmd register of a PHY
+ */
+int rtl838x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val)
+{
+ u32 v;
+
+ pr_debug("MMD write: port %d, dev %d, reg %d, val %x\n", port, addr, reg, val);
+ val &= 0xffff;
+ mutex_lock(&smi_lock);
+
+ if (rtl838x_smi_wait_op(10000))
+ goto timeout;
+
+ sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
+ mdelay(10);
+
+ sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
+
+ sw_w32_mask(0x1f << 16, addr << 16, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+ sw_w32_mask(0xffff, reg, RTL838X_SMI_ACCESS_PHY_CTRL_3);
+ /* mmd-access | write | cmd-start */
+ v = 1 << 1 | 1 << 2 | 1;
+ sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
+
+ if (rtl838x_smi_wait_op(10000))
+ goto timeout;
+
+ mutex_unlock(&smi_lock);
+ return 0;
+
+timeout:
+ mutex_unlock(&smi_lock);
+ return -ETIMEDOUT;
+}
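
Together these helpers give access to any IEEE 802.3 Clause 45 register behind a port's PHY. A small illustrative sketch reading the PCS status register, which Clause 45 places at MMD device address 3, register 1 (the port variable is a placeholder):

u32 val;

if (!rtl838x_read_mmd_phy(port, 3, 1, &val))	/* MMD 3 = PCS, register 1 = status */
	pr_info("port %d PCS status: %04x\n", port, val);
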
+
void rtl8380_get_version(struct rtl838x_switch_priv *priv)
{
u32 rw_save, info_save;
@@ -459,47 +1965,18 @@ void rtl8380_get_version(struct rtl838x_switch_priv *priv)
}
}
-/*
- * Applies the same hash algorithm as the one used currently by the ASIC
- */
-u32 rtl838x_hash(struct rtl838x_switch_priv *priv, u64 seed)
-{
- u32 h1, h2, h3, h;
-
- if (sw_r32(priv->r->l2_ctrl_0) & 1) {
- h1 = (seed >> 11) & 0x7ff;
- h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
-
- h2 = (seed >> 33) & 0x7ff;
- h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x1f);
-
- h3 = (seed >> 44) & 0x7ff;
- h3 = ((h3 & 0x7f) << 4) | ((h3 >> 7) & 0xf);
-
- h = h1 ^ h2 ^ h3 ^ ((seed >> 55) & 0x1ff);
- h ^= ((seed >> 22) & 0x7ff) ^ (seed & 0x7ff);
- } else {
- h = ((seed >> 55) & 0x1ff) ^ ((seed >> 44) & 0x7ff)
- ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
- ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff);
- }
-
- return h;
-}
-
-void rtl838x_vlan_profile_dump(int index)
+void rtl838x_vlan_profile_dump(int profile)
{
- u32 profile;
+ u32 p;
- if (index < 0 || index > 7)
+ if (profile < 0 || profile > 7)
return;
- profile = sw_r32(RTL838X_VLAN_PROFILE(index));
+ p = sw_r32(RTL838X_VLAN_PROFILE(profile));
- pr_info("VLAN %d: L2 learning: %d, L2 Unknown MultiCast Field %x, \
- IPv4 Unknown MultiCast Field %x, IPv6 Unknown MultiCast Field: %x",
- index, profile & 1, (profile >> 1) & 0x1ff, (profile >> 10) & 0x1ff,
- (profile >> 19) & 0x1ff);
+ pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, \
+ UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d",
+ profile, p & 1, (p >> 1) & 0x1ff, (p >> 10) & 0x1ff, (p >> 19) & 0x1ff);
}
void rtl8380_sds_rst(int mac)
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.h b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.h
deleted file mode 100644
index d5ca153a10..0000000000
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl838x.h
+++ /dev/null
@@ -1,472 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _RTL838X_H
-#define _RTL838X_H
-
-#include <net/dsa.h>
-
-/*
- * Register definition
- */
-#define RTL838X_MAC_PORT_CTRL(port) (0xd560 + (((port) << 7)))
-#define RTL839X_MAC_PORT_CTRL(port) (0x8004 + (((port) << 7)))
-#define RTL930X_MAC_PORT_CTRL(port) (0x3260 + (((port) << 6)))
-#define RTL930X_MAC_L2_PORT_CTRL(port) (0x3268 + (((port) << 6)))
-#define RTL931X_MAC_PORT_CTRL(port) (0x6004 + (((port) << 7)))
-
-#define RTL838X_RST_GLB_CTRL_0 (0x003c)
-
-#define RTL838X_MAC_FORCE_MODE_CTRL (0xa104)
-#define RTL839X_MAC_FORCE_MODE_CTRL (0x02bc)
-#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C)
-#define RTL931X_MAC_FORCE_MODE_CTRL (0x0DCC)
-
-#define RTL838X_DMY_REG31 (0x3b28)
-#define RTL838X_SDS_MODE_SEL (0x0028)
-#define RTL838X_SDS_CFG_REG (0x0034)
-#define RTL838X_INT_MODE_CTRL (0x005c)
-#define RTL838X_CHIP_INFO (0x00d8)
-#define RTL839X_CHIP_INFO (0x0ff4)
-#define RTL838X_PORT_ISO_CTRL(port) (0x4100 + ((port) << 2))
-#define RTL839X_PORT_ISO_CTRL(port) (0x1400 + ((port) << 3))
-
-/* Packet statistics */
-#define RTL838X_STAT_PORT_STD_MIB (0x1200)
-#define RTL839X_STAT_PORT_STD_MIB (0xC000)
-#define RTL930X_STAT_PORT_MIB_CNTR (0x0664)
-#define RTL838X_STAT_RST (0x3100)
-#define RTL839X_STAT_RST (0xF504)
-#define RTL930X_STAT_RST (0x3240)
-#define RTL931X_STAT_RST (0x7ef4)
-#define RTL838X_STAT_PORT_RST (0x3104)
-#define RTL839X_STAT_PORT_RST (0xF508)
-#define RTL930X_STAT_PORT_RST (0x3244)
-#define RTL931X_STAT_PORT_RST (0x7ef8)
-#define RTL838X_STAT_CTRL (0x3108)
-#define RTL839X_STAT_CTRL (0x04cc)
-#define RTL930X_STAT_CTRL (0x3248)
-#define RTL931X_STAT_CTRL (0x5720)
-
-/* Registers of the internal Serdes of the 8390 */
-#define RTL8390_SDS0_1_XSG0 (0xA000)
-#define RTL8390_SDS0_1_XSG1 (0xA100)
-#define RTL839X_SDS12_13_XSG0 (0xB800)
-#define RTL839X_SDS12_13_XSG1 (0xB900)
-#define RTL839X_SDS12_13_PWR0 (0xb880)
-#define RTL839X_SDS12_13_PWR1 (0xb980)
-
-/* Registers of the internal Serdes of the 8380 */
-#define RTL838X_SDS4_FIB_REG0 (0xF800)
-#define RTL838X_SDS4_REG28 (0xef80)
-#define RTL838X_SDS4_DUMMY0 (0xef8c)
-#define RTL838X_SDS5_EXT_REG6 (0xf18c)
-
-/* VLAN registers */
-#define RTL838X_VLAN_CTRL (0x3A74)
-#define RTL838X_VLAN_PROFILE(idx) (0x3A88 + ((idx) << 2))
-#define RTL838X_VLAN_PORT_EGR_FLTR (0x3A84)
-#define RTL838X_VLAN_PORT_PB_VLAN (0x3C00)
-#define RTL838X_VLAN_PORT_IGR_FLTR(port) (0x3A7C + (((port >> 4) << 2)))
-#define RTL838X_VLAN_PORT_IGR_FLTR_0 (0x3A7C)
-#define RTL838X_VLAN_PORT_IGR_FLTR_1 (0x3A7C + 4)
-#define RTL838X_VLAN_PORT_TAG_STS_CTRL (0xA530)
-
-#define RTL839X_VLAN_PROFILE(idx) (0x25C0 + (((idx) << 3)))
-#define RTL839X_VLAN_CTRL (0x26D4)
-#define RTL839X_VLAN_PORT_PB_VLAN (0x26D8)
-#define RTL839X_VLAN_PORT_IGR_FLTR(port) (0x27B4 + (((port >> 4) << 2)))
-#define RTL839X_VLAN_PORT_EGR_FLTR(port) (0x27C4 + (((port >> 5) << 2)))
-#define RTL839X_VLAN_PORT_TAG_STS_CTRL (0x6828)
-
-#define RTL930X_VLAN_PROFILE_SET(idx) (0x9c60 + (((idx) * 20)))
-#define RTL930X_VLAN_CTRL (0x82D4)
-#define RTL930X_VLAN_PORT_PB_VLAN (0x82D8)
-#define RTL930X_VLAN_PORT_IGR_FLTR(port) (0x83C0 + (((port >> 4) << 2)))
-#define RTL930X_VLAN_PORT_EGR_FLTR (0x83C8)
-#define RTL930X_VLAN_PORT_TAG_STS_CTRL (0xCE24)
-
-#define RTL931X_VLAN_PROFILE_SET(idx) (0x9800 + (((idx) * 28)))
-#define RTL931X_VLAN_CTRL (0x94E4)
-#define RTL931X_VLAN_PORT_IGR_FLTR(port) (0x96B4 + (((port >> 4) << 2)))
-#define RTL931X_VLAN_PORT_EGR_FLTR(port) (0x96C4 + (((port >> 5) << 2)))
-#define RTL931X_VLAN_PORT_TAG_CTRL (0x4860)
-
-/* Table access registers */
-#define RTL838X_TBL_ACCESS_CTRL_0 (0x6914)
-#define RTL838X_TBL_ACCESS_DATA_0(idx) (0x6918 + ((idx) << 2))
-#define RTL838X_TBL_ACCESS_CTRL_1 (0xA4C8)
-#define RTL838X_TBL_ACCESS_DATA_1(idx) (0xA4CC + ((idx) << 2))
-
-#define RTL839X_TBL_ACCESS_CTRL_0 (0x1190)
-#define RTL839X_TBL_ACCESS_DATA_0(idx) (0x1194 + ((idx) << 2))
-#define RTL839X_TBL_ACCESS_CTRL_1 (0x6b80)
-#define RTL839X_TBL_ACCESS_DATA_1(idx) (0x6b84 + ((idx) << 2))
-#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
-#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
-
-#define RTL930X_TBL_ACCESS_CTRL_0 (0xB340)
-#define RTL930X_TBL_ACCESS_DATA_0(idx) (0xB344 + ((idx) << 2))
-#define RTL930X_TBL_ACCESS_CTRL_1 (0xB3A0)
-#define RTL930X_TBL_ACCESS_DATA_1(idx) (0xB3A4 + ((idx) << 2))
-#define RTL930X_TBL_ACCESS_CTRL_2 (0xCE04)
-#define RTL930X_TBL_ACCESS_DATA_2(i) (0xCE08 + (((i) << 2)))
-
-#define RTL931X_TBL_ACCESS_CTRL_0 (0x8500)
-#define RTL931X_TBL_ACCESS_DATA_0(idx) (0x8508 + ((idx) << 2))
-#define RTL931X_TBL_ACCESS_CTRL_1 (0x40C0)
-#define RTL931X_TBL_ACCESS_DATA_1(idx) (0x40C4 + ((idx) << 2))
-#define RTL931X_TBL_ACCESS_CTRL_2 (0x8528)
-#define RTL931X_TBL_ACCESS_DATA_2(i) (0x852C + (((i) << 2)))
-#define RTL931X_TBL_ACCESS_CTRL_3 (0x0200)
-#define RTL931X_TBL_ACCESS_DATA_3(i) (0x0204 + (((i) << 2)))
-#define RTL931X_TBL_ACCESS_CTRL_4 (0x20DC)
-#define RTL931X_TBL_ACCESS_DATA_4(i) (0x20E0 + (((i) << 2)))
-#define RTL931X_TBL_ACCESS_CTRL_5 (0x7E1C)
-#define RTL931X_TBL_ACCESS_DATA_5(i) (0x7E20 + (((i) << 2)))
-
-/* MAC handling */
-#define RTL838X_MAC_LINK_STS (0xa188)
-#define RTL839X_MAC_LINK_STS (0x0390)
-#define RTL930X_MAC_LINK_STS (0xCB10)
-#define RTL931X_MAC_LINK_STS (0x0EC0)
-#define RTL838X_MAC_LINK_SPD_STS(p) (0xa190 + (((p >> 4) << 2)))
-#define RTL839X_MAC_LINK_SPD_STS(p) (0x03a0 + (((p >> 4) << 2)))
-#define RTL930X_MAC_LINK_SPD_STS(p) (0xCB18 + (((p >> 3) << 2)))
-#define RTL931X_MAC_LINK_SPD_STS(p) (0x0ED0 + (((p >> 3) << 2)))
-#define RTL838X_MAC_LINK_DUP_STS (0xa19c)
-#define RTL839X_MAC_LINK_DUP_STS (0x03b0)
-#define RTL930X_MAC_LINK_DUP_STS (0xCB28)
-#define RTL931X_MAC_LINK_DUP_STS (0x0EF0)
-#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0)
-#define RTL839X_MAC_TX_PAUSE_STS (0x03b8)
-#define RTL930X_MAC_TX_PAUSE_STS (0xCB2C)
-#define RTL931X_MAC_TX_PAUSE_STS (0x0EF8)
-#define RTL838X_MAC_RX_PAUSE_STS (0xa1a4)
-#define RTL839X_MAC_RX_PAUSE_STS (0x03c0)
-#define RTL930X_MAC_RX_PAUSE_STS (0xCB30)
-#define RTL931X_MAC_RX_PAUSE_STS (0x0F00)
-
-/* MAC link state bits */
-#define FORCE_EN (1 << 0)
-#define FORCE_LINK_EN (1 << 1)
-#define NWAY_EN (1 << 2)
-#define DUPLX_MODE (1 << 3)
-#define TX_PAUSE_EN (1 << 6)
-#define RX_PAUSE_EN (1 << 7)
-
-/* EEE */
-#define RTL838X_MAC_EEE_ABLTY (0xa1a8)
-#define RTL838X_EEE_PORT_TX_EN (0x014c)
-#define RTL838X_EEE_PORT_RX_EN (0x0150)
-#define RTL838X_EEE_CLK_STOP_CTRL (0x0148)
-#define RTL838X_EEE_TX_TIMER_GIGA_CTRL (0xaa04)
-#define RTL838X_EEE_TX_TIMER_GELITE_CTRL (0xaa08)
-
-/* L2 functionality */
-#define RTL838X_L2_CTRL_0 (0x3200)
-#define RTL839X_L2_CTRL_0 (0x3800)
-#define RTL930X_L2_CTRL (0x8FD8)
-#define RTL931X_L2_CTRL (0xC800)
-#define RTL838X_L2_CTRL_1 (0x3204)
-#define RTL839X_L2_CTRL_1 (0x3804)
-#define RTL930X_L2_AGE_CTRL (0x8FDC)
-#define RTL931X_L2_AGE_CTRL (0xC804)
-#define RTL838X_L2_PORT_AGING_OUT (0x3358)
-#define RTL839X_L2_PORT_AGING_OUT (0x3b74)
-#define RTL930X_L2_PORT_AGE_CTRL (0x8FE0)
-#define RTL931X_L2_PORT_AGE_CTRL (0xc808)
-#define RTL838X_TBL_ACCESS_L2_CTRL (0x6900)
-#define RTL839X_TBL_ACCESS_L2_CTRL (0x1180)
-#define RTL930X_TBL_ACCESS_L2_CTRL (0xB320)
-#define RTL930X_TBL_ACCESS_L2_METHOD_CTRL (0xB324)
-#define RTL838X_TBL_ACCESS_L2_DATA(idx) (0x6908 + ((idx) << 2))
-#define RTL839X_TBL_ACCESS_L2_DATA(idx) (0x1184 + ((idx) << 2))
-#define RTL930X_TBL_ACCESS_L2_DATA(idx) (0xab08 + ((idx) << 2))
-#define RTL838X_L2_TBL_FLUSH_CTRL (0x3370)
-#define RTL839X_L2_TBL_FLUSH_CTRL (0x3ba0)
-#define RTL930X_L2_TBL_FLUSH_CTRL (0x9404)
-#define RTL931X_L2_TBL_FLUSH_CTRL (0xCD9C)
-
-#define RTL838X_L2_PORT_NEW_SALRN(p) (0x328c + (((p >> 4) << 2)))
-#define RTL839X_L2_PORT_NEW_SALRN(p) (0x38F0 + (((p >> 4) << 2)))
-#define RTL930X_L2_PORT_SALRN(p) (0x8FEC + (((p >> 4) << 2)))
-#define RTL931X_L2_PORT_NEW_SALRN(p) (0xC820 + (((p >> 4) << 2)))
-#define RTL838X_L2_PORT_NEW_SA_FWD(p) (0x3294 + (((p >> 4) << 2)))
-#define RTL839X_L2_PORT_NEW_SA_FWD(p) (0x3900 + (((p >> 4) << 2)))
-#define RTL930X_L2_PORT_NEW_SA_FWD(p) (0x8FF4 + (((p / 10) << 2)))
-#define RTL931X_L2_PORT_NEW_SA_FWD(p) (0xC830 + (((p / 10) << 2)))
-
-#define RTL930X_ST_CTRL (0x8798)
-
-#define RTL930X_L2_PORT_SABLK_CTRL (0x905c)
-#define RTL930X_L2_PORT_DABLK_CTRL (0x9060)
-
-#define RTL838X_RMA_BPDU_FLD_PMSK (0x4348)
-#define RTL930X_RMA_BPDU_FLD_PMSK (0x9F18)
-#define RTL931X_RMA_BPDU_FLD_PMSK (0x8950)
-#define RTL839X_RMA_BPDU_FLD_PMSK (0x125C)
-
-/* Port Mirroring */
-#define RTL838X_MIR_CTRL (0x5D00)
-#define RTL838X_MIR_DPM_CTRL (0x5D20)
-#define RTL838X_MIR_SPM_CTRL (0x5D10)
-
-#define RTL839X_MIR_CTRL (0x2500)
-#define RTL839X_MIR_DPM_CTRL (0x2530)
-#define RTL839X_MIR_SPM_CTRL (0x2510)
-
-#define RTL930X_MIR_CTRL (0xA2A0)
-#define RTL930X_MIR_DPM_CTRL (0xA2C0)
-#define RTL930X_MIR_SPM_CTRL (0xA2B0)
-
-#define RTL931X_MIR_CTRL (0xAF00)
-#define RTL931X_MIR_DPM_CTRL (0xAF30)
-#define RTL931X_MIR_SPM_CTRL (0xAF10)
-
-/* Storm/rate control and scheduling */
-#define RTL838X_STORM_CTRL (0x4700)
-#define RTL839X_STORM_CTRL (0x1800)
-#define RTL838X_STORM_CTRL_LB_CTRL(p) (0x4884 + (((p) << 2)))
-#define RTL838X_STORM_CTRL_BURST_PPS_0 (0x4874)
-#define RTL838X_STORM_CTRL_BURST_PPS_1 (0x4878)
-#define RTL838X_STORM_CTRL_BURST_0 (0x487c)
-#define RTL838X_STORM_CTRL_BURST_1 (0x4880)
-#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0 (0x1804)
-#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1 (0x1808)
-#define RTL838X_SCHED_CTRL (0xB980)
-#define RTL839X_SCHED_CTRL (0x60F4)
-#define RTL838X_SCHED_LB_TICK_TKN_CTRL_0 (0xAD58)
-#define RTL838X_SCHED_LB_TICK_TKN_CTRL_1 (0xAD5C)
-#define RTL839X_SCHED_LB_TICK_TKN_CTRL_0 (0x1804)
-#define RTL839X_SCHED_LB_TICK_TKN_CTRL_1 (0x1808)
-#define RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL (0x2000)
-#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0 (0x1604)
-#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1 (0x1608)
-#define RTL839X_SCHED_LB_TICK_TKN_CTRL (0x60F8)
-#define RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL (0x6200)
-#define RTL838X_SCHED_LB_THR (0xB984)
-#define RTL839X_SCHED_LB_THR (0x60FC)
-#define RTL838X_SCHED_P_EGR_RATE_CTRL(p) (0xC008 + (((p) << 7)))
-#define RTL838X_SCHED_Q_EGR_RATE_CTRL(p, q) (0xC00C + (p << 7) + (((q) << 2)))
-#define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
-#define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
-#define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
-#define RTL839X_STORM_CTRL_PORT_BC_EXCEED(p) (0x180c + (((p >> 5) << 2)))
-#define RTL839X_STORM_CTRL_PORT_MC_EXCEED(p) (0x1814 + (((p >> 5) << 2)))
-#define RTL839X_STORM_CTRL_PORT_UC_EXCEED(p) (0x181c + (((p >> 5) << 2)))
-#define RTL838X_STORM_CTRL_PORT_UC(p) (0x4718 + (((p) << 2)))
-#define RTL838X_STORM_CTRL_PORT_MC(p) (0x478c + (((p) << 2)))
-#define RTL838X_STORM_CTRL_PORT_BC(p) (0x4800 + (((p) << 2)))
-#define RTL839X_STORM_CTRL_PORT_UC_0(p) (0x185C + (((p) << 3)))
-#define RTL839X_STORM_CTRL_PORT_UC_1(p) (0x1860 + (((p) << 3)))
-#define RTL839X_STORM_CTRL_PORT_MC_0(p) (0x19FC + (((p) << 3)))
-#define RTL839X_STORM_CTRL_PORT_MC_1(p) (0x1a00 + (((p) << 3)))
-#define RTL839X_STORM_CTRL_PORT_BC_0(p) (0x1B9C + (((p) << 3)))
-#define RTL839X_STORM_CTRL_PORT_BC_1(p) (0x1BA0 + (((p) << 3)))
-#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
-#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
-#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p) (0x1618 + (((p) << 3)))
-#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_1(p) (0x161C + (((p) << 3)))
-#define RTL839X_IGR_BWCTRL_PORT_CTRL_0(p) (0x1640 + (((p) << 3)))
-#define RTL839X_IGR_BWCTRL_PORT_CTRL_1(p) (0x1644 + (((p) << 3)))
-#define RTL839X_IGR_BWCTRL_CTRL_LB_THR (0x1614)
-
-/* Link aggregation (Trunking) */
-#define RTL839X_TRK_MBR_CTR (0x2200)
-#define RTL838X_TRK_MBR_CTR (0x3E00)
-#define RTL930X_TRK_MBR_CTRL (0xA41C)
-#define RTL931X_TRK_MBR_CTRL (0xB8D0)
-
-/* Attack prevention */
-#define RTL838X_ATK_PRVNT_PORT_EN (0x5B00)
-#define RTL838X_ATK_PRVNT_CTRL (0x5B04)
-#define RTL838X_ATK_PRVNT_ACT (0x5B08)
-#define RTL838X_ATK_PRVNT_STS (0x5B1C)
-
-/* 802.1X */
-#define RTL838X_SPCL_TRAP_EAPOL_CTRL (0x6988)
-#define RTL839X_SPCL_TRAP_EAPOL_CTRL (0x105C)
-
-/* QoS */
-#define RTL838X_QM_INTPRI2QID_CTRL (0x5F00)
-#define RTL839X_QM_INTPRI2QID_CTRL(q) (0x1110 + (q << 2))
-#define RTL839X_QM_PORT_QNUM(p) (0x1130 + (((p / 10) << 2)))
-#define RTL838X_PRI_SEL_PORT_PRI(p) (0x5FB8 + (((p / 10) << 2)))
-#define RTL839X_PRI_SEL_PORT_PRI(p) (0x10A8 + (((p / 10) << 2)))
-#define RTL838X_QM_PKT2CPU_INTPRI_MAP (0x5F10)
-#define RTL839X_QM_PKT2CPU_INTPRI_MAP (0x1154)
-#define RTL838X_PRI_SEL_CTRL (0x10E0)
-#define RTL839X_PRI_SEL_CTRL (0x10E0)
-#define RTL838X_PRI_SEL_TBL_CTRL(i) (0x5FD8 + (((i) << 2)))
-#define RTL839X_PRI_SEL_TBL_CTRL(i) (0x10D0 + (((i) << 2)))
-#define RTL838X_QM_PKT2CPU_INTPRI_0 (0x5F04)
-#define RTL838X_QM_PKT2CPU_INTPRI_1 (0x5F08)
-#define RTL838X_QM_PKT2CPU_INTPRI_2 (0x5F0C)
-#define RTL839X_OAM_CTRL (0x2100)
-#define RTL839X_OAM_PORT_ACT_CTRL(p) (0x2104 + (((p) << 2)))
-#define RTL839X_RMK_PORT_DEI_TAG_CTRL(p) (0x6A9C + (((p >> 5) << 2)))
-#define RTL839X_PRI_SEL_IPRI_REMAP (0x1080)
-#define RTL838X_PRI_SEL_IPRI_REMAP (0x5F8C)
-#define RTL839X_PRI_SEL_DEI2DP_REMAP (0x10EC)
-#define RTL839X_PRI_SEL_DSCP2DP_REMAP_ADDR(i) (0x10F0 + (((i >> 4) << 2)))
-#define RTL839X_RMK_DEI_CTRL (0x6AA4)
-#define RTL839X_WRED_PORT_THR_CTRL(i) (0x6084 + ((i) << 2))
-#define RTL839X_WRED_QUEUE_THR_CTRL(q, i) (0x6090 + ((q) * 12) + ((i) << 2))
-#define RTL838X_PRI_DSCP_INVLD_CTRL0 (0x5FE8)
-#define RTL838X_RMK_IPRI_CTRL (0xA460)
-#define RTL838X_RMK_OPRI_CTRL (0xA464)
-#define RTL838X_SCHED_P_TYPE_CTRL(p) (0xC04C + (((p) << 7)))
-#define RTL838X_SCHED_LB_CTRL(p) (0xC004 + (((p) << 7)))
-#define RTL838X_FC_P_EGR_DROP_CTRL(p) (0x6B1C + (((p) << 2)))
-
-/* Debug features */
-#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8)
-
-#define MAX_LAGS 16
-#define MAX_PRIOS 8
-
-enum phy_type {
- PHY_NONE = 0,
- PHY_RTL838X_SDS = 1,
- PHY_RTL8218B_INT = 2,
- PHY_RTL8218B_EXT = 3,
- PHY_RTL8214FC = 4,
- PHY_RTL839X_SDS = 5,
-};
-
-struct rtl838x_port {
- bool enable;
- u64 pm;
- u16 pvid;
- bool eee_enabled;
- enum phy_type phy;
- bool is10G;
- bool is2G5;
- u8 sds_num;
- const struct dsa_port *dp;
-};
-
-struct rtl838x_vlan_info {
- u64 untagged_ports;
- u64 tagged_ports;
- u8 profile_id;
- bool hash_mc_fid;
- bool hash_uc_fid;
- u8 fid;
-};
-
-enum l2_entry_type {
- L2_INVALID = 0,
- L2_UNICAST = 1,
- L2_MULTICAST = 2,
- IP4_MULTICAST = 3,
- IP6_MULTICAST = 4,
-};
-
-struct rtl838x_l2_entry {
- u8 mac[6];
- u16 vid;
- u16 rvid;
- u8 port;
- bool valid;
- enum l2_entry_type type;
- bool is_static;
- bool is_ip_mc;
- bool is_ipv6_mc;
- bool block_da;
- bool block_sa;
- bool suspended;
- bool next_hop;
- int age;
- u8 trunk;
- u8 stackDev;
- u16 mc_portmask_index;
-};
-
-struct rtl838x_switch_priv;
-
-struct rtl838x_reg {
- void (*mask_port_reg_be)(u64 clear, u64 set, int reg);
- void (*set_port_reg_be)(u64 set, int reg);
- u64 (*get_port_reg_be)(int reg);
- void (*mask_port_reg_le)(u64 clear, u64 set, int reg);
- void (*set_port_reg_le)(u64 set, int reg);
- u64 (*get_port_reg_le)(int reg);
- int stat_port_rst;
- int stat_rst;
- int stat_port_std_mib;
- int (*port_iso_ctrl)(int p);
- void (*traffic_enable)(int source, int dest);
- void (*traffic_disable)(int source, int dest);
- void (*traffic_set)(int source, u64 dest_matrix);
- u64 (*traffic_get)(int source);
- int l2_ctrl_0;
- int l2_ctrl_1;
- int l2_port_aging_out;
- int smi_poll_ctrl;
- int l2_tbl_flush_ctrl;
- void (*exec_tbl0_cmd)(u32 cmd);
- void (*exec_tbl1_cmd)(u32 cmd);
- int (*tbl_access_data_0)(int i);
- int isr_glb_src;
- int isr_port_link_sts_chg;
- int imr_port_link_sts_chg;
- int imr_glb;
- void (*vlan_tables_read)(u32 vlan, struct rtl838x_vlan_info *info);
- void (*vlan_set_tagged)(u32 vlan, struct rtl838x_vlan_info *info);
- void (*vlan_set_untagged)(u32 vlan, u64 portmask);
- void (*vlan_profile_dump)(int index);
- void (*stp_get)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
- void (*stp_set)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
- int (*mac_force_mode_ctrl)(int port);
- int (*mac_port_ctrl)(int port);
- int (*l2_port_new_salrn)(int port);
- int (*l2_port_new_sa_fwd)(int port);
- int mir_ctrl;
- int mir_dpm;
- int mir_spm;
- int mac_link_sts;
- int mac_link_dup_sts;
- int (*mac_link_spd_sts)(int port);
- int mac_rx_pause_sts;
- int mac_tx_pause_sts;
- u64 (*read_l2_entry_using_hash)(u32 hash, u32 position, struct rtl838x_l2_entry *e);
- u64 (*read_cam)(int idx, struct rtl838x_l2_entry *e);
- int vlan_port_egr_filter;
- int vlan_port_igr_filter;
- int vlan_port_pb;
- int vlan_port_tag_sts_ctrl;
- int (*rtl838x_vlan_port_tag_sts_ctrl)(int port);
- int (*trk_mbr_ctr)(int group);
- int rma_bpdu_fld_pmask;
- int spcl_trap_eapol_ctrl;
-};
-
-struct rtl838x_switch_priv {
- /* Switch operation */
- struct dsa_switch *ds;
- struct device *dev;
- u16 id;
- u16 family_id;
- char version;
- struct rtl838x_port ports[57];
- struct mutex reg_mutex;
- int link_state_irq;
- int mirror_group_ports[4];
- struct mii_bus *mii_bus;
- const struct rtl838x_reg *r;
- u8 cpu_port;
- u8 port_mask;
- u8 port_width;
- u64 irq_mask;
- u32 fib_entries;
- struct dentry *dbgfs_dir;
- int n_lags;
- u64 lags_port_members[MAX_LAGS];
- struct net_device *lag_devs[MAX_LAGS];
- struct notifier_block nb;
-};
-
-void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv);
-
-#endif /* _RTL838X_H */
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl839x.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl839x.c
index 5106bd2e9d..bfc9e0e1fa 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl839x.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl839x.c
@@ -6,6 +6,104 @@
extern struct mutex smi_lock;
extern struct rtl83xx_soc_info soc_info;
+/* Definition of the RTL839X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPMMASK = 0,
+ TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-31
+ TEMPLATE_FIELD_SPM2 = 3, // Source portmask ports 32-47
+ TEMPLATE_FIELD_SPM3 = 4, // Source portmask ports 48-56
+ TEMPLATE_FIELD_DMAC0 = 5, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 6, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 7, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 8, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 9, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 10, // Source MAC [47:32]
+ TEMPLATE_FIELD_ETHERTYPE = 11, // Ethernet frame type field
+ // Field-ID 12 is not used
+ TEMPLATE_FIELD_OTAG = 13,
+ TEMPLATE_FIELD_ITAG = 14,
+ TEMPLATE_FIELD_SIP0 = 15,
+ TEMPLATE_FIELD_SIP1 = 16,
+ TEMPLATE_FIELD_DIP0 = 17,
+ TEMPLATE_FIELD_DIP1 = 18,
+ TEMPLATE_FIELD_IP_TOS_PROTO = 19,
+ TEMPLATE_FIELD_IP_FLAG = 20,
+ TEMPLATE_FIELD_L4_SPORT = 21,
+ TEMPLATE_FIELD_L4_DPORT = 22,
+ TEMPLATE_FIELD_L34_HEADER = 23,
+ TEMPLATE_FIELD_ICMP_IGMP = 24,
+ TEMPLATE_FIELD_VID_RANG0 = 25,
+ TEMPLATE_FIELD_VID_RANG1 = 26,
+ TEMPLATE_FIELD_L4_PORT_RANG = 27,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 28,
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 29,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 30,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 31,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 32,
+ TEMPLATE_FIELD_FIELD_SELECTOR_4 = 33,
+ TEMPLATE_FIELD_FIELD_SELECTOR_5 = 34,
+ TEMPLATE_FIELD_SIP2 = 35,
+ TEMPLATE_FIELD_SIP3 = 36,
+ TEMPLATE_FIELD_SIP4 = 37,
+ TEMPLATE_FIELD_SIP5 = 38,
+ TEMPLATE_FIELD_SIP6 = 39,
+ TEMPLATE_FIELD_SIP7 = 40,
+ TEMPLATE_FIELD_OLABEL = 41,
+ TEMPLATE_FIELD_ILABEL = 42,
+ TEMPLATE_FIELD_OILABEL = 43,
+ TEMPLATE_FIELD_DPMMASK = 44,
+ TEMPLATE_FIELD_DPM0 = 45,
+ TEMPLATE_FIELD_DPM1 = 46,
+ TEMPLATE_FIELD_DPM2 = 47,
+ TEMPLATE_FIELD_DPM3 = 48,
+ TEMPLATE_FIELD_L2DPM0 = 49,
+ TEMPLATE_FIELD_L2DPM1 = 50,
+ TEMPLATE_FIELD_L2DPM2 = 51,
+ TEMPLATE_FIELD_L2DPM3 = 52,
+ TEMPLATE_FIELD_IVLAN = 53,
+ TEMPLATE_FIELD_OVLAN = 54,
+ TEMPLATE_FIELD_FWD_VID = 55,
+ TEMPLATE_FIELD_DIP2 = 56,
+ TEMPLATE_FIELD_DIP3 = 57,
+ TEMPLATE_FIELD_DIP4 = 58,
+ TEMPLATE_FIELD_DIP5 = 59,
+ TEMPLATE_FIELD_DIP6 = 60,
+ TEMPLATE_FIELD_DIP7 = 61,
+};
+
+// Number of fixed templates predefined in the SoC
+#define N_FIXED_TEMPLATES 5
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_ITAG,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+		TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_L4_SPORT,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_SPM0,
+ TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_ITAG, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_SIP0,
+ TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_L4_DPORT,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_ICMP_IGMP, TEMPLATE_FIELD_IP_TOS_PROTO
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_SPM0,
+ TEMPLATE_FIELD_SPM1, TEMPLATE_FIELD_SPM2, TEMPLATE_FIELD_SPM3
+ },
+};
+
void rtl839x_print_matrix(void)
{
volatile u64 *ptr9;
@@ -48,68 +146,120 @@ static inline int rtl839x_tbl_access_data_0(int i)
static void rtl839x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
- u32 cmd;
- u64 v;
- u32 u, w;
-
- cmd = 1 << 16 /* Execute cmd */
- | 0 << 15 /* Read */
- | 0 << 12 /* Table type 0b000 */
- | (vlan & 0xfff);
- rtl839x_exec_tbl0_cmd(cmd);
-
- v = sw_r32(RTL839X_TBL_ACCESS_DATA_0(0));
- v <<= 32;
- u = sw_r32(RTL839X_TBL_ACCESS_DATA_0(1));
- v |= u;
- info->tagged_ports = v >> 11;
-
- w = sw_r32(RTL839X_TBL_ACCESS_DATA_0(2));
-
- info->profile_id = w >> 30 | ((u & 1) << 2);
- info->hash_mc_fid = !!(u & 2);
- info->hash_uc_fid = !!(u & 4);
- info->fid = (u >> 3) & 0xff;
-
- cmd = 1 << 15 /* Execute cmd */
- | 0 << 14 /* Read */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- rtl839x_exec_tbl1_cmd(cmd);
- v = sw_r32(RTL839X_TBL_ACCESS_DATA_1(0));
- v <<= 32;
- v |= sw_r32(RTL839X_TBL_ACCESS_DATA_1(1));
- info->untagged_ports = v >> 11;
+ u32 u, v, w;
+ // Read VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
+
+ rtl_table_read(r, vlan);
+ u = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ w = sw_r32(rtl_table_data(r, 2));
+ rtl_table_release(r);
+
+ info->tagged_ports = u;
+ info->tagged_ports = (info->tagged_ports << 21) | ((v >> 11) & 0x1fffff);
+ info->profile_id = w >> 30 | ((v & 1) << 2);
+ info->hash_mc_fid = !!(w & BIT(2));
+ info->hash_uc_fid = !!(w & BIT(3));
+ info->fid = (v >> 3) & 0xff;
+
+ // Read UNTAG table (0) via table register 1
+ r = rtl_table_get(RTL8390_TBL_1, 0);
+ rtl_table_read(r, vlan);
+ u = sw_r32(rtl_table_data(r, 0));
+ v = sw_r32(rtl_table_data(r, 1));
+ rtl_table_release(r);
+
+ info->untagged_ports = u;
+ info->untagged_ports = (info->untagged_ports << 21) | ((v >> 11) & 0x1fffff);
}
static void rtl839x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
- u32 cmd = BIT(16) /* Execute cmd */
- | BIT(15) /* Write */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- u32 w;
- u64 v = info->tagged_ports << 11;
+ u32 u, v, w;
+ // Access VLAN table (0) via register 0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
- v |= info->profile_id >> 2;
- v |= info->hash_mc_fid ? 2 : 0;
- v |= info->hash_uc_fid ? 4 : 0;
+ u = info->tagged_ports >> 21;
+ v = info->tagged_ports << 11;
v |= ((u32)info->fid) << 3;
- rtl839x_set_port_reg_be(v, RTL838X_TBL_ACCESS_DATA_0(0));
+ v |= info->hash_uc_fid ? BIT(2) : 0;
+ v |= info->hash_mc_fid ? BIT(1) : 0;
+ v |= (info->profile_id & 0x4) ? 1 : 0;
+ w = ((u32)(info->profile_id & 3)) << 30;
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+ sw_w32(w, rtl_table_data(r, 2));
- w = info->profile_id;
- sw_w32(w << 30, RTL838X_TBL_ACCESS_DATA_0(2));
- rtl839x_exec_tbl0_cmd(cmd);
+ rtl_table_write(r, vlan);
+ rtl_table_release(r);
}
static void rtl839x_vlan_set_untagged(u32 vlan, u64 portmask)
{
- u32 cmd = BIT(16) /* Execute cmd */
- | BIT(15) /* Write */
- | 0 << 12 /* Table type 0b00 */
- | (vlan & 0xfff);
- rtl839x_set_port_reg_be(portmask << 11, RTL838X_TBL_ACCESS_DATA_1(0));
- rtl839x_exec_tbl1_cmd(cmd);
+ u32 u, v;
+
+ // Access UNTAG table (0) via table register 1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 0);
+
+ u = portmask >> 21;
+ v = portmask << 11;
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+ rtl_table_write(r, vlan);
+
+ rtl_table_release(r);
+}
+
+/* Sets whether L2 forwarding is based on the inner or the outer VLAN tag
+ */
+static void rtl839x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ if (is_set)
+ rtl839x_mask_port_reg_be(BIT_ULL(port), 0ULL, RTL839X_VLAN_PORT_FWD);
+ else
+ rtl839x_mask_port_reg_be(0ULL, BIT_ULL(port), RTL839X_VLAN_PORT_FWD);
+}
+
+/*
+ * Hash seed is vid (actually rvid) concatenated with the MAC address
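+ * Illustration (values chosen for this comment only): vid 1 and MAC
+ * 00:e0:4c:00:00:01 yield the seed 0x000100e04c000001, i.e. the vid sits in
+ * bits 63..48 and the MAC in bits 47..0.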
+ */
+static u64 rtl839x_l2_hash_seed(u64 mac, u32 vid)
+{
+ u64 v = vid;
+
+ v <<= 48;
+ v |= mac;
+
+ return v;
+}
+
+/*
+ * Applies the same hash algorithm as the one used currently by the ASIC to the seed
+ * and returns a key into the L2 hash table
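+ * Both variants produce a 12-bit key: either two XOR-folded 6-bit halves
+ * combined as (h1 << 6) | h2, or an XOR of 12-bit slices of the seed; bit 0
+ * of l2_ctrl_0 selects the variant.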
+ */
+static u32 rtl839x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 h1, h2, h;
+
+ if (sw_r32(priv->r->l2_ctrl_0) & 1) {
+ h1 = (u32) (((seed >> 60) & 0x3f) ^ ((seed >> 54) & 0x3f)
+ ^ ((seed >> 36) & 0x3f) ^ ((seed >> 30) & 0x3f)
+ ^ ((seed >> 12) & 0x3f) ^ ((seed >> 6) & 0x3f));
+ h2 = (u32) (((seed >> 48) & 0x3f) ^ ((seed >> 42) & 0x3f)
+ ^ ((seed >> 24) & 0x3f) ^ ((seed >> 18) & 0x3f)
+ ^ (seed & 0x3f));
+ h = (h1 << 6) | h2;
+ } else {
+ h = (seed >> 60)
+ ^ ((((seed >> 48) & 0x3f) << 6) | ((seed >> 54) & 0x3f))
+ ^ ((seed >> 36) & 0xfff) ^ ((seed >> 24) & 0xfff)
+ ^ ((seed >> 12) & 0xfff) ^ (seed & 0xfff);
+ }
+
+ return h;
}
static inline int rtl839x_mac_force_mode_ctrl(int p)
@@ -147,10 +297,11 @@ static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
/* Table contains different entry types, we need to identify the right one:
* Check for MC entries, first
*/
+ // pr_info("READING L2: %08x %08x %08x\n", r[0], r[1], r[2]);
e->is_ip_mc = !!(r[2] & BIT(31));
e->is_ipv6_mc = !!(r[2] & BIT(30));
e->type = L2_INVALID;
- if (!e->is_ip_mc) {
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
e->mac[0] = (r[0] >> 12);
e->mac[1] = (r[0] >> 4);
e->mac[2] = ((r[1] >> 28) | (r[0] << 4));
@@ -158,18 +309,23 @@ static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
e->mac[4] = (r[1] >> 12);
e->mac[5] = (r[1] >> 4);
+ e->vid = (r[2] >> 4) & 0xfff;
+ e->rvid = (r[0] >> 20) & 0xfff;
+
/* Is it a unicast entry? check multicast bit */
if (!(e->mac[0] & 1)) {
e->is_static = !!((r[2] >> 18) & 1);
- e->vid = (r[2] >> 4) & 0xfff;
- e->rvid = (r[0] >> 20) & 0xfff;
e->port = (r[2] >> 24) & 0x3f;
e->block_da = !!(r[2] & (1 << 19));
e->block_sa = !!(r[2] & (1 << 20));
e->suspended = !!(r[2] & (1 << 17));
e->next_hop = !!(r[2] & (1 << 16));
- if (e->next_hop)
+ if (e->next_hop) {
pr_info("Found next hop entry, need to read data\n");
+ e->nh_vlan_target = !!(r[2] & BIT(15));
+ e->nh_route_id = (r[2] >> 4) & 0x1ff;
+ e->vid = e->rvid;
+ }
e->age = (r[2] >> 21) & 3;
e->valid = true;
if (!(r[2] & 0xc0fd0000)) /* Check for valid entry */
@@ -179,8 +335,13 @@ static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
} else {
e->valid = true;
e->type = L2_MULTICAST;
- e->mc_portmask_index = (r[2]>>6) & 0xfff;
+ e->mc_portmask_index = (r[2] >> 6) & 0xfff;
+ e->vid = e->rvid;
}
+ } else { // IPv4 and IPv6 multicast
+		e->vid = e->rvid = (r[0] >> 20) & 0xfff;
+ e->mc_gip = r[1];
+ e->mc_portmask_index = (r[2] >> 6) & 0xfff;
}
if (e->is_ip_mc) {
e->valid = true;
@@ -190,72 +351,180 @@ static void rtl839x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
e->valid = true;
e->type = IP6_MULTICAST;
}
+ // pr_info("%s: vid %d, rvid: %d\n", __func__, e->vid, e->rvid);
}
-static u64 rtl839x_read_l2_entry_using_hash(u32 hash, u32 position, struct rtl838x_l2_entry *e)
+/*
+ * Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry
+ */
+static void rtl839x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
{
- u64 entry;
- u32 r[3];
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+	pr_info("%s: vid %d, rvid: %d, nh_route_id %d\n", __func__,
+ e->vid, e->rvid, e->nh_route_id);
+ r[2] = e->is_ip_mc ? BIT(31) : 0;
+ r[2] |= e->is_ipv6_mc ? BIT(30) : 0;
+
+ if (!e->is_ip_mc && !e->is_ipv6_mc) {
+ r[0] = ((u32)e->mac[0]) << 12;
+ r[0] |= ((u32)e->mac[1]) << 4;
+ r[0] |= ((u32)e->mac[2]) >> 4;
+ r[1] = ((u32)e->mac[2]) << 28;
+ r[1] |= ((u32)e->mac[3]) << 20;
+ r[1] |= ((u32)e->mac[4]) << 12;
+ r[1] |= ((u32)e->mac[5]) << 4;
+
+ if (!(e->mac[0] & 1)) { // Not multicast
+ r[2] |= e->is_static ? BIT(18) : 0;
+ r[0] |= ((u32)e->rvid) << 20;
+ r[2] |= e->port << 24;
+ r[2] |= e->block_da ? BIT(19) : 0;
+ r[2] |= e->block_sa ? BIT(20) : 0;
+ r[2] |= e->suspended ? BIT(17) : 0;
+ r[2] |= ((u32)e->age) << 21;
+ if (e->next_hop) {
+ r[2] |= BIT(16);
+ r[2] |= e->nh_vlan_target ? BIT(15) : 0;
+ r[2] |= (e->nh_route_id & 0x7ff) << 4;
+ } else {
+ r[2] |= e->vid << 4;
+ }
+ pr_info("Write L2 NH: %08x %08x %08x\n", r[0], r[1], r[2]);
+ } else { // L2 Multicast
+ r[0] |= ((u32)e->rvid) << 20;
+ r[2] |= ((u32)e->mc_portmask_index) << 6;
+ }
+ } else { // IPv4 or IPv6 MC entry
+ r[0] = ((u32)e->rvid) << 20;
+ r[1] = e->mc_gip;
+ r[2] |= ((u32)e->mc_portmask_index) << 6;
+ }
+}
- /* Search in SRAM, with hash and at position in hash bucket (0-3) */
- u32 idx = (0 << 14) | (hash << 2) | position;
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
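+ * The table index is built as (hash << 2) | pos: bits 13..2 select the hash
+ * bucket and bits 1..0 the slot within it (bit 14 is kept clear here).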
+ */
+static u64 rtl839x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ int i;
- u32 cmd = 1 << 17 /* Execute cmd */
- | 0 << 16 /* Read */
- | 0 << 14 /* Table type 0b00 */
- | (idx & 0x3fff);
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
- sw_w32(cmd, RTL839X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL839X_TBL_ACCESS_L2_CTRL) & (1 << 17));
- r[0] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(0));
- r[1] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(1));
- r[2] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(2));
+ rtl_table_release(q);
rtl839x_fill_l2_entry(r, e);
+ if (!e->valid)
+ return 0;
+
+ return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid);
+}
+
+static void rtl839x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
+ int i;
+
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+
+ rtl839x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
- entry = (((u64) r[0]) << 12) | ((r[1] & 0xfffffff0) << 12) | ((r[2] >> 4) & 0xfff);
- return entry;
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
}
static u64 rtl839x_read_cam(int idx, struct rtl838x_l2_entry *e)
{
- u64 entry;
u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ int i;
- u32 cmd = 1 << 17 /* Execute cmd */
- | 0 << 16 /* Read */
- | 1 << 14 /* Table type 0b01 */
- | (idx & 0x3f);
- sw_w32(cmd, RTL839X_TBL_ACCESS_L2_CTRL);
- do { } while (sw_r32(RTL839X_TBL_ACCESS_L2_CTRL) & (1 << 17));
- r[0] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(0));
- r[1] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(1));
- r[2] = sw_r32(RTL839X_TBL_ACCESS_L2_DATA(2));
+ rtl_table_read(q, idx);
+	for (i = 0; i < 3; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+ rtl_table_release(q);
rtl839x_fill_l2_entry(r, e);
- if (e->valid)
- pr_info("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
- else
+ if (!e->valid)
return 0;
- entry = (((u64) r[0]) << 12) | ((r[1] & 0xfffffff0) << 12) | ((r[2] >> 4) & 0xfff);
- return entry;
+ pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
+
+	// Return the hash seed: the rvid concatenated with the MAC address
+ return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid);
+}
+
+static void rtl839x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl839x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
}
-static inline int rtl839x_vlan_profile(int profile)
+static u64 rtl839x_read_mcast_pmask(int idx)
{
- return RTL839X_VLAN_PROFILE(profile);
+ u64 portmask;
+ // Read MC_PMSK (2) via register RTL8390_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ portmask <<= 32;
+ portmask |= sw_r32(rtl_table_data(q, 1));
+ portmask >>= 11; // LSB is bit 11 in data registers
+ rtl_table_release(q);
+
+ return portmask;
}
-static inline int rtl839x_vlan_port_egr_filter(int port)
+static void rtl839x_write_mcast_pmask(int idx, u64 portmask)
{
- return RTL839X_VLAN_PORT_EGR_FLTR(port);
+	// Access MC_PMSK (2) via register RTL8390_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
+
+ portmask <<= 11; // LSB is bit 11 in data registers
+ sw_w32((u32)(portmask >> 32), rtl_table_data(q, 0));
+ sw_w32((u32)((portmask & 0xfffff800)), rtl_table_data(q, 1));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
}
-static inline int rtl839x_vlan_port_igr_filter(int port)
+static void rtl839x_vlan_profile_setup(int profile)
{
- return RTL839X_VLAN_PORT_IGR_FLTR(port);
+ u32 p[2];
+ u32 pmask_id = UNKNOWN_MC_PMASK;
+
+	p[0] = pmask_id; // Use portmask 0xfff for unknown IPv6 MC flooding
+ // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for IP/L2-MC traffic flooding
+ p[1] = 1 | pmask_id << 1 | pmask_id << 13;
+
+ sw_w32(p[0], RTL839X_VLAN_PROFILE(profile));
+ sw_w32(p[1], RTL839X_VLAN_PROFILE(profile) + 4);
+
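+	// 0x001fffffffffffff covers bits 0-52, i.e. all ports including CPU port 52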
+ rtl839x_write_mcast_pmask(UNKNOWN_MC_PMASK, 0x001fffffffffffff);
}
u64 rtl839x_traffic_get(int source)
@@ -275,7 +544,59 @@ void rtl839x_traffic_enable(int source, int dest)
void rtl839x_traffic_disable(int source, int dest)
{
- rtl839x_mask_port_reg_be(BIT(dest), 0, rtl839x_port_iso_ctrl(source));
+ rtl839x_mask_port_reg_be(BIT_ULL(dest), 0, rtl839x_port_iso_ctrl(source));
+}
+
+static void rtl839x_l2_learning_setup(void)
+{
+ /* Set portmask for broadcast (offset bit 12) and unknown unicast (offset 0)
+ * address flooding to the reserved entry in the portmask table used
+ * also for multicast flooding */
+ sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL839X_L2_FLD_PMSK);
+
+ // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ sw_w32((0x7fff << 2) | 0, RTL839X_L2_LRN_CONSTRT);
+
+ // Do not trap ARP packets to CPU_PORT
+ sw_w32(0, RTL839X_SPCL_TRAP_ARP_CTRL);
+}
+static void rtl839x_enable_learning(int port, bool enable)
+{
+ // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+
+ if (enable) {
+ // flood after 32k entries
+ sw_w32((0x7fff << 2) | 0, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ } else {
+ // just forward
+ sw_w32(0, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ }
+
+}
+static void rtl839x_enable_flood(int port, bool enable)
+{
+ u32 flood_mask = sw_r32(RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+
+ if (enable) {
+ // flood
+		flood_mask &= ~3;
+		flood_mask |= 0;
+ sw_w32(flood_mask, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ } else {
+ // drop (bit 1)
+		flood_mask &= ~3;
+		flood_mask |= 1;
+ sw_w32(flood_mask, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ }
+
+}
+static void rtl839x_enable_mcast_flood(int port, bool enable)
+{
+
+}
+static void rtl839x_enable_bcast_flood(int port, bool enable)
+{
+
}
irqreturn_t rtl839x_switch_irq(int irq, void *dev_id)
@@ -290,10 +611,10 @@ irqreturn_t rtl839x_switch_irq(int irq, void *dev_id)
rtl839x_set_port_reg_le(ports, RTL839X_ISR_PORT_LINK_STS_CHG);
pr_debug("RTL8390 Link change: status: %x, ports %llx\n", status, ports);
- for (i = 0; i < 52; i++) {
- if (ports & (1ULL << i)) {
+ for (i = 0; i < RTL839X_CPU_PORT; i++) {
+ if (ports & BIT_ULL(i)) {
link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS);
- if (link & (1ULL << i))
+ if (link & BIT_ULL(i))
dsa_port_phylink_mac_change(ds, i, true);
else
dsa_port_phylink_mac_change(ds, i, false);
@@ -358,10 +679,9 @@ int rtl839x_write_phy(u32 port, u32 page, u32 reg, u32 val)
return -ENOTSUPP;
mutex_lock(&smi_lock);
- /* Clear both port registers */
- sw_w32(0, RTL839X_PHYREG_PORT_CTRL(0));
- sw_w32(0, RTL839X_PHYREG_PORT_CTRL(0) + 4);
- sw_w32_mask(0, BIT(port), RTL839X_PHYREG_PORT_CTRL(port));
+
+ // Set PHY to access
+ rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
sw_w32_mask(0xffff0000, val << 16, RTL839X_PHYREG_DATA_CTRL);
@@ -383,6 +703,68 @@ int rtl839x_write_phy(u32 port, u32 page, u32 reg, u32 val)
return err;
}
+/*
+ * Read an mmd register of the PHY
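+ * Illustrative use (not taken from a caller in this patch):
+ * rtl839x_read_mmd_phy(port, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val) would read
+ * the EEE advertisement register of the PHY attached to that port.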
+ */
+int rtl839x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ int err = 0;
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ sw_w32_mask(0xffff << 16, port << 16, RTL839X_PHYREG_DATA_CTRL);
+
+	// Set MMD device number and register to read from
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
+
+ v = BIT(2) | BIT(0); // MMD-access | EXEC
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ do {
+ v = sw_r32(RTL839X_PHYREG_ACCESS_CTRL);
+ } while (v & BIT(0));
+ // There is no error-checking via BIT 1 of v, as it does not seem to be set correctly
+ *val = (sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff);
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
+
+/*
+ * Write to an mmd register of the PHY
+ */
+int rtl839x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
+{
+ int err = 0;
+ u32 v;
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access
+ rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
+
+ // Set data to write
+ sw_w32_mask(0xffff << 16, val << 16, RTL839X_PHYREG_DATA_CTRL);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
+
+ v = BIT(3) | BIT(2) | BIT(0); // WRITE | MMD-access | EXEC
+ sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
+
+ do {
+ v = sw_r32(RTL839X_PHYREG_ACCESS_CTRL);
+ } while (v & BIT(0));
+
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err);
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
void rtl8390_get_version(struct rtl838x_switch_priv *priv)
{
u32 info;
@@ -393,42 +775,21 @@ void rtl8390_get_version(struct rtl838x_switch_priv *priv)
priv->version = RTL8390_VERSION_A;
}
-u32 rtl839x_hash(struct rtl838x_switch_priv *priv, u64 seed)
+void rtl839x_vlan_profile_dump(int profile)
{
- u32 h1, h2, h;
+ u32 p[2];
- if (sw_r32(priv->r->l2_ctrl_0) & 1) {
- h1 = (u32) (((seed >> 60) & 0x3f) ^ ((seed >> 54) & 0x3f)
- ^ ((seed >> 36) & 0x3f) ^ ((seed >> 30) & 0x3f)
- ^ ((seed >> 12) & 0x3f) ^ ((seed >> 6) & 0x3f));
- h2 = (u32) (((seed >> 48) & 0x3f) ^ ((seed >> 42) & 0x3f)
- ^ ((seed >> 24) & 0x3f) ^ ((seed >> 18) & 0x3f)
- ^ (seed & 0x3f));
- h = (h1 << 6) | h2;
- } else {
- h = (seed >> 60)
- ^ ((((seed >> 48) & 0x3f) << 6) | ((seed >> 54) & 0x3f))
- ^ ((seed >> 36) & 0xfff) ^ ((seed >> 24) & 0xfff)
- ^ ((seed >> 12) & 0xfff) ^ (seed & 0xfff);
- }
-
- return h;
-}
-
-void rtl839x_vlan_profile_dump(int index)
-{
- u32 profile, profile1;
-
- if (index < 0 || index > 7)
+ if (profile < 0 || profile > 7)
return;
- profile1 = sw_r32(RTL839X_VLAN_PROFILE(index) + 4);
- profile = sw_r32(RTL839X_VLAN_PROFILE(index));
+ p[0] = sw_r32(RTL839X_VLAN_PROFILE(profile));
+ p[1] = sw_r32(RTL839X_VLAN_PROFILE(profile) + 4);
- pr_debug("VLAN %d: L2 learning: %d, L2 Unknown MultiCast Field %x, \
- IPv4 Unknown MultiCast Field %x, IPv6 Unknown MultiCast Field: %x",
- index, profile & 1, (profile >> 1) & 0xfff, (profile >> 13) & 0xfff,
- (profile1) & 0xfff);
+ pr_info("VLAN profile %d: L2 learning: %d, UNKN L2MC FLD PMSK %d, \
+ UNKN IPMC FLD PMSK %d, UNKN IPv6MC FLD PMSK: %d",
+ profile, p[1] & 1, (p[1] >> 1) & 0xfff, (p[1] >> 13) & 0xfff,
+ (p[0]) & 0xfff);
+ pr_info("VLAN profile %d: raw %08x, %08x\n", profile, p[0], p[1]);
}
static void rtl839x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
@@ -456,6 +817,942 @@ static void rtl839x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port
priv->r->exec_tbl0_cmd(cmd);
}
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+void rtl839x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+ // This works only for Ethernet ports, and on the RTL839X, ports above 47 are SFP
+ if (port >= 48)
+ return;
+
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0xf : 0x0;
+
+ // Set EEE for 100, 500, 1000MBit and 10GBit
+ sw_w32_mask(0xf << 8, v << 8, rtl839x_mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ v = enable ? 0x3 : 0x0;
+ sw_w32(v, RTL839X_EEE_CTRL(port));
+
+ priv->ports[port].eee_enabled = enable;
+}
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+int rtl839x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port)
+{
+ u64 link, a;
+
+ if (port >= 48)
+ return 0;
+
+ link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS);
+ if (!(link & BIT_ULL(port)))
+ return 0;
+
+ if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(8))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl839x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ a = rtl839x_get_port_reg_le(RTL839X_MAC_EEE_ABLTY);
+ pr_info("Link partner: %016llx\n", a);
+	if (a & BIT_ULL(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void rtl839x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+
+ // Set wake timer for TX and pause timer both to 0x21
+	sw_w32_mask(0xff << 20 | 0xff, 0x21 << 20 | 0x21, RTL839X_EEE_TX_TIMER_GELITE_CTRL);
+ // Set pause wake timer for GIGA-EEE to 0x11
+ sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_GIGA_CTRL);
+ // Set pause wake timer for 10GBit ports to 0x11
+ sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_10G_CTRL);
+
+ // Setup EEE on all ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl839x_port_eee_set(priv, i, enable);
+ }
+ priv->eee_enabled = enable;
+}
+
+static void rtl839x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+
+ sw_w32_mask(0, BIT(block), RTL839X_ACL_BLK_LOOKUP_CTRL);
+}
+
+/*
+ * Delete a range of Packet Inspection Engine rules
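+ * The clear command packs the range as (index_to << 13) | (index_from << 1) | BIT(0)
+ * into RTL839X_ACL_CLR_CTRL; bit 0 triggers execution and is polled until the
+ * hardware clears it.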
+ */
+static int rtl839x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+	u32 v = (index_from << 1) | (index_to << 13) | BIT(0);
+
+ pr_info("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL839X_ACL_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL839X_ACL_CLR_CTRL) & BIT(0));
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids are different
+ * on all SoCs
+ * On the RTL8390 the template mask registers are not word-aligned!
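+ * Field data packs two 16-bit values per register, filling r[5] down to r[0];
+ * the corresponding masks are offset by 8 bits into r[12]..r[6], so a mask
+ * half-word can straddle two registers (see the i % 2 handling below).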
+ */
+static void rtl839x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_SPM2:
+ data = pr->spm >> 32;
+ data_m = pr->spm_m >> 32;
+ break;
+ case TEMPLATE_FIELD_SPM3:
+ data = pr->spm >> 48;
+ data_m = pr->spm_m >> 48;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ data = pr->icmp_igmp;
+ data_m = pr->icmp_igmp_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+
+ // On the RTL8390, the mask fields are not word aligned!
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] |= ((u32)data_m << 8);
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 24;
+ r[11 - i / 2] |= ((u32)data_m) >> 8;
+ }
+ }
+}
+
+/*
+ * Creates the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure by reading the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids
+ * are different on all SoCs.
+ * On the RTL8390 the template mask registers are not word-aligned!
+ */
+void rtl839x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ if (!(i % 2)) {
+ // BUG: fix me after verifying writing
+ data = r[5 - i / 2];
+ data_m = r[12 - i / 2];
+ } else {
+ data = r[5 - i / 2] >> 16;
+ data_m = r[12 - i / 2] >> 16;
+ }
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ pr->spm = (pr->spn << 16) | data;
+ pr->spm_m = (pr->spn << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ pr->spm = data;
+ pr->spm_m = data_m;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ pr->otag = data;
+ pr->otag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ pr->smac[4] = data >> 8;
+ pr->smac[5] = data;
+ pr->smac_m[4] = data >> 8;
+ pr->smac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ pr->smac[2] = data >> 8;
+ pr->smac[3] = data;
+ pr->smac_m[2] = data >> 8;
+ pr->smac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ pr->smac[0] = data >> 8;
+ pr->smac[1] = data;
+ pr->smac_m[0] = data >> 8;
+ pr->smac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ pr->dmac[4] = data >> 8;
+ pr->dmac[5] = data;
+ pr->dmac_m[4] = data >> 8;
+ pr->dmac_m[5] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ pr->dmac[2] = data >> 8;
+ pr->dmac[3] = data;
+ pr->dmac_m[2] = data >> 8;
+ pr->dmac_m[3] = data;
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ pr->dmac[0] = data >> 8;
+ pr->dmac[1] = data;
+ pr->dmac_m[0] = data >> 8;
+ pr->dmac_m[1] = data;
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ pr->ethertype = data;
+ pr->ethertype_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ pr->itag = data;
+ pr->itag_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ pr->sip = data;
+ pr->sip_m = data_m;
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ pr->sip = (pr->sip << 16) | data;
+ pr->sip_m = (pr->sip << 16) | data_m;
+ break;
+ case TEMPLATE_FIELD_SIP2:
+ pr->is_ipv6 = true;
+			// Make use of limitations on the position of the match values
+ ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ pr->dip = data;
+ pr->dip_m = data_m;
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ pr->dip = (pr->dip << 16) | data;
+ pr->dip_m = (pr->dip << 16) | data_m;
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ pr->is_ipv6 = true;
+ ipv6_addr_set(&pr->dip6, pr->dip, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ ipv6_addr_set(&pr->dip6_m, pr->dip_m, r[5 - i / 2],
+ r[4 - i / 2], r[3 - i / 2]);
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ break;
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ pr->tos_proto = data;
+ pr->tos_proto_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ pr->sport = data;
+ pr->sport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ pr->dport = data;
+ pr->dport_m = data_m;
+ break;
+ case TEMPLATE_FIELD_ICMP_IGMP:
+ pr->icmp_igmp = data;
+ pr->icmp_igmp_m = data_m;
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+ }
+}
+
+static void rtl839x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->spmmask_fix = (r[6] >> 30) & 0x3;
+ pr->spn = (r[6] >> 24) & 0x3f;
+ pr->mgnt_vlan = (r[6] >> 23) & 1;
+ pr->dmac_hit_sw = (r[6] >> 22) & 1;
+ pr->not_first_frag = (r[6] >> 21) & 1;
+ pr->frame_type_l4 = (r[6] >> 18) & 7;
+ pr->frame_type = (r[6] >> 16) & 3;
+ pr->otag_fmt = (r[6] >> 15) & 1;
+ pr->itag_fmt = (r[6] >> 14) & 1;
+ pr->otag_exist = (r[6] >> 13) & 1;
+ pr->itag_exist = (r[6] >> 12) & 1;
+ pr->frame_type_l2 = (r[6] >> 10) & 3;
+ pr->tid = (r[6] >> 8) & 3;
+
+ pr->spmmask_fix_m = (r[12] >> 6) & 0x3;
+ pr->spn_m = r[12] & 0x3f;
+ pr->mgnt_vlan_m = (r[13] >> 31) & 1;
+ pr->dmac_hit_sw_m = (r[13] >> 30) & 1;
+ pr->not_first_frag_m = (r[13] >> 29) & 1;
+ pr->frame_type_l4_m = (r[13] >> 26) & 7;
+ pr->frame_type_m = (r[13] >> 24) & 3;
+ pr->otag_fmt_m = (r[13] >> 23) & 1;
+ pr->itag_fmt_m = (r[13] >> 22) & 1;
+ pr->otag_exist_m = (r[13] >> 21) & 1;
+ pr->itag_exist_m = (r[13] >> 20) & 1;
+ pr->frame_type_l2_m = (r[13] >> 18) & 3;
+ pr->tid_m = (r[13] >> 16) & 3;
+
+ pr->valid = r[13] & BIT(15);
+ pr->cond_not = r[13] & BIT(14);
+ pr->cond_and1 = r[13] & BIT(13);
+ pr->cond_and2 = r[13] & BIT(12);
+}
+
+static void rtl839x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = ((u32) (pr->spmmask_fix & 0x3)) << 30;
+ r[6] |= ((u32) (pr->spn & 0x3f)) << 24;
+ r[6] |= pr->mgnt_vlan ? BIT(23) : 0;
+ r[6] |= pr->dmac_hit_sw ? BIT(22) : 0;
+ r[6] |= pr->not_first_frag ? BIT(21) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 16;
+ r[6] |= pr->otag_fmt ? BIT(15) : 0;
+ r[6] |= pr->itag_fmt ? BIT(14) : 0;
+ r[6] |= pr->otag_exist ? BIT(13) : 0;
+ r[6] |= pr->itag_exist ? BIT(12) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10;
+ r[6] |= ((u32) (pr->tid & 0x3)) << 8;
+
+ r[12] |= ((u32) (pr->spmmask_fix_m & 0x3)) << 6;
+ r[12] |= (u32) (pr->spn_m & 0x3f);
+ r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0;
+ r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(29) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24;
+ r[13] |= pr->otag_fmt_m ? BIT(23) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(22) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(21) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(20) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18;
+ r[13] |= ((u32) (pr->tid_m & 0x3)) << 16;
+
+ r[13] |= pr->valid ? BIT(15) : 0;
+ r[13] |= pr->cond_not ? BIT(14) : 0;
+ r[13] |= pr->cond_and1 ? BIT(13) : 0;
+ r[13] |= pr->cond_and2 ? BIT(12) : 0;
+}
+
+static void rtl839x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ if (pr->drop) {
+ r[13] |= 0x9; // Set ACT_MASK_FWD & FWD_ACT = DROP
+ r[13] |= BIT(3);
+ } else {
+ r[13] |= pr->fwd_sel ? BIT(3) : 0;
+ r[13] |= pr->fwd_act;
+ }
+ r[13] |= pr->bypass_sel ? BIT(11) : 0;
+ r[13] |= pr->mpls_sel ? BIT(10) : 0;
+ r[13] |= pr->nopri_sel ? BIT(9) : 0;
+ r[13] |= pr->ovid_sel ? BIT(8) : 0;
+ r[13] |= pr->ivid_sel ? BIT(7) : 0;
+ r[13] |= pr->meter_sel ? BIT(6) : 0;
+ r[13] |= pr->mir_sel ? BIT(5) : 0;
+ r[13] |= pr->log_sel ? BIT(4) : 0;
+
+ r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 18;
+ r[14] |= pr->log_octets ? BIT(17) : 0;
+ r[14] |= ((u32)(pr->log_data & 0x7ff)) << 4;
+ r[14] |= (pr->mir_data & 0x3) << 3;
+ r[14] |= ((u32)(pr->meter_data >> 7)) & 0x7;
+ r[15] |= (u32)(pr->meter_data) << 26;
+	r[15] |= ((u32)(pr->ivid_act) & 0x3) << 23;
+	r[15] |= ((u32)(pr->ivid_data) & 0xfff) << 9;
+	r[15] |= ((u32)(pr->ovid_act) & 0x3) << 6;
+ r[15] |= ((u32)(pr->ovid_data) >> 4) & 0xff;
+ r[16] |= ((u32)(pr->ovid_data) & 0xf) << 28;
+ r[16] |= ((u32)(pr->nopri_data) & 0x7) << 20;
+ r[16] |= ((u32)(pr->mpls_act) & 0x7) << 20;
+ r[16] |= ((u32)(pr->mpls_lib_idx) & 0x7) << 20;
+ r[16] |= pr->bypass_all ? BIT(9) : 0;
+ r[16] |= pr->bypass_igr_stp ? BIT(8) : 0;
+ r[16] |= pr->bypass_ibc_sc ? BIT(7) : 0;
+}
+
+static void rtl839x_read_pie_action(u32 r[], struct pie_rule *pr)
+{
+ if (r[13] & BIT(3)) { // ACT_MASK_FWD set, is it a drop?
+ if ((r[14] & 0x7) == 1) {
+ pr->drop = true;
+ } else {
+ pr->fwd_sel = true;
+ pr->fwd_act = r[14] & 0x7;
+ }
+ }
+
+ pr->bypass_sel = r[13] & BIT(11);
+ pr->mpls_sel = r[13] & BIT(10);
+ pr->nopri_sel = r[13] & BIT(9);
+ pr->ovid_sel = r[13] & BIT(8);
+ pr->ivid_sel = r[13] & BIT(7);
+ pr->meter_sel = r[13] & BIT(6);
+ pr->mir_sel = r[13] & BIT(5);
+ pr->log_sel = r[13] & BIT(4);
+
+ // TODO: Read in data fields
+
+ pr->bypass_all = r[16] & BIT(9);
+ pr->bypass_igr_stp = r[16] & BIT(8);
+ pr->bypass_ibc_sc = r[16] & BIT(7);
+}
+
+void rtl839x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %06x\n", r[6] >> 8);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n",
+ (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8),
+ (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8),
+ (r[11] << 24) | (r[12] >> 8));
+ pr_info("R[13]: %08x\n", r[13]);
+ pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff);
+ pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf);
+ pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]);
+}
+
+void rtl839x_pie_rule_dump(struct pie_rule *pr)
+{
+ pr_info("Drop: %d, fwd: %d, ovid: %d, ivid: %d, flt: %d, log: %d, rmk: %d, meter: %d tagst: %d, mir: %d, nopri: %d, cpupri: %d, otpid: %d, itpid: %d, shape: %d\n",
+		pr->drop, pr->fwd_sel, pr->ovid_sel, pr->ivid_sel, pr->flt_sel, pr->log_sel, pr->rmk_sel, pr->meter_sel, pr->tagst_sel, pr->mir_sel, pr->nopri_sel,
+ pr->cpupri_sel, pr->otpid_sel, pr->itpid_sel, pr->shaper_sel);
+ if (pr->fwd_sel)
+ pr_info("FWD: %08x\n", pr->fwd_data);
+ pr_info("TID: %x, %x\n", pr->tid, pr->tid_m);
+}
+
+static int rtl839x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Read IACL table (2) via register 0
+	struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
+ u32 r[17];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
+
+ memset(pr, 0, sizeof(*pr));
+ rtl_table_read(q, idx);
+ for (i = 0; i < 17; i++)
+ r[i] = sw_r32(rtl_table_data(q, i));
+
+ rtl_table_release(q);
+
+ rtl839x_read_pie_fixed_fields(r, pr);
+ if (!pr->valid)
+ return 0;
+
+ pr_info("%s: template_selectors %08x, tid: %d\n", __func__, t_select, pr->tid);
+ rtl839x_pie_rule_dump_raw(r);
+
+ rtl839x_read_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl839x_read_pie_action(r, pr);
+
+ return 0;
+}
+
+static int rtl839x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (2) via register 0
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
+ u32 r[17];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
+
+ pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 17; i++)
+ r[i] = 0;
+
+ if (!pr->valid) {
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+ return 0;
+ }
+ rtl839x_write_pie_fixed_fields(r, pr);
+
+ pr_info("%s: template %d\n", __func__, (t_select >> (pr->tid * 3)) & 0x7);
+ rtl839x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 3)) & 0x7]);
+
+ rtl839x_write_pie_action(r, pr);
+
+ rtl839x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 17; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return 0;
+}
+
+static bool rtl839x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+static int rtl839x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl839x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+ int min_block = 0;
+ int max_block = priv->n_pie_blocks / 2;
+
+ if (pr->is_egress) {
+ min_block = max_block;
+ max_block = priv->n_pie_blocks;
+ }
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = min_block; block < max_block; block++) {
+ for (j = 0; j < 2; j++) {
+ t = (sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
+ idx = rtl839x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 2)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x3;
+ pr->id = idx;
+
+ rtl839x_pie_lookup_enable(priv, idx);
+ rtl839x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
+
+static void rtl839x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl839x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
+static void rtl839x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ // Power on all PIE blocks
+ for (i = 0; i < priv->n_pie_blocks; i++)
+ sw_w32_mask(0, BIT(i), RTL839X_PS_ACL_PWR_CTRL);
+
+ // Set ingress and egress ACL blocks to 50/50: first Egress block is 9
+ sw_w32_mask(0x1f, 9, RTL839X_ACL_CTRL); // Writes 9 to cutline field
+
+ // Include IPG in metering
+ sw_w32(1, RTL839X_METER_GLB_CTRL);
+
+ // Delete all present rules
+ rtl839x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
+
+ // Enable predefined templates 0, 1 for blocks 0-2
+ template_selectors = 0 | (1 << 3);
+ for (i = 0; i < 3; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 3-5
+ template_selectors = 2 | (3 << 3);
+ for (i = 3; i < 6; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 1, 4 for blocks 6-8
+	template_selectors = 1 | (4 << 3);
+ for (i = 6; i < 9; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 0, 1 for blocks 9-11
+ template_selectors = 0 | (1 << 3);
+ for (i = 9; i < 12; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for blocks 12-14
+ template_selectors = 2 | (3 << 3);
+ for (i = 12; i < 15; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 1, 4 for blocks 15-17
+	template_selectors = 1 | (4 << 3);
+ for (i = 15; i < 18; i++)
+ sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
+}
+
+static void rtl839x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u64 v;
+ // Read ROUTING table (2) via register RTL8390_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
+
+ pr_info("In %s\n", __func__);
+ rtl_table_read(r, idx);
+
+ // The table has a size of 2 registers
+ v = sw_r32(rtl_table_data(r, 0));
+ v <<= 32;
+ v |= sw_r32(rtl_table_data(r, 1));
+ rt->switch_mac_id = (v >> 12) & 0xf;
+ rt->nh.gw = v >> 16;
+
+ rtl_table_release(r);
+}
+
+static void rtl839x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+
+	// Access ROUTING table (2) via register RTL8390_TBL_1
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
+
+ pr_info("In %s\n", __func__);
+ sw_w32(rt->nh.gw >> 16, rtl_table_data(r, 0));
+ v = rt->nh.gw << 16;
+ v |= rt->switch_mac_id << 12;
+ sw_w32(v, rtl_table_data(r, 1));
+ rtl_table_write(r, idx);
+
+ rtl_table_release(r);
+}
+
+/*
+ * Configure the switch's own MAC addresses used when routing packets
+ */
+static void rtl839x_setup_port_macs(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ struct net_device *dev;
+ u64 mac;
+
+ pr_info("%s: got port %08x\n", __func__, (u32)priv->ports[priv->cpu_port].dp);
+ dev = priv->ports[priv->cpu_port].dp->slave;
+ mac = ether_addr_to_u64(dev->dev_addr);
+
+ for (i = 0; i < 15; i++) {
+ mac++; // BUG: VRRP for testing
+ sw_w32(mac >> 32, RTL839X_ROUTING_SA_CTRL + i * 8);
+ sw_w32(mac, RTL839X_ROUTING_SA_CTRL + i * 8 + 4);
+ }
+}
+
+int rtl839x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ rtl839x_setup_port_macs(priv);
+
+ return 0;
+}
+
+static u32 rtl839x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (4) via register RTL8390_TBL_0
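+	// Two 32-bit counters share one LOG table row; bit 0 of the id selects the half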
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ pr_info("Registers: %08x %08x\n",
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
+ // The table has a size of 2 registers
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
+
+static void rtl839x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (4) via register RTL8390_TBL_0
+ struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
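+/*
+ * Each trunk group selects one of four hash algorithms via a 2-bit index; 16
+ * groups share one TRK_HASH_IDX_CTRL register, and the field mask of the
+ * selected algorithm is written to TRK_HASH_CTRL[algoidx].
+ */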
+void rtl839x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ sw_w32_mask(3 << ((group & 0xf) << 1), algoidx << ((group & 0xf) << 1), RTL839X_TRK_HASH_IDX_CTRL + ((group >> 4) << 2));
+ sw_w32(algomsk, RTL839X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
+void rtl839x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ switch(type) {
+ case BPDU:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL839X_RMA_BPDU_CTRL + ((port >> 4) << 2));
+ break;
+ case PTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL839X_RMA_PTP_CTRL + ((port >> 4) << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), (action & 0x3) << ((port & 0xf) << 1), RTL839X_RMA_LLTP_CTRL + ((port >> 4) << 2));
+ break;
+ default:
+ break;
+ }
+}
+
const struct rtl838x_reg rtl839x_reg = {
.mask_port_reg_be = rtl839x_mask_port_reg_be,
.set_port_reg_be = rtl839x_set_port_reg_be,
@@ -487,6 +1784,8 @@ const struct rtl838x_reg rtl839x_reg = {
.vlan_set_tagged = rtl839x_vlan_set_tagged,
.vlan_set_untagged = rtl839x_vlan_set_untagged,
.vlan_profile_dump = rtl839x_vlan_profile_dump,
+ .vlan_profile_setup = rtl839x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl839x_vlan_fwd_on_inner,
.stp_get = rtl839x_stp_get,
.stp_set = rtl839x_stp_set,
.mac_force_mode_ctrl = rtl839x_mac_force_mode_ctrl,
@@ -502,12 +1801,62 @@ const struct rtl838x_reg rtl839x_reg = {
.mac_rx_pause_sts = RTL839X_MAC_RX_PAUSE_STS,
.mac_tx_pause_sts = RTL839X_MAC_TX_PAUSE_STS,
.read_l2_entry_using_hash = rtl839x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl839x_write_l2_entry_using_hash,
.read_cam = rtl839x_read_cam,
- .vlan_port_egr_filter = RTL839X_VLAN_PORT_EGR_FLTR(0),
- .vlan_port_igr_filter = RTL839X_VLAN_PORT_IGR_FLTR(0),
+ .write_cam = rtl839x_write_cam,
+ .vlan_port_egr_filter = RTL839X_VLAN_PORT_EGR_FLTR,
+ .vlan_port_igr_filter = RTL839X_VLAN_PORT_IGR_FLTR,
.vlan_port_pb = RTL839X_VLAN_PORT_PB_VLAN,
.vlan_port_tag_sts_ctrl = RTL839X_VLAN_PORT_TAG_STS_CTRL,
.trk_mbr_ctr = rtl839x_trk_mbr_ctr,
.rma_bpdu_fld_pmask = RTL839X_RMA_BPDU_FLD_PMSK,
+ .init_eee = rtl839x_init_eee,
+ .port_eee_set = rtl839x_port_eee_set,
+ .eee_port_ability = rtl839x_eee_port_ability,
+ .l2_hash_seed = rtl839x_l2_hash_seed,
+ .l2_hash_key = rtl839x_l2_hash_key,
+ .read_mcast_pmask = rtl839x_read_mcast_pmask,
+ .write_mcast_pmask = rtl839x_write_mcast_pmask,
+ .l2_learning_setup = rtl839x_l2_learning_setup,
+ .pie_init = rtl839x_pie_init,
+ .pie_rule_read = rtl839x_pie_rule_read,
+ .pie_rule_write = rtl839x_pie_rule_write,
+ .pie_rule_add = rtl839x_pie_rule_add,
+ .pie_rule_rm = rtl839x_pie_rule_rm,
+ .route_read = rtl839x_route_read,
+ .route_write = rtl839x_route_write,
+ .l3_setup = rtl839x_l3_setup,
+ .packet_cntr_read = rtl839x_packet_cntr_read,
+ .packet_cntr_clear = rtl839x_packet_cntr_clear,
+ .enable_learning = rtl839x_enable_learning,
+ .enable_flood = rtl839x_enable_flood,
+ .rma_bpdu_ctrl = RTL839X_RMA_BPDU_CTRL,
+ .rma_ptp_ctrl = RTL839X_RMA_PTP_CTRL,
+ .rma_lltp_ctrl = RTL839X_RMA_LLTP_CTRL,
+ .rma_bpdu_ctrl_div = 16,
+ .rma_ptp_ctrl_div = 16,
+ .rma_lltp_ctrl_div = 16,
+ .storm_ctrl_port_uc = RTL839X_STORM_CTRL_PORT_UC_0(0),
+ .storm_ctrl_port_bc = RTL839X_STORM_CTRL_PORT_BC_0(0),
+ .storm_ctrl_port_mc = RTL839X_STORM_CTRL_PORT_MC_0(0),
+ .storm_ctrl_port_uc_shift = 3,
+ .storm_ctrl_port_bc_shift = 3,
+ .storm_ctrl_port_mc_shift = 3,
+ .vlan_ctrl = RTL839X_VLAN_CTRL,
.spcl_trap_eapol_ctrl = RTL839X_SPCL_TRAP_EAPOL_CTRL,
+ .spcl_trap_arp_ctrl = RTL839X_SPCL_TRAP_ARP_CTRL,
+ .spcl_trap_igmp_ctrl = RTL839X_SPCL_TRAP_IGMP_CTRL,
+ .spcl_trap_ipv6_ctrl = RTL839X_SPCL_TRAP_IPV6_CTRL,
+ .spcl_trap_switch_mac_ctrl = RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL,
+ .spcl_trap_switch_ipv4_addr_ctrl = RTL839X_SPCL_TRAP_SWITCH_IPV4_ADDR_CTRL,
+ .spcl_trap_crc_ctrl = RTL839X_SPCL_TRAP_CRC_CTRL,
+ .spcl_trap_ctrl = RTL839X_SPCL_TRAP_CTRL,
+ .sflow_ctrl = RTL839X_SFLOW_CTRL,
+ .sflow_port_rate_ctrl = RTL839X_SFLOW_PORT_RATE_CTRL,
+ .trk_hash_ctrl = RTL839X_TRK_HASH_CTRL,
+ .trk_hash_idx_ctrl = RTL839X_TRK_HASH_IDX_CTRL,
+ .set_distribution_algorithm = rtl839x_set_distribution_algorithm,
+ .set_receive_management_action = rtl839x_set_receive_management_action,
};
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl83xx.h b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl83xx.h
index fd0455a6cd..413ce1f538 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl83xx.h
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl83xx.h
@@ -4,7 +4,7 @@
#define _NET_DSA_RTL83XX_H
#include <net/dsa.h>
-#include "rtl838x.h"
+#include <linux/rtl838x.h>
#define RTL8380_VERSION_A 'A'
@@ -74,6 +74,15 @@ inline u32 rtl_table_data_r(struct table_reg *r, int i);
inline void rtl_table_data_w(struct table_reg *r, u32 v, int i);
void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv);
+
+int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh);
+struct rtl83xx_route *rtl83xx_route_find_gw(struct rtl838x_switch_priv *priv, __be32 ip);
+struct rtl83xx_route *route_alloc(struct rtl838x_switch_priv *priv, u32 ip);
+
+int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv);
+
+int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv);
+
int read_phy(u32 port, u32 page, u32 reg, u32 *val);
int write_phy(u32 port, u32 page, u32 reg, u32 val);
@@ -114,10 +123,13 @@ u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed);
irqreturn_t rtl930x_switch_irq(int irq, void *dev_id);
irqreturn_t rtl839x_switch_irq(int irq, void *dev_id);
void rtl930x_vlan_profile_dump(int index);
-int rtl9300_sds_power(int mac, int val);
+int rtl9300_sds_power(struct rtl838x_switch_priv *priv, int mac, bool power_on);
void rtl9300_sds_rst(int sds_num, u32 mode);
void rtl930x_print_matrix(void);
+int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info);
+int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port);
+
/* RTL931x-specific */
irqreturn_t rtl931x_switch_irq(int irq, void *dev_id);
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl930x.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl930x.c
index 59c283903b..b13f9eb8ed 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl930x.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl930x.c
@@ -1,11 +1,105 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include <linux/inetdevice.h>
+
#include "rtl83xx.h"
extern struct mutex smi_lock;
extern struct rtl83xx_soc_info soc_info;
+/* Definition of the RTL930X-specific template field IDs as used in the PIE */
+enum template_field_id {
+ TEMPLATE_FIELD_SPM0 = 0, // Source portmask ports 0-15
+ TEMPLATE_FIELD_SPM1 = 1, // Source portmask ports 16-31
+ TEMPLATE_FIELD_DMAC0 = 2, // Destination MAC [15:0]
+ TEMPLATE_FIELD_DMAC1 = 3, // Destination MAC [31:16]
+ TEMPLATE_FIELD_DMAC2 = 4, // Destination MAC [47:32]
+ TEMPLATE_FIELD_SMAC0 = 5, // Source MAC [15:0]
+ TEMPLATE_FIELD_SMAC1 = 6, // Source MAC [31:16]
+ TEMPLATE_FIELD_SMAC2 = 7, // Source MAC [47:32]
+ TEMPLATE_FIELD_ETHERTYPE = 8, // Ethernet frame type field
+ TEMPLATE_FIELD_OTAG = 9,
+ TEMPLATE_FIELD_ITAG = 10,
+ TEMPLATE_FIELD_SIP0 = 11,
+ TEMPLATE_FIELD_SIP1 = 12,
+ TEMPLATE_FIELD_DIP0 = 13,
+ TEMPLATE_FIELD_DIP1 = 14,
+ TEMPLATE_FIELD_IP_TOS_PROTO = 15,
+ TEMPLATE_FIELD_L4_SPORT = 16,
+ TEMPLATE_FIELD_L4_DPORT = 17,
+ TEMPLATE_FIELD_L34_HEADER = 18,
+ TEMPLATE_FIELD_TCP_INFO = 19,
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 20,
+ TEMPLATE_FIELD_FIELD_SELECTOR_0 = 21,
+ TEMPLATE_FIELD_FIELD_SELECTOR_1 = 22,
+ TEMPLATE_FIELD_FIELD_SELECTOR_2 = 23,
+ TEMPLATE_FIELD_FIELD_SELECTOR_3 = 24,
+ TEMPLATE_FIELD_FIELD_SELECTOR_4 = 25,
+ TEMPLATE_FIELD_FIELD_SELECTOR_5 = 26,
+ TEMPLATE_FIELD_SIP2 = 27,
+ TEMPLATE_FIELD_SIP3 = 28,
+ TEMPLATE_FIELD_SIP4 = 29,
+ TEMPLATE_FIELD_SIP5 = 30,
+ TEMPLATE_FIELD_SIP6 = 31,
+ TEMPLATE_FIELD_SIP7 = 32,
+ TEMPLATE_FIELD_DIP2 = 33,
+ TEMPLATE_FIELD_DIP3 = 34,
+ TEMPLATE_FIELD_DIP4 = 35,
+ TEMPLATE_FIELD_DIP5 = 36,
+ TEMPLATE_FIELD_DIP6 = 37,
+ TEMPLATE_FIELD_DIP7 = 38,
+ TEMPLATE_FIELD_PKT_INFO = 39,
+ TEMPLATE_FIELD_FLOW_LABEL = 40,
+ TEMPLATE_FIELD_DSAP_SSAP = 41,
+ TEMPLATE_FIELD_SNAP_OUI = 42,
+ TEMPLATE_FIELD_FWD_VID = 43,
+ TEMPLATE_FIELD_RANGE_CHK = 44,
+ TEMPLATE_FIELD_VLAN_GMSK = 45, // VLAN Group Mask/IP range check
+ TEMPLATE_FIELD_DLP = 46,
+ TEMPLATE_FIELD_META_DATA = 47,
+ TEMPLATE_FIELD_SRC_FWD_VID = 48,
+ TEMPLATE_FIELD_SLP = 49,
+};
+
+/* The meaning of TEMPLATE_FIELD_VLAN depends on phase and the configuration in
+ * RTL930X_PIE_CTRL. We use always the same definition and map to the inner VLAN tag:
+ */
+#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG
+
+// Number of fixed templates predefined in the RTL9300 SoC
+#define N_FIXED_TEMPLATES 5
+// RTL9300 specific predefined templates
+static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
+{
+ {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_SMAC0, TEMPLATE_FIELD_SMAC1, TEMPLATE_FIELD_SMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_DSAP_SSAP,
+ TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_IP_TOS_PROTO, TEMPLATE_FIELD_TCP_INFO,
+ TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT, TEMPLATE_FIELD_VLAN,
+ TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ }, {
+ TEMPLATE_FIELD_DMAC0, TEMPLATE_FIELD_DMAC1, TEMPLATE_FIELD_DMAC2,
+ TEMPLATE_FIELD_VLAN, TEMPLATE_FIELD_ETHERTYPE, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_DIP0,
+ TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT
+ }, {
+ TEMPLATE_FIELD_DIP0, TEMPLATE_FIELD_DIP1, TEMPLATE_FIELD_DIP2,
+ TEMPLATE_FIELD_DIP3, TEMPLATE_FIELD_DIP4, TEMPLATE_FIELD_DIP5,
+ TEMPLATE_FIELD_DIP6, TEMPLATE_FIELD_DIP7, TEMPLATE_FIELD_IP_TOS_PROTO,
+ TEMPLATE_FIELD_TCP_INFO, TEMPLATE_FIELD_L4_SPORT, TEMPLATE_FIELD_L4_DPORT
+ }, {
+ TEMPLATE_FIELD_SIP0, TEMPLATE_FIELD_SIP1, TEMPLATE_FIELD_SIP2,
+ TEMPLATE_FIELD_SIP3, TEMPLATE_FIELD_SIP4, TEMPLATE_FIELD_SIP5,
+ TEMPLATE_FIELD_SIP6, TEMPLATE_FIELD_SIP7, TEMPLATE_FIELD_VLAN,
+		TEMPLATE_FIELD_RANGE_CHK, TEMPLATE_FIELD_SPM0, TEMPLATE_FIELD_SPM1
+ },
+};
+
void rtl930x_print_matrix(void)
{
int i;
@@ -54,7 +148,7 @@ inline static int rtl930x_trk_mbr_ctr(int group)
static void rtl930x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v, w;
- // Read VLAN table (0) via register 0
+ // Read VLAN table (1) via register 0
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1);
rtl_table_read(r, vlan);
@@ -99,22 +193,28 @@ static void rtl930x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
rtl_table_release(r);
}
-void rtl930x_vlan_profile_dump(int index)
+void rtl930x_vlan_profile_dump(int profile)
{
- u32 profile[5];
+ u32 p[5];
- if (index < 0 || index > 7)
+ if (profile < 0 || profile > 7)
return;
- profile[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(index));
- profile[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(index) + 4);
- profile[2] = sw_r32(RTL930X_VLAN_PROFILE_SET(index) + 8) & 0x1FFFFFFF;
- profile[3] = sw_r32(RTL930X_VLAN_PROFILE_SET(index) + 12) & 0x1FFFFFFF;
- profile[4] = sw_r32(RTL930X_VLAN_PROFILE_SET(index) + 16) & 0x1FFFFFFF;
-
- pr_debug("VLAN %d: L2 learning: %d, L2 Unknown MultiCast Field %x, \
- IPv4 Unknown MultiCast Field %x, IPv6 Unknown MultiCast Field: %x",
- index, profile[0] & (3 << 21), profile[2], profile[3], profile[4]);
+ p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile));
+ p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4);
+ p[2] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 8) & 0x1FFFFFFF;
+ p[3] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 12) & 0x1FFFFFFF;
+ p[4] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 16) & 0x1FFFFFFF;
+
+ pr_info("VLAN %d: L2 learn: %d; Unknown MC PMasks: L2 %0x, IPv4 %0x, IPv6: %0x",
+ profile, p[0] & (3 << 21), p[2], p[3], p[4]);
+ pr_info(" Routing enabled: IPv4 UC %c, IPv6 UC %c, IPv4 MC %c, IPv6 MC %c\n",
+ p[0] & BIT(17) ? 'y' : 'n', p[0] & BIT(16) ? 'y' : 'n',
+ p[0] & BIT(13) ? 'y' : 'n', p[0] & BIT(12) ? 'y' : 'n');
+ pr_info(" Bridge enabled: IPv4 MC %c, IPv6 MC %c,\n",
+ p[0] & BIT(15) ? 'y' : 'n', p[0] & BIT(14) ? 'y' : 'n');
+ pr_info("VLAN profile %d: raw %08x %08x %08x %08x %08x\n",
+ profile, p[0], p[1], p[2], p[3], p[4]);
}
static void rtl930x_vlan_set_untagged(u32 vlan, u64 portmask)
@@ -126,6 +226,51 @@ static void rtl930x_vlan_set_untagged(u32 vlan, u64 portmask)
rtl_table_release(r);
}
+/* Sets whether L2 forwarding is based on the inner or the outer VLAN tag
+ */
+static void rtl930x_vlan_fwd_on_inner(int port, bool is_set)
+{
+ // Always set all tag modes to fwd based on either inner or outer tag
+ if (is_set)
+ sw_w32_mask(0, 0xf, RTL930X_VLAN_PORT_FWD + (port << 2));
+ else
+ sw_w32_mask(0xf, 0, RTL930X_VLAN_PORT_FWD + (port << 2));
+}
+
+static void rtl930x_vlan_profile_setup(int profile)
+{
+ u32 p[5];
+
+ pr_info("In %s\n", __func__);
+ p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile));
+ p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4);
+
+ // Enable routing of Ipv4/6 Unicast and IPv4/6 Multicast traffic
+ p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12);
+	p[2] = 0x1fffffff; // L2 unknown MC flooding portmask all ports, including the CPU-port
+	p[3] = 0x1fffffff; // IPv4 unknown MC flooding portmask
+	p[4] = 0x1fffffff; // IPv6 unknown MC flooding portmask
+
+ sw_w32(p[0], RTL930X_VLAN_PROFILE_SET(profile));
+ sw_w32(p[1], RTL930X_VLAN_PROFILE_SET(profile) + 4);
+ sw_w32(p[2], RTL930X_VLAN_PROFILE_SET(profile) + 8);
+ sw_w32(p[3], RTL930X_VLAN_PROFILE_SET(profile) + 12);
+ sw_w32(p[4], RTL930X_VLAN_PROFILE_SET(profile) + 16);
+ pr_info("Leaving %s\n", __func__);
+}
+
+static void rtl930x_l2_learning_setup(void)
+{
+ // Portmask for flooding broadcast traffic
+ sw_w32(0x1fffffff, RTL930X_L2_BC_FLD_PMSK);
+
+ // Portmask for flooding unicast traffic with unknown destination
+ sw_w32(0x1fffffff, RTL930X_L2_UNKN_UC_FLD_PMSK);
+
+ // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ sw_w32((0x7fff << 2) | 0, RTL930X_L2_LRN_CONSTRT_CTRL);
+}
+
static void rtl930x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
int i;
@@ -168,12 +313,74 @@ static inline int rtl930x_mac_link_spd_sts(int p)
return RTL930X_MAC_LINK_SPD_STS(p);
}
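+
+/* The seed for the L2 hash places the 48-bit MAC address in the low bits and the VLAN ID above it */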
+static u64 rtl930x_l2_hash_seed(u64 mac, u32 vid)
+{
+ u64 v = vid;
+
+ v <<= 48;
+ v |= mac;
+
+ return v;
+}
+
+/*
+ * Calculate both the block 0 and the block 1 hash by applying the same hash
+ * algorithm as the one currently used by the ASIC to the seed, and return
+ * both hashes in the lower and higher word of the return value, since only
+ * 12 bits of each hash are significant
+ */
+static u32 rtl930x_l2_hash_key(struct rtl838x_switch_priv *priv, u64 seed)
+{
+ u32 k0, k1, h1, h2, h;
+
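+	// Hash for block 0: XOR the seed in 11-bit chunks (plus its top 5 bits)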
+ k0 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff)
+ ^ ((seed >> 33) & 0x7ff) ^ ((seed >> 22) & 0x7ff)
+ ^ ((seed >> 11) & 0x7ff) ^ (seed & 0x7ff));
+
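+	// Block 1 uses bit-rotated versions of the 2nd and 4th 11-bit chunk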
+ h1 = (seed >> 11) & 0x7ff;
+ h1 = ((h1 & 0x1f) << 6) | ((h1 >> 5) & 0x3f);
+
+ h2 = (seed >> 33) & 0x7ff;
+	h2 = ((h2 & 0x3f) << 5) | ((h2 >> 6) & 0x3f);
+
+	k1 = (u32) (((seed >> 55) & 0x1f) ^ ((seed >> 44) & 0x7ff) ^ h2
+		^ ((seed >> 22) & 0x7ff) ^ h1
+		^ (seed & 0x7ff));
+
+ // Algorithm choice for block 0
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(0))
+ h = k1;
+ else
+ h = k0;
+
+	/* Algorithm choice for block 1
+	 * Since k0 and k1 are < 2048, adding 2048 offsets the hash into the second
+	 * half of the hash space: 2048 is the hash-table size of 16384 divided by
+	 * 4 entries per bucket and divided by 2 to split the hash space in two.
+	 */
+ if (sw_r32(RTL930X_L2_CTRL) & BIT(1))
+ h |= (k1 + 2048) << 16;
+ else
+ h |= (k0 + 2048) << 16;
+
+ return h;
+}
+
+/*
+ * Fills an L2 entry structure from the SoC registers
+ */
static void rtl930x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
{
+ pr_debug("In %s valid?\n", __func__);
e->valid = !!(r[2] & BIT(31));
if (!e->valid)
return;
+ pr_debug("In %s is valid\n", __func__);
+ e->is_ip_mc = false;
+ e->is_ipv6_mc = false;
+
// TODO: Is there not a function to copy directly MAC memory?
e->mac[0] = (r[0] >> 24);
e->mac[1] = (r[0] >> 16);
@@ -182,61 +389,165 @@ static void rtl930x_fill_l2_entry(u32 r[], struct rtl838x_l2_entry *e)
e->mac[4] = (r[1] >> 24);
e->mac[5] = (r[1] >> 16);
+ e->next_hop = !!(r[2] & BIT(12));
+ e->rvid = r[1] & 0xfff;
+
/* Is it a unicast entry? check multicast bit */
if (!(e->mac[0] & 1)) {
e->type = L2_UNICAST;
e->is_static = !!(r[2] & BIT(14));
- e->vid = r[2] & 0xfff;
- e->rvid = r[1] & 0xfff;
e->port = (r[2] >> 20) & 0x3ff;
// Check for trunk port
if (r[2] & BIT(30)) {
- e->stackDev = (e->port >> 9) & 1;
+ e->is_trunk = true;
+ e->stack_dev = (e->port >> 9) & 1;
e->trunk = e->port & 0x3f;
} else {
- e->stackDev = (e->port >> 6) & 0xf;
+ e->is_trunk = false;
+ e->stack_dev = (e->port >> 6) & 0xf;
e->port = e->port & 0x3f;
}
e->block_da = !!(r[2] & BIT(15));
e->block_sa = !!(r[2] & BIT(16));
e->suspended = !!(r[2] & BIT(13));
- e->next_hop = !!(r[2] & BIT(12));
e->age = (r[2] >> 17) & 3;
e->valid = true;
-
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop) {
+ e->nh_route_id = r[2] & 0x7ff;
+ e->vid = 0;
+ } else {
+ e->vid = r[2] & 0xfff;
+ e->nh_route_id = 0;
+ }
} else {
e->valid = true;
e->type = L2_MULTICAST;
- e->mc_portmask_index = (r[2]>>6) & 0xfff;
+ e->mc_portmask_index = (r[2] >> 16) & 0x3ff;
+ }
+}
+
+/*
+ * Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry
+ */
+static void rtl930x_fill_l2_row(u32 r[], struct rtl838x_l2_entry *e)
+{
+ u32 port;
+
+ if (!e->valid) {
+ r[0] = r[1] = r[2] = 0;
+ return;
+ }
+
+ r[2] = BIT(31); // Set valid bit
+
+ r[0] = ((u32)e->mac[0]) << 24 | ((u32)e->mac[1]) << 16
+ | ((u32)e->mac[2]) << 8 | ((u32)e->mac[3]);
+ r[1] = ((u32)e->mac[4]) << 24 | ((u32)e->mac[5]) << 16;
+
+ r[2] |= e->next_hop ? BIT(12) : 0;
+
+ if (e->type == L2_UNICAST) {
+ r[2] |= e->is_static ? BIT(14) : 0;
+ r[1] |= e->rvid & 0xfff;
+ r[2] |= (e->port & 0x3ff) << 20;
+ if (e->is_trunk) {
+ r[2] |= BIT(30);
+ port = e->stack_dev << 9 | (e->port & 0x3f);
+ } else {
+ port = (e->stack_dev & 0xf) << 6;
+ port |= e->port & 0x3f;
+ }
+ r[2] |= port << 20;
+ r[2] |= e->block_da ? BIT(15) : 0;
+		r[2] |= e->block_sa ? BIT(16) : 0;
+ r[2] |= e->suspended ? BIT(13) : 0;
+ r[2] |= (e->age & 0x3) << 17;
+ // the UC_VID field in hardware is used for the VID or for the route id
+ if (e->next_hop)
+ r[2] |= e->nh_route_id & 0x7ff;
+ else
+ r[2] |= e->vid & 0xfff;
+ } else { // L2_MULTICAST
+ r[2] |= (e->mc_portmask_index & 0x3ff) << 16;
+ r[2] |= e->mc_mac_index & 0x7ff;
}
}
-static u64 rtl930x_read_l2_entry_using_hash(u32 hash, u32 position, struct rtl838x_l2_entry *e)
+/*
+ * Read an L2 UC or MC entry out of a hash bucket of the L2 forwarding table
+ * hash is the id of the bucket and pos is the position of the entry in that bucket
+ * The data read from the SoC is filled into rtl838x_l2_entry
+ */
+static u64 rtl930x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
{
- u64 entry;
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
- u32 idx = (0 << 14) | (hash << 2) | position;
+ u32 idx;
int i;
+ u64 mac;
+ u64 seed;
+
+ pr_debug("%s: hash %08x, pos: %d\n", __func__, hash, pos);
+
+	/* On the RTL93xx, 2 different hash algorithms are used, giving a total of
+	 * 8 buckets that need to be searched, 4 for each hash half.
+	 * Use the second hash space when the bucket index is 4 or higher. */
+ if (pos >= 4) {
+ pos -= 4;
+ hash >>= 16;
+ } else {
+ hash &= 0xffff;
+ }
+
+ idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
rtl_table_read(q, idx);
- for (i= 0; i < 3; i++)
+ for (i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
rtl930x_fill_l2_entry(r, e);
+
+ pr_debug("%s: valid: %d, nh: %d\n", __func__, e->valid, e->next_hop);
if (!e->valid)
return 0;
- entry = ((u64)r[0] << 32) | (r[1] & 0xffff0000) | e->vid;
- return entry;
+ mac = ((u64)e->mac[0]) << 40 | ((u64)e->mac[1]) << 32 | ((u64)e->mac[2]) << 24
+ | ((u64)e->mac[3]) << 16 | ((u64)e->mac[4]) << 8 | ((u64)e->mac[5]);
+
+ seed = rtl930x_l2_hash_seed(mac, e->rvid);
+ pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed);
+ // return vid with concatenated mac as unique id
+ return seed;
+}
+
+static void rtl930x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
+ u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ int i;
+
+ pr_info("%s: hash %d, pos %d\n", __func__, hash, pos);
+ pr_info("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx,
+		e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5]);
+
+ rtl930x_fill_l2_row(r, e);
+ pr_info("%s: %d: %08x %08x %08x\n", __func__, idx, r[0], r[1], r[2]);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
}
static u64 rtl930x_read_cam(int idx, struct rtl838x_l2_entry *e)
{
- u64 entry;
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1);
int i;
@@ -251,9 +562,68 @@ static u64 rtl930x_read_cam(int idx, struct rtl838x_l2_entry *e)
if (!e->valid)
return 0;
- entry = ((u64)r[0] << 32) | (r[1] & 0xffff0000) | e->vid;
+ // return mac with concatenated vid as unique id
+ return ((u64)r[0] << 28) | ((r[1] & 0xffff0000) >> 4) | e->vid;
+}
+
+static void rtl930x_write_cam(int idx, struct rtl838x_l2_entry *e)
+{
+ u32 r[3];
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); // Access L2 Table 1
+ int i;
+
+ rtl930x_fill_l2_row(r, e);
+
+	for (i = 0; i < 3; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+}
+
+static void dump_l2_entry(struct rtl838x_l2_entry *e)
+{
+ pr_info("MAC: %02x:%02x:%02x:%02x:%02x:%02x vid: %d, rvid: %d, port: %d, valid: %d\n",
+ e->mac[0], e->mac[1], e->mac[2], e->mac[3], e->mac[4], e->mac[5],
+ e->vid, e->rvid, e->port, e->valid);
+ pr_info("Type: %d, is_static: %d, is_ip_mc: %d, is_ipv6_mc: %d, block_da: %d\n",
+ e->type, e->is_static, e->is_ip_mc, e->is_ipv6_mc, e->block_da);
+ pr_info(" block_sa: %d, suspended: %d, next_hop: %d, age: %d, is_trunk: %d, trunk: %d\n",
+ e->block_sa, e->suspended, e->next_hop, e->age, e->is_trunk, e->trunk);
+ if (e->is_ip_mc || e->is_ipv6_mc)
+ pr_info(" mc_portmask_index: %d, mc_gip: %d, mc_sip: %d\n",
+ e->mc_portmask_index, e->mc_gip, e->mc_sip);
+	pr_info("  stack_dev: %d, nh_route_id: %d, port: %d\n",
+ e->stack_dev, e->nh_route_id, e->port);
+}
+
+static u64 rtl930x_read_mcast_pmask(int idx)
+{
+ u32 portmask;
+ // Read MC_PORTMASK (2) via register RTL9300_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
+
+ rtl_table_read(q, idx);
+ portmask = sw_r32(rtl_table_data(q, 0));
+ portmask >>= 3;
+ rtl_table_release(q);
+
+ pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, portmask);
+ return portmask;
+}
- return entry;
+static void rtl930x_write_mcast_pmask(int idx, u64 portmask)
+{
+ u32 pm = portmask;
+
+ // Access MC_PORTMASK (2) via register RTL9300_TBL_L2
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
+
+ pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, pm);
+ pm <<= 3;
+ sw_w32(pm, rtl_table_data(q, 0));
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
}
u64 rtl930x_traffic_get(int source)
@@ -329,7 +699,7 @@ irqreturn_t rtl930x_switch_irq(int irq, void *dev_id)
sw_w32(ports, RTL930X_ISR_PORT_LINK_STS_CHG);
pr_info("RTL9300 Link change: status: %x, ports %x\n", status, ports);
- rtl9300_dump_debug();
+// rtl9300_dump_debug();
for (i = 0; i < 28; i++) {
if (ports & BIT(i)) {
@@ -347,42 +717,6 @@ irqreturn_t rtl930x_switch_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int rtl9300_sds_power(int mac, int val)
-{
- int sds_num;
- u32 mode;
-
- // TODO: these numbers are hard-coded for the Zyxel XGS1210 12 Switch
- pr_info("SerDes: %s %d\n", __func__, mac);
- switch (mac) {
- case 24:
- sds_num = 6;
- mode = 0x12; // HISGMII
- break;
- case 25:
- sds_num = 7;
- mode = 0x12; // HISGMII
- break;
- case 26:
- sds_num = 8;
- mode = 0x1b; // 10GR/1000BX auto
- break;
- case 27:
- sds_num = 9;
- mode = 0x1b; // 10GR/1000BX auto
- break;
- default:
- return -1;
- }
- if (!val)
- mode = 0x1f; // OFF
-
- rtl9300_sds_rst(sds_num, mode);
-
- return 0;
-}
-
-
int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val)
{
u32 v;
@@ -418,7 +752,6 @@ int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
u32 v;
int err = 0;
-// pr_info("In %s\n", __func__);
if (port > 63 || page > 4095 || reg > 31)
return -ENOTSUPP;
@@ -445,7 +778,6 @@ int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
return err;
}
-
/*
* Write to an mmd register of the PHY
*/
@@ -465,12 +797,12 @@ int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
// Set MMD device number and register to write to
sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
- v = BIT(2)| BIT(1)| BIT(0); // WRITE | MMD-access | EXEC
+ v = BIT(2) | BIT(1) | BIT(0); // WRITE | MMD-access | EXEC
sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
do {
v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
- } while ( v & BIT(0));
+ } while (v & BIT(0));
pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err);
mutex_unlock(&smi_lock);
@@ -493,12 +825,12 @@ int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
// Set MMD device number and register to write to
sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
- v = BIT(1)| BIT(0); // MMD-access | EXEC
+ v = BIT(1) | BIT(0); // MMD-access | EXEC
sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
do {
v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
- } while ( v & 0x1);
+ } while (v & BIT(0));
// There is no error-checking via BIT 25 of v, as it does not seem to be set correctly
*val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff);
pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
@@ -508,7 +840,6 @@ int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
return err;
}
-
/*
* Calculate both the block 0 and the block 1 hash, and return in
* lower and higher word of the return value since only 12 bit of
@@ -552,6 +883,1539 @@ u32 rtl930x_hash(struct rtl838x_switch_priv *priv, u64 seed)
return h;
}
+/*
+ * Enables or disables the EEE/EEEP capability of a port
+ */
+void rtl930x_port_eee_set(struct rtl838x_switch_priv *priv, int port, bool enable)
+{
+ u32 v;
+
+	// This works only for Ethernet ports; on the RTL930X, ports 26 and above are SFP
+ if (port >= 26)
+ return;
+
+ pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
+ v = enable ? 0x3f : 0x0;
+
+ // Set EEE/EEEP state for 100, 500, 1000MBit and 2.5, 5 and 10GBit
+ sw_w32_mask(0, v << 10, rtl930x_mac_force_mode_ctrl(port));
+
+ // Set TX/RX EEE state
+ v = enable ? 0x3 : 0x0;
+ sw_w32(v, RTL930X_EEE_CTRL(port));
+
+ priv->ports[port].eee_enabled = enable;
+}
+
+/*
+ * Get EEE own capabilities and negotiation result
+ */
+int rtl930x_eee_port_ability(struct rtl838x_switch_priv *priv, struct ethtool_eee *e, int port)
+{
+ u32 link, a;
+
+ if (port >= 26)
+ return -ENOTSUPP;
+
+ pr_info("In %s, port %d\n", __func__, port);
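+	// The link status register is read twice because the first read may return a latched value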
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ link = sw_r32(RTL930X_MAC_LINK_STS);
+ if (!(link & BIT(port)))
+ return 0;
+
+ pr_info("Setting advertised\n");
+ if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(10))
+ e->advertised |= ADVERTISED_100baseT_Full;
+
+ if (sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(12))
+ e->advertised |= ADVERTISED_1000baseT_Full;
+
+ if (priv->ports[port].is2G5 && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(13)) {
+ pr_info("ADVERTISING 2.5G EEE\n");
+ e->advertised |= ADVERTISED_2500baseX_Full;
+ }
+
+ if (priv->ports[port].is10G && sw_r32(rtl930x_mac_force_mode_ctrl(port)) & BIT(15))
+ e->advertised |= ADVERTISED_10000baseT_Full;
+
+ a = sw_r32(RTL930X_MAC_EEE_ABLTY);
+ a = sw_r32(RTL930X_MAC_EEE_ABLTY);
+ pr_info("Link partner: %08x\n", a);
+ if (a & BIT(port)) {
+ e->lp_advertised = ADVERTISED_100baseT_Full;
+ e->lp_advertised |= ADVERTISED_1000baseT_Full;
+ if (priv->ports[port].is2G5)
+ e->lp_advertised |= ADVERTISED_2500baseX_Full;
+ if (priv->ports[port].is10G)
+ e->lp_advertised |= ADVERTISED_10000baseT_Full;
+ }
+
+ // Read 2x to clear latched state
+ a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
+ a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
+ pr_info("%s RTL930X_EEEP_PORT_CTRL: %08x\n", __func__, a);
+
+ return 0;
+}
+
+static void rtl930x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
+{
+ int i;
+
+ pr_info("Setting up EEE, state: %d\n", enable);
+
+ // Setup EEE on all ports
+ for (i = 0; i < priv->cpu_port; i++) {
+ if (priv->ports[i].phy)
+ rtl930x_port_eee_set(priv, i, enable);
+ }
+
+ priv->eee_enabled = enable;
+}
+
+#define HASH_PICK(val, lsb, len) ((val & (((1 << len) - 1) << lsb)) >> lsb)
+
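+/* Fold an IPv4 address into the 9-bit row index of the L3 host table.
+ * Algorithm 0 XORs the address chunks; algorithm 1 sums and folds the upper
+ * chunks before XORing with the lowest chunk. */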
+static u32 rtl930x_l3_hash4(u32 ip, int algorithm, bool move_dip)
+{
+ u32 rows[4];
+ u32 hash;
+ u32 s0, s1, pH;
+
+ memset(rows, 0, sizeof(rows));
+
+ rows[0] = HASH_PICK(ip, 27, 5);
+ rows[1] = HASH_PICK(ip, 18, 9);
+ rows[2] = HASH_PICK(ip, 9, 9);
+
+ if (!move_dip)
+ rows[3] = HASH_PICK(ip, 0, 9);
+
+ if (!algorithm) {
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3];
+ } else {
+ s0 = rows[0] + rows[1] + rows[2];
+ s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9);
+ pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9);
+ hash = pH ^ rows[3];
+ }
+ return hash;
+}
+
+static u32 rtl930x_l3_hash6(struct in6_addr *ip6, int algorithm, bool move_dip)
+{
+	u32 rows[16];
+	u32 hash;
+	u32 s0, s1, pH;
+
+	memset(rows, 0, sizeof(rows));
+
+ rows[0] = (HASH_PICK(ip6->s6_addr[0], 6, 2) << 0);
+ rows[1] = (HASH_PICK(ip6->s6_addr[0], 0, 6) << 3) | HASH_PICK(ip6->s6_addr[1], 5, 3);
+ rows[2] = (HASH_PICK(ip6->s6_addr[1], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[2], 4, 4);
+ rows[3] = (HASH_PICK(ip6->s6_addr[2], 0, 4) << 5) | HASH_PICK(ip6->s6_addr[3], 3, 5);
+ rows[4] = (HASH_PICK(ip6->s6_addr[3], 0, 3) << 6) | HASH_PICK(ip6->s6_addr[4], 2, 6);
+ rows[5] = (HASH_PICK(ip6->s6_addr[4], 0, 2) << 7) | HASH_PICK(ip6->s6_addr[5], 1, 7);
+ rows[6] = (HASH_PICK(ip6->s6_addr[5], 0, 1) << 8) | HASH_PICK(ip6->s6_addr[6], 0, 8);
+ rows[7] = (HASH_PICK(ip6->s6_addr[7], 0, 8) << 1) | HASH_PICK(ip6->s6_addr[8], 7, 1);
+ rows[8] = (HASH_PICK(ip6->s6_addr[8], 0, 7) << 2) | HASH_PICK(ip6->s6_addr[9], 6, 2);
+ rows[9] = (HASH_PICK(ip6->s6_addr[9], 0, 6) << 3) | HASH_PICK(ip6->s6_addr[10], 5, 3);
+ rows[10] = (HASH_PICK(ip6->s6_addr[10], 0, 5) << 4) | HASH_PICK(ip6->s6_addr[11], 4, 4);
+ if (!algorithm) {
+ rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5)
+ | (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0);
+ rows[12] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6)
+ | (HASH_PICK(ip6->s6_addr[13], 2, 6) << 0);
+ rows[13] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7)
+ | (HASH_PICK(ip6->s6_addr[14], 1, 7) << 0);
+ if (!move_dip) {
+ rows[14] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8)
+ | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0);
+ }
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6]
+ ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ rows[12]
+ ^ rows[13] ^ rows[14];
+ } else {
+ rows[11] = (HASH_PICK(ip6->s6_addr[11], 0, 4) << 5);
+ rows[12] = (HASH_PICK(ip6->s6_addr[12], 3, 5) << 0);
+ rows[13] = (HASH_PICK(ip6->s6_addr[12], 0, 3) << 6)
+ | HASH_PICK(ip6->s6_addr[13], 2, 6);
+ rows[14] = (HASH_PICK(ip6->s6_addr[13], 0, 2) << 7)
+ | HASH_PICK(ip6->s6_addr[14], 1, 7);
+ if (!move_dip) {
+ rows[15] = (HASH_PICK(ip6->s6_addr[14], 0, 1) << 8)
+ | (HASH_PICK(ip6->s6_addr[15], 0, 8) << 0);
+ }
+ s0 = rows[12] + rows[13] + rows[14];
+ s1 = (s0 & 0x1ff) + ((s0 & (0x1ff << 9)) >> 9);
+ pH = (s1 & 0x1ff) + ((s1 & (0x1ff << 9)) >> 9);
+ hash = rows[0] ^ rows[1] ^ rows[2] ^ rows[3] ^ rows[4] ^ rows[5] ^ rows[6]
+ ^ rows[7] ^ rows[8] ^ rows[9] ^ rows[10] ^ rows[11] ^ pH ^ rows[15];
+ }
+ return hash;
+}
+
+/*
+ * Read a prefix route entry from the L3_PREFIX_ROUTE_IPUC table
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u32 v, ip4_m;
+ bool host_route, default_route;
+ struct in6_addr ip6_m;
+
+ // Read L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 11 registers
+ rt->attr.valid = !!(sw_r32(rtl_table_data(r, 0)) & BIT(31));
+ if (!rt->attr.valid)
+ goto out;
+
+ rt->attr.type = (sw_r32(rtl_table_data(r, 0)) >> 29) & 0x3;
+
+ v = sw_r32(rtl_table_data(r, 10));
+ host_route = !!(v & BIT(21));
+ default_route = !!(v & BIT(20));
+ rt->prefix_len = -1;
+ pr_info("%s: host route %d, default_route %d\n", __func__, host_route, default_route);
+
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ rt->dst_ip = sw_r32(rtl_table_data(r, 4));
+ ip4_m = sw_r32(rtl_table_data(r, 9));
+ pr_info("%s: Read ip4 mask: %08x\n", __func__, ip4_m);
+ rt->prefix_len = host_route ? 32 : -1;
+		rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 0 : rt->prefix_len;
+ if (rt->prefix_len < 0)
+ rt->prefix_len = inet_mask_len(ip4_m);
+ break;
+ case 2: // IPv6 Unicast route
+ ipv6_addr_set(&rt->dst_ip6,
+ sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)));
+ ipv6_addr_set(&ip6_m,
+ sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)),
+ sw_r32(rtl_table_data(r, 8)), sw_r32(rtl_table_data(r, 9)));
+		rt->prefix_len = host_route ? 128 : -1;
+		rt->prefix_len = (rt->prefix_len < 0 && default_route) ? 0 : rt->prefix_len;
+ if (rt->prefix_len < 0)
+ rt->prefix_len = find_last_bit((unsigned long int *)&ip6_m.s6_addr32,
+ 128);
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rt->attr.hit = !!(v & BIT(22));
+ rt->attr.action = (v >> 18) & 3;
+ rt->nh.id = (v >> 7) & 0x7ff;
+ rt->attr.ttl_dec = !!(v & BIT(6));
+ rt->attr.ttl_check = !!(v & BIT(5));
+ rt->attr.dst_null = !!(v & BIT(4));
+ rt->attr.qos_as = !!(v & BIT(3));
+ rt->attr.qos_prio = v & 0x7;
+ pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_info("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action,
+ rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null);
+ pr_info("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+out:
+ rtl_table_release(r);
+}
+
+static void rtl930x_net6_mask(int prefix_len, struct in6_addr *ip6_m)
+{
+ int o, b;
+ // Define network mask
+ o = prefix_len >> 3;
+ b = prefix_len & 0x7;
+	memset(ip6_m->s6_addr, 0, sizeof(ip6_m->s6_addr));
+	memset(ip6_m->s6_addr, 0xff, o);
+	if (b)
+		ip6_m->s6_addr[o] = (0xff00 >> b) & 0xff;
+}
+
+/*
+ * Read a host route entry from the table using its index
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_host_route_read(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+ // Read L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
+
+ idx = ((idx / 6) * 8) + (idx % 6);
+
+ pr_info("In %s, physical index %d\n", __func__, idx);
+ rtl_table_read(r, idx);
+ // The table has a size of 5 (for UC, 11 for MC) registers
+ v = sw_r32(rtl_table_data(r, 0));
+ rt->attr.valid = !!(v & BIT(31));
+ if (!rt->attr.valid)
+ goto out;
+ rt->attr.type = (v >> 29) & 0x3;
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ rt->dst_ip = sw_r32(rtl_table_data(r, 4));
+ break;
+ case 2: // IPv6 Unicast route
+ ipv6_addr_set(&rt->dst_ip6,
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 0)));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rt->attr.hit = !!(v & BIT(20));
+ rt->attr.dst_null = !!(v & BIT(19));
+ rt->attr.action = (v >> 17) & 3;
+ rt->nh.id = (v >> 6) & 0x7ff;
+ rt->attr.ttl_dec = !!(v & BIT(5));
+ rt->attr.ttl_check = !!(v & BIT(4));
+ rt->attr.qos_as = !!(v & BIT(3));
+ rt->attr.qos_prio = v & 0x7;
+ pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_info("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check,
+ rt->attr.dst_null);
+ pr_info("%s: Destination: %pI4\n", __func__, &rt->dst_ip);
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Write a host route entry to the table at the given index
+ * We currently only support IPv4 and IPv6 unicast routes
+ */
+static void rtl930x_host_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v;
+ // Access L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
+ // The table has a size of 5 (for UC, 11 for MC) registers
+
+ idx = ((idx / 6) * 8) + (idx % 6);
+
+ pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_info("%s: next_hop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action, rt->attr.ttl_dec, rt->attr.ttl_check,
+ rt->attr.dst_null);
+ pr_info("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+
+ v = BIT(31); // Entry is valid
+ v |= (rt->attr.type & 0x3) << 29;
+ v |= rt->attr.hit ? BIT(20) : 0;
+ v |= rt->attr.dst_null ? BIT(19) : 0;
+ v |= (rt->attr.action & 0x3) << 17;
+ v |= (rt->nh.id & 0x7ff) << 6;
+ v |= rt->attr.ttl_dec ? BIT(5) : 0;
+ v |= rt->attr.ttl_check ? BIT(4) : 0;
+ v |= rt->attr.qos_as ? BIT(3) : 0;
+ v |= rt->attr.qos_prio & 0x7;
+
+ sw_w32(v, rtl_table_data(r, 0));
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ sw_w32(0, rtl_table_data(r, 1));
+ sw_w32(0, rtl_table_data(r, 2));
+ sw_w32(0, rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip, rtl_table_data(r, 4));
+ break;
+ case 2: // IPv6 Unicast route
+ sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
+ sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
+ sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ goto out;
+ }
+
+ rtl_table_write(r, idx);
+ pr_info("%s: %08x %08x %08x %08x %08x\n", __func__,
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)),
+ sw_r32(rtl_table_data(r, 2)), sw_r32(rtl_table_data(r, 3)),
+ sw_r32(rtl_table_data(r, 4)));
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Look up the index of a prefix route in the routing table CAM for unicast IPv4/6 routes
+ * using hardware offload.
+ */
+static int rtl930x_route_lookup_hw(struct rtl83xx_route *rt)
+{
+ u32 ip4_m, v;
+ struct in6_addr ip6_m;
+ int i;
+
+ if (rt->attr.type == 1 || rt->attr.type == 3) // Hardware only supports UC routes
+ return -1;
+
+ sw_w32_mask(0x3 << 19, rt->attr.type, RTL930X_L3_HW_LU_KEY_CTRL);
+ if (rt->attr.type) { // IPv6
+ rtl930x_net6_mask(rt->prefix_len, &ip6_m);
+ for (i = 0; i < 4; i++)
+			sw_w32(rt->dst_ip6.s6_addr32[i] & ip6_m.s6_addr32[i],
+ RTL930X_L3_HW_LU_KEY_IP_CTRL + (i << 2));
+ } else { // IPv4
+ ip4_m = inet_make_mask(rt->prefix_len);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 4);
+ sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 8);
+ v = rt->dst_ip & ip4_m;
+ pr_info("%s: searching for %pI4\n", __func__, &v);
+ sw_w32(v, RTL930X_L3_HW_LU_KEY_IP_CTRL + 12);
+ }
+
+ // Execute CAM lookup in SoC
+ sw_w32(BIT(15), RTL930X_L3_HW_LU_CTRL);
+
+ // Wait until execute bit clears and result is ready
+ do {
+ v = sw_r32(RTL930X_L3_HW_LU_CTRL);
+ } while (v & BIT(15));
+
+ pr_info("%s: found: %d, index: %d\n", __func__, !!(v & BIT(14)), v & 0x1ff);
+
+ // Test if search successful (BIT 14 set)
+ if (v & BIT(14))
+ return v & 0x1ff;
+
+ return -1;
+}
+
+static int rtl930x_find_l3_slot(struct rtl83xx_route *rt, bool must_exist)
+{
+ int t, s, slot_width, algorithm, addr, idx;
+ u32 hash;
+ struct rtl83xx_route route_entry;
+
+ // IPv6 entries take up 3 slots
+ slot_width = (rt->attr.type == 0) || (rt->attr.type == 2) ? 1 : 3;
+
+ for (t = 0; t < 2; t++) {
+ algorithm = (sw_r32(RTL930X_L3_HOST_TBL_CTRL) >> (2 + t)) & 0x1;
+ hash = rtl930x_l3_hash4(rt->dst_ip, algorithm, false);
+
+ pr_debug("%s: table %d, algorithm %d, hash %04x\n", __func__, t, algorithm, hash);
+
+ for (s = 0; s < 6; s += slot_width) {
+ addr = (t << 12) | ((hash & 0x1ff) << 3) | s;
+ pr_debug("%s physical address %d\n", __func__, addr);
+ idx = ((addr / 8) * 6) + (addr % 8);
+ pr_debug("%s logical address %d\n", __func__, idx);
+
+ rtl930x_host_route_read(idx, &route_entry);
+ pr_debug("%s route valid %d, route dest: %pI4, hit %d\n", __func__,
+				route_entry.attr.valid, &route_entry.dst_ip, route_entry.attr.hit);
+			if (!must_exist && !route_entry.attr.valid)
+ return idx;
+ if (must_exist && route_entry.dst_ip == rt->dst_ip)
+ return idx;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Write a prefix route into the routing table CAM at position idx
+ * Currently only IPv4 and IPv6 unicast routes are supported
+ */
+static void rtl930x_route_write(int idx, struct rtl83xx_route *rt)
+{
+ u32 v, ip4_m;
+ struct in6_addr ip6_m;
+ // Access L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
+ // The table has a size of 11 registers (20 for MC)
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
+
+ pr_info("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
+ pr_info("%s: nexthop: %d, hit: %d, action :%d, ttl_dec %d, ttl_check %d, dst_null %d\n",
+ __func__, rt->nh.id, rt->attr.hit, rt->attr.action,
+ rt->attr.ttl_dec, rt->attr.ttl_check, rt->attr.dst_null);
+ pr_info("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
+
+ v = rt->attr.valid ? BIT(31) : 0;
+ v |= (rt->attr.type & 0x3) << 29;
+ sw_w32(v, rtl_table_data(r, 0));
+
+ v = rt->attr.hit ? BIT(22) : 0;
+ v |= (rt->attr.action & 0x3) << 18;
+ v |= (rt->nh.id & 0x7ff) << 7;
+ v |= rt->attr.ttl_dec ? BIT(6) : 0;
+ v |= rt->attr.ttl_check ? BIT(5) : 0;
+	v |= rt->attr.dst_null ? BIT(4) : 0;
+	v |= rt->attr.qos_as ? BIT(3) : 0;
+ v |= rt->attr.qos_prio & 0x7;
+ v |= rt->prefix_len == 0 ? BIT(20) : 0; // set default route bit
+
+ // set bit mask for entry type always to 0x3
+ sw_w32(0x3 << 29, rtl_table_data(r, 5));
+
+ switch (rt->attr.type) {
+ case 0: // IPv4 Unicast route
+ sw_w32(0, rtl_table_data(r, 1));
+ sw_w32(0, rtl_table_data(r, 2));
+ sw_w32(0, rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip, rtl_table_data(r, 4));
+
+ v |= rt->prefix_len == 32 ? BIT(21) : 0; // set host-route bit
+ ip4_m = inet_make_mask(rt->prefix_len);
+ sw_w32(0, rtl_table_data(r, 6));
+ sw_w32(0, rtl_table_data(r, 7));
+ sw_w32(0, rtl_table_data(r, 8));
+ sw_w32(ip4_m, rtl_table_data(r, 9));
+ break;
+ case 2: // IPv6 Unicast route
+ sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
+ sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
+ sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
+ sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
+
+ v |= rt->prefix_len == 128 ? BIT(21) : 0; // set host-route bit
+
+ rtl930x_net6_mask(rt->prefix_len, &ip6_m);
+
+ sw_w32(ip6_m.s6_addr32[0], rtl_table_data(r, 6));
+ sw_w32(ip6_m.s6_addr32[1], rtl_table_data(r, 7));
+ sw_w32(ip6_m.s6_addr32[2], rtl_table_data(r, 8));
+ sw_w32(ip6_m.s6_addr32[3], rtl_table_data(r, 9));
+ break;
+ case 1: // IPv4 Multicast route
+ case 3: // IPv6 Multicast route
+ pr_warn("%s: route type not supported\n", __func__);
+ rtl_table_release(r);
+ return;
+ }
+ sw_w32(v, rtl_table_data(r, 10));
+
+ pr_info("%s: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", __func__,
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)),
+ sw_r32(rtl_table_data(r, 6)), sw_r32(rtl_table_data(r, 7)), sw_r32(rtl_table_data(r, 8)),
+ sw_r32(rtl_table_data(r, 9)), sw_r32(rtl_table_data(r, 10)));
+
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+
+/*
+ * Get the destination MAC and L3 egress interface ID of a nexthop entry from
+ * the SoC's L3_NEXTHOP table
+ */
+static void rtl930x_get_l3_nexthop(int idx, u16 *dmac_id, u16 *interface)
+{
+ u32 v;
+ // Read L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 1 register
+ v = sw_r32(rtl_table_data(r, 0));
+ rtl_table_release(r);
+
+ *dmac_id = (v >> 7) & 0x7fff;
+ *interface = v & 0x7f;
+}
+
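+/* Drop one reference on the MTU slot that holds the given MTU value */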
+static int rtl930x_l3_mtu_del(struct rtl838x_switch_priv *priv, int mtu)
+{
+ int i;
+
+ for (i = 0; i < MAX_INTF_MTUS; i++) {
+ if (mtu == priv->intf_mtus[i])
+ break;
+ }
+ if (i >= MAX_INTF_MTUS || !priv->intf_mtu_count[i]) {
+ pr_err("%s: No MTU slot found for MTU: %d\n", __func__, mtu);
+ return -EINVAL;
+ }
+
+	priv->intf_mtu_count[i]--;
+
+	return 0;
+}
+
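+/* Find the slot already holding this MTU or claim a free one, program it and return the slot id */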
+static int rtl930x_l3_mtu_add(struct rtl838x_switch_priv *priv, int mtu)
+{
+ int i, free_mtu;
+ int mtu_id;
+
+ // Try to find an existing mtu-value or a free slot
+ free_mtu = MAX_INTF_MTUS;
+ for (i = 0; i < MAX_INTF_MTUS && priv->intf_mtus[i] != mtu; i++) {
+ if ((!priv->intf_mtu_count[i]) && (free_mtu == MAX_INTF_MTUS))
+ free_mtu = i;
+ }
+ i = (i < MAX_INTF_MTUS) ? i : free_mtu;
+ if (i < MAX_INTF_MTUS) {
+ mtu_id = i;
+ } else {
+ pr_err("%s: No free MTU slot available!\n", __func__);
+ return -EINVAL;
+ }
+
+ priv->intf_mtus[i] = mtu;
+ pr_info("Writing MTU %d to slot %d\n", priv->intf_mtus[i], i);
+ // Set MTU-value of the slot TODO: distinguish between IPv4/IPv6 routes / slots
+ sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
+ RTL930X_L3_IP_MTU_CTRL(i));
+ sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
+ RTL930X_L3_IP6_MTU_CTRL(i));
+
+ priv->intf_mtu_count[i]++;
+
+ return mtu_id;
+}
+
+/*
+ * Creates an interface for a route by setting up the HW tables in the SoC
+ */
+static int rtl930x_l3_intf_add(struct rtl838x_switch_priv *priv, struct rtl838x_l3_intf *intf)
+{
+ int i, intf_id, mtu_id;
+ // number of MTU-values < 16384
+
+ // Use the same IPv6 mtu as the ip4 mtu for this route if unset
+ intf->ip6_mtu = intf->ip6_mtu ? intf->ip6_mtu : intf->ip4_mtu;
+
+ mtu_id = rtl930x_l3_mtu_add(priv, intf->ip4_mtu);
+ pr_info("%s: added mtu %d with mtu-id %d\n", __func__, intf->ip4_mtu, mtu_id);
+ if (mtu_id < 0)
+ return -ENOSPC;
+ intf->ip4_mtu_id = mtu_id;
+ intf->ip6_mtu_id = mtu_id;
+
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ if (!priv->interfaces[i])
+ break;
+ }
+ if (i >= MAX_INTERFACES) {
+ pr_err("%s: cannot find free interface entry\n", __func__);
+ return -EINVAL;
+ }
+ intf_id = i;
+ priv->interfaces[i] = kzalloc(sizeof(struct rtl838x_l3_intf), GFP_KERNEL);
+ if (!priv->interfaces[i]) {
+ pr_err("%s: no memory to allocate new interface\n", __func__);
+ return -ENOMEM;
+ }
+
+	return intf_id;
+}
+
+/*
+ * Set the destination MAC and L3 egress interface ID for a nexthop entry in the SoC's
+ * L3_NEXTHOP table. The nexthop entry is identified by idx.
+ * dmac_id is the reference to the L2 entry in the L2 forwarding table, special values are
+ * 0x7ffe: TRAP2CPU
+ * 0x7ffd: TRAP2MASTERCPU
+ * 0x7fff: DMAC_ID_DROP
+ */
+static void rtl930x_set_l3_nexthop(int idx, u16 dmac_id, u16 interface)
+{
+ // Access L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
+
+ pr_info("%s: Writing to L3_NEXTHOP table, index %d, dmac_id %d, interface %d\n",
+ __func__, idx, dmac_id, interface);
+ sw_w32(((dmac_id & 0x7fff) << 7) | (interface & 0x7f), rtl_table_data(r, 0));
+
+ pr_info("%s: %08x\n", __func__, sw_r32(rtl_table_data(r,0)));
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+static void rtl930x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
+{
+ int block = index / PIE_BLOCK_SIZE;
+
+ sw_w32_mask(0, BIT(block), RTL930X_PIE_BLK_LOOKUP_CTRL);
+}
+
+/*
+ * Reads the intermediate representation of the templated match-fields of the
+ * PIE rule in the pie_rule structure and fills in the raw data fields in the
+ * raw register space r[].
+ * The register space configuration size is identical for the RTL8380/90 and RTL9300,
+ * however the RTL9310 has 2 more registers / fields and the physical field-ids are different
+ * on all SoCs
+ * On the RTL9300 the mask fields are not word-aligned!
+ */
+static void rtl930x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
+{
+ int i;
+ enum template_field_id field_type;
+ u16 data, data_m;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ field_type = t[i];
+ data = data_m = 0;
+
+ switch (field_type) {
+ case TEMPLATE_FIELD_SPM0:
+ data = pr->spm;
+ data_m = pr->spm_m;
+ break;
+ case TEMPLATE_FIELD_SPM1:
+ data = pr->spm >> 16;
+ data_m = pr->spm_m >> 16;
+ break;
+ case TEMPLATE_FIELD_OTAG:
+ data = pr->otag;
+ data_m = pr->otag_m;
+ break;
+ case TEMPLATE_FIELD_SMAC0:
+ data = pr->smac[4];
+ data = (data << 8) | pr->smac[5];
+ data_m = pr->smac_m[4];
+ data_m = (data_m << 8) | pr->smac_m[5];
+ break;
+ case TEMPLATE_FIELD_SMAC1:
+ data = pr->smac[2];
+ data = (data << 8) | pr->smac[3];
+ data_m = pr->smac_m[2];
+ data_m = (data_m << 8) | pr->smac_m[3];
+ break;
+ case TEMPLATE_FIELD_SMAC2:
+ data = pr->smac[0];
+ data = (data << 8) | pr->smac[1];
+ data_m = pr->smac_m[0];
+ data_m = (data_m << 8) | pr->smac_m[1];
+ break;
+ case TEMPLATE_FIELD_DMAC0:
+ data = pr->dmac[4];
+ data = (data << 8) | pr->dmac[5];
+ data_m = pr->dmac_m[4];
+ data_m = (data_m << 8) | pr->dmac_m[5];
+ break;
+ case TEMPLATE_FIELD_DMAC1:
+ data = pr->dmac[2];
+ data = (data << 8) | pr->dmac[3];
+ data_m = pr->dmac_m[2];
+ data_m = (data_m << 8) | pr->dmac_m[3];
+ break;
+ case TEMPLATE_FIELD_DMAC2:
+ data = pr->dmac[0];
+ data = (data << 8) | pr->dmac[1];
+ data_m = pr->dmac_m[0];
+ data_m = (data_m << 8) | pr->dmac_m[1];
+ break;
+ case TEMPLATE_FIELD_ETHERTYPE:
+ data = pr->ethertype;
+ data_m = pr->ethertype_m;
+ break;
+ case TEMPLATE_FIELD_ITAG:
+ data = pr->itag;
+ data_m = pr->itag_m;
+ break;
+ case TEMPLATE_FIELD_SIP0:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[7];
+ data_m = pr->sip6_m.s6_addr16[7];
+ } else {
+ data = pr->sip;
+ data_m = pr->sip_m;
+ }
+ break;
+ case TEMPLATE_FIELD_SIP1:
+ if (pr->is_ipv6) {
+ data = pr->sip6.s6_addr16[6];
+ data_m = pr->sip6_m.s6_addr16[6];
+ } else {
+ data = pr->sip >> 16;
+ data_m = pr->sip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_SIP2:
+ case TEMPLATE_FIELD_SIP3:
+ case TEMPLATE_FIELD_SIP4:
+ case TEMPLATE_FIELD_SIP5:
+ case TEMPLATE_FIELD_SIP6:
+ case TEMPLATE_FIELD_SIP7:
+ data = pr->sip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ data_m = pr->sip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_SIP2)];
+ break;
+
+ case TEMPLATE_FIELD_DIP0:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[7];
+ data_m = pr->dip6_m.s6_addr16[7];
+ } else {
+ data = pr->dip;
+ data_m = pr->dip_m;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP1:
+ if (pr->is_ipv6) {
+ data = pr->dip6.s6_addr16[6];
+ data_m = pr->dip6_m.s6_addr16[6];
+ } else {
+ data = pr->dip >> 16;
+ data_m = pr->dip_m >> 16;
+ }
+ break;
+
+ case TEMPLATE_FIELD_DIP2:
+ case TEMPLATE_FIELD_DIP3:
+ case TEMPLATE_FIELD_DIP4:
+ case TEMPLATE_FIELD_DIP5:
+ case TEMPLATE_FIELD_DIP6:
+ case TEMPLATE_FIELD_DIP7:
+ data = pr->dip6.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ data_m = pr->dip6_m.s6_addr16[5 - (field_type - TEMPLATE_FIELD_DIP2)];
+ break;
+
+ case TEMPLATE_FIELD_IP_TOS_PROTO:
+ data = pr->tos_proto;
+ data_m = pr->tos_proto_m;
+ break;
+ case TEMPLATE_FIELD_L4_SPORT:
+ data = pr->sport;
+ data_m = pr->sport_m;
+ break;
+ case TEMPLATE_FIELD_L4_DPORT:
+ data = pr->dport;
+ data_m = pr->dport_m;
+ break;
+ case TEMPLATE_FIELD_DSAP_SSAP:
+ data = pr->dsap_ssap;
+ data_m = pr->dsap_ssap_m;
+ break;
+ case TEMPLATE_FIELD_TCP_INFO:
+ data = pr->tcp_info;
+ data_m = pr->tcp_info_m;
+ break;
+ case TEMPLATE_FIELD_RANGE_CHK:
+ pr_info("TEMPLATE_FIELD_RANGE_CHK: not configured\n");
+ break;
+ default:
+ pr_info("%s: unknown field %d\n", __func__, field_type);
+ }
+
+ // On the RTL9300, the mask fields are not word aligned!
+ if (!(i % 2)) {
+ r[5 - i / 2] = data;
+ r[12 - i / 2] |= ((u32)data_m << 8);
+ } else {
+ r[5 - i / 2] |= ((u32)data) << 16;
+ r[12 - i / 2] |= ((u32)data_m) << 24;
+ r[11 - i / 2] |= ((u32)data_m) >> 8;
+ }
+ }
+}
+
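+/* Extract the fixed (non-templated) match fields and their masks from the raw rule registers */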
+static void rtl930x_read_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ pr->stacking_port = r[6] & BIT(31);
+ pr->spn = (r[6] >> 24) & 0x7f;
+ pr->mgnt_vlan = r[6] & BIT(23);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw = r[6] & BIT(22);
+ else
+ pr->content_too_deep = r[6] & BIT(22);
+ pr->not_first_frag = r[6] & BIT(21);
+ pr->frame_type_l4 = (r[6] >> 18) & 7;
+ pr->frame_type = (r[6] >> 16) & 3;
+ pr->otag_fmt = (r[6] >> 15) & 1;
+ pr->itag_fmt = (r[6] >> 14) & 1;
+ pr->otag_exist = (r[6] >> 13) & 1;
+ pr->itag_exist = (r[6] >> 12) & 1;
+ pr->frame_type_l2 = (r[6] >> 10) & 3;
+ pr->igr_normal_port = (r[6] >> 9) & 1;
+ pr->tid = (r[6] >> 8) & 1;
+
+ pr->stacking_port_m = r[12] & BIT(7);
+ pr->spn_m = r[12] & 0x7f;
+ pr->mgnt_vlan_m = r[13] & BIT(31);
+ if (pr->phase == PHASE_IACL)
+ pr->dmac_hit_sw_m = r[13] & BIT(30);
+ else
+ pr->content_too_deep_m = r[13] & BIT(30);
+ pr->not_first_frag_m = r[13] & BIT(29);
+ pr->frame_type_l4_m = (r[13] >> 26) & 7;
+ pr->frame_type_m = (r[13] >> 24) & 3;
+ pr->otag_fmt_m = r[13] & BIT(23);
+ pr->itag_fmt_m = r[13] & BIT(22);
+ pr->otag_exist_m = r[13] & BIT(21);
+	pr->itag_exist_m = r[13] & BIT(20);
+ pr->frame_type_l2_m = (r[13] >> 18) & 3;
+ pr->igr_normal_port_m = r[13] & BIT(17);
+ pr->tid_m = (r[13] >> 16) & 1;
+
+ pr->valid = r[13] & BIT(15);
+ pr->cond_not = r[13] & BIT(14);
+ pr->cond_and1 = r[13] & BIT(13);
+ pr->cond_and2 = r[13] & BIT(12);
+}
+
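+/* Pack the fixed (non-templated) match fields and their masks into the raw rule registers */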
+static void rtl930x_write_pie_fixed_fields(u32 r[], struct pie_rule *pr)
+{
+ r[6] = pr->stacking_port ? BIT(31) : 0;
+ r[6] |= ((u32) (pr->spn & 0x7f)) << 24;
+ r[6] |= pr->mgnt_vlan ? BIT(23) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[6] |= pr->dmac_hit_sw ? BIT(22) : 0;
+ else
+ r[6] |= pr->content_too_deep ? BIT(22) : 0;
+ r[6] |= pr->not_first_frag ? BIT(21) : 0;
+ r[6] |= ((u32) (pr->frame_type_l4 & 0x7)) << 18;
+ r[6] |= ((u32) (pr->frame_type & 0x3)) << 16;
+ r[6] |= pr->otag_fmt ? BIT(15) : 0;
+ r[6] |= pr->itag_fmt ? BIT(14) : 0;
+ r[6] |= pr->otag_exist ? BIT(13) : 0;
+ r[6] |= pr->itag_exist ? BIT(12) : 0;
+ r[6] |= ((u32) (pr->frame_type_l2 & 0x3)) << 10;
+ r[6] |= pr->igr_normal_port ? BIT(9) : 0;
+ r[6] |= ((u32) (pr->tid & 0x1)) << 8;
+
+ r[12] |= pr->stacking_port_m ? BIT(7) : 0;
+ r[12] |= (u32) (pr->spn_m & 0x7f);
+ r[13] |= pr->mgnt_vlan_m ? BIT(31) : 0;
+ if (pr->phase == PHASE_IACL)
+ r[13] |= pr->dmac_hit_sw_m ? BIT(30) : 0;
+ else
+ r[13] |= pr->content_too_deep_m ? BIT(30) : 0;
+ r[13] |= pr->not_first_frag_m ? BIT(29) : 0;
+ r[13] |= ((u32) (pr->frame_type_l4_m & 0x7)) << 26;
+ r[13] |= ((u32) (pr->frame_type_m & 0x3)) << 24;
+ r[13] |= pr->otag_fmt_m ? BIT(23) : 0;
+ r[13] |= pr->itag_fmt_m ? BIT(22) : 0;
+ r[13] |= pr->otag_exist_m ? BIT(21) : 0;
+ r[13] |= pr->itag_exist_m ? BIT(20) : 0;
+ r[13] |= ((u32) (pr->frame_type_l2_m & 0x3)) << 18;
+ r[13] |= pr->igr_normal_port_m ? BIT(17) : 0;
+ r[13] |= ((u32) (pr->tid_m & 0x1)) << 16;
+
+ r[13] |= pr->valid ? BIT(15) : 0;
+ r[13] |= pr->cond_not ? BIT(14) : 0;
+ r[13] |= pr->cond_and1 ? BIT(13) : 0;
+ r[13] |= pr->cond_and2 ? BIT(12) : 0;
+}
+
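+/* Encode the action part of a rule: drop/forward, VLAN rewrite, metering, mirroring and logging */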
+static void rtl930x_write_pie_action(u32 r[], struct pie_rule *pr)
+{
+ // Either drop or forward
+ if (pr->drop) {
+ r[14] |= BIT(24) | BIT(25) | BIT(26); // Do Green, Yellow and Red drops
+ // Actually DROP, not PERMIT in Green / Yellow / Red
+ r[14] |= BIT(23) | BIT(22) | BIT(20);
+ } else {
+ r[14] |= pr->fwd_sel ? BIT(27) : 0;
+ r[14] |= pr->fwd_act << 18;
+ r[14] |= BIT(14); // We overwrite any drop
+ }
+ if (pr->phase == PHASE_VACL)
+ r[14] |= pr->fwd_sa_lrn ? BIT(15) : 0;
+ r[13] |= pr->bypass_sel ? BIT(5) : 0;
+ r[13] |= pr->nopri_sel ? BIT(4) : 0;
+ r[13] |= pr->tagst_sel ? BIT(3) : 0;
+ r[13] |= pr->ovid_sel ? BIT(1) : 0;
+ r[14] |= pr->ivid_sel ? BIT(31) : 0;
+ r[14] |= pr->meter_sel ? BIT(30) : 0;
+ r[14] |= pr->mir_sel ? BIT(29) : 0;
+ r[14] |= pr->log_sel ? BIT(28) : 0;
+
+ r[14] |= ((u32)(pr->fwd_data & 0x3fff)) << 3;
+ r[15] |= pr->log_octets ? BIT(31) : 0;
+ r[15] |= (u32)(pr->meter_data) << 23;
+
+	r[15] |= ((u32)(pr->ivid_act) & 0x3) << 21;
+	r[15] |= ((u32)(pr->ivid_data) & 0xfff) << 9;
+	r[16] |= ((u32)(pr->ovid_act) & 0x3) << 30;
+ r[16] |= ((u32)(pr->ovid_data) & 0xfff) << 16;
+ r[16] |= (pr->mir_data & 0x3) << 6;
+ r[17] |= ((u32)(pr->tagst_data) & 0xf) << 28;
+ r[17] |= ((u32)(pr->nopri_data) & 0x7) << 25;
+ r[17] |= pr->bypass_ibc_sc ? BIT(16) : 0;
+}
+
+void rtl930x_pie_rule_dump_raw(u32 r[])
+{
+ pr_info("Raw IACL table entry:\n");
+ pr_info("r 0 - 7: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]);
+ pr_info("r 8 - 15: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]);
+ pr_info("r 16 - 18: %08x %08x %08x\n", r[16], r[17], r[18]);
+ pr_info("Match : %08x %08x %08x %08x %08x %08x\n", r[0], r[1], r[2], r[3], r[4], r[5]);
+ pr_info("Fixed : %06x\n", r[6] >> 8);
+ pr_info("Match M: %08x %08x %08x %08x %08x %08x\n",
+ (r[6] << 24) | (r[7] >> 8), (r[7] << 24) | (r[8] >> 8), (r[8] << 24) | (r[9] >> 8),
+ (r[9] << 24) | (r[10] >> 8), (r[10] << 24) | (r[11] >> 8),
+ (r[11] << 24) | (r[12] >> 8));
+ pr_info("R[13]: %08x\n", r[13]);
+ pr_info("Fixed M: %06x\n", ((r[12] << 16) | (r[13] >> 16)) & 0xffffff);
+ pr_info("Valid / not / and1 / and2 : %1x\n", (r[13] >> 12) & 0xf);
+ pr_info("r 13-16: %08x %08x %08x %08x\n", r[13], r[14], r[15], r[16]);
+}
+
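+/* Write a PIE rule to the IACL table: fixed fields, the templated match fields
+ * according to the block's template selection, and the action */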
+static int rtl930x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
+{
+ // Access IACL table (2) via register 0
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_0, 2);
+ u32 r[19];
+ int i;
+ int block = idx / PIE_BLOCK_SIZE;
+ u32 t_select = sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block));
+
+ pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select);
+
+ for (i = 0; i < 19; i++)
+ r[i] = 0;
+
+ if (!pr->valid) {
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+ return 0;
+ }
+ rtl930x_write_pie_fixed_fields(r, pr);
+
+ pr_info("%s: template %d\n", __func__, (t_select >> (pr->tid * 4)) & 0xf);
+ rtl930x_write_pie_templated(r, pr, fixed_templates[(t_select >> (pr->tid * 4)) & 0xf]);
+
+ rtl930x_write_pie_action(r, pr);
+
+ rtl930x_pie_rule_dump_raw(r);
+
+ for (i = 0; i < 19; i++)
+ sw_w32(r[i], rtl_table_data(q, i));
+
+ rtl_table_write(q, idx);
+ rtl_table_release(q);
+
+ return 0;
+}
+
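+/* Check whether template t provides the given match field */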
+static bool rtl930x_pie_templ_has(int t, enum template_field_id field_type)
+{
+ int i;
+ enum template_field_id ft;
+
+ for (i = 0; i < N_FIXED_FIELDS; i++) {
+ ft = fixed_templates[t][i];
+ if (field_type == ft)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Verify that the rule pr is compatible with a given template t in block block
+ * Note that this function is SoC specific since the values of e.g. TEMPLATE_FIELD_SIP0
+ * depend on the SoC
+ */
+static int rtl930x_pie_verify_template(struct rtl838x_switch_priv *priv,
+ struct pie_rule *pr, int t, int block)
+{
+ int i;
+
+ if (!pr->is_ipv6 && pr->sip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP0))
+ return -1;
+
+ if (!pr->is_ipv6 && pr->dip_m && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP0))
+ return -1;
+
+ if (pr->is_ipv6) {
+ if ((pr->sip6_m.s6_addr32[0] || pr->sip6_m.s6_addr32[1]
+ || pr->sip6_m.s6_addr32[2] || pr->sip6_m.s6_addr32[3])
+ && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SIP2))
+ return -1;
+ if ((pr->dip6_m.s6_addr32[0] || pr->dip6_m.s6_addr32[1]
+ || pr->dip6_m.s6_addr32[2] || pr->dip6_m.s6_addr32[3])
+ && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DIP2))
+ return -1;
+ }
+
+ if (ether_addr_to_u64(pr->smac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_SMAC0))
+ return -1;
+
+ if (ether_addr_to_u64(pr->dmac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
+ return -1;
+
+ // TODO: Check more
+
+ i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
+
+ if (i >= PIE_BLOCK_SIZE)
+ return -1;
+
+ return i + PIE_BLOCK_SIZE * block;
+}
+
+static int rtl930x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx, block, j, t;
+ int min_block = 0;
+ int max_block = priv->n_pie_blocks / 2;
+
+ if (pr->is_egress) {
+ min_block = max_block;
+ max_block = priv->n_pie_blocks;
+ }
+ pr_info("In %s\n", __func__);
+
+ mutex_lock(&priv->pie_mutex);
+
+ for (block = min_block; block < max_block; block++) {
+ for (j = 0; j < 2; j++) {
+ t = (sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf;
+ pr_info("Testing block %d, template %d, template id %d\n", block, j, t);
+ pr_info("%s: %08x\n",
+ __func__, sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block)));
+ idx = rtl930x_pie_verify_template(priv, pr, t, block);
+ if (idx >= 0)
+ break;
+ }
+ if (j < 2)
+ break;
+ }
+
+ if (block >= priv->n_pie_blocks) {
+ mutex_unlock(&priv->pie_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("Using block: %d, index %d, template-id %d\n", block, idx, j);
+ set_bit(idx, priv->pie_use_bm);
+
+ pr->valid = true;
+ pr->tid = j; // Mapped to template number
+ pr->tid_m = 0x1;
+ pr->id = idx;
+
+ rtl930x_pie_lookup_enable(priv, idx);
+ rtl930x_pie_rule_write(priv, idx, pr);
+
+ mutex_unlock(&priv->pie_mutex);
+ return 0;
+}
+
+/*
+ * Delete a range of Packet Inspection Engine rules
+ */
+static int rtl930x_pie_rule_del(struct rtl838x_switch_priv *priv, int index_from, int index_to)
+{
+	u32 v = (index_from << 1) | (index_to << 12) | BIT(0);
+
+ pr_info("%s: from %d to %d\n", __func__, index_from, index_to);
+ mutex_lock(&priv->reg_mutex);
+
+ // Write from-to and execute bit into control register
+ sw_w32(v, RTL930X_PIE_CLR_CTRL);
+
+ // Wait until command has completed
+ do {
+ } while (sw_r32(RTL930X_PIE_CLR_CTRL) & BIT(0));
+
+ mutex_unlock(&priv->reg_mutex);
+ return 0;
+}
+
+static void rtl930x_pie_rule_rm(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
+{
+ int idx = pr->id;
+
+ rtl930x_pie_rule_del(priv, idx, idx);
+ clear_bit(idx, priv->pie_use_bm);
+}
+
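+/* Initial setup of the Packet Inspection Engine: enable ACL lookup on all ports,
+ * delete any existing rules, assign the first half of the blocks to the VACL phase
+ * and the second half to IACL, and select the predefined templates per block. */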
+static void rtl930x_pie_init(struct rtl838x_switch_priv *priv)
+{
+ int i;
+ u32 template_selectors;
+
+ mutex_init(&priv->pie_mutex);
+
+ pr_info("%s\n", __func__);
+ // Enable ACL lookup on all ports, including CPU_PORT
+ for (i = 0; i <= priv->cpu_port; i++)
+ sw_w32(1, RTL930X_ACL_PORT_LOOKUP_CTRL(i));
+
+ // Include IPG in metering
+ sw_w32_mask(0, 1, RTL930X_METER_GLB_CTRL);
+
+ // Delete all present rules, block size is 128 on all SoC families
+ rtl930x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1);
+
+ // Assign blocks 0-7 to VACL phase (bit = 0), blocks 8-15 to IACL (bit = 1)
+ sw_w32(0xff00, RTL930X_PIE_BLK_PHASE_CTRL);
+
+ // Enable predefined templates 0, 1 for first quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = 0; i < priv->n_pie_blocks / 4; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+ // Enable predefined templates 2, 3 for second quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+	// Enable predefined templates 0, 1 for third quarter of all blocks
+ template_selectors = 0 | (1 << 4);
+ for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+	// Enable predefined templates 2, 3 for fourth quarter of all blocks
+ template_selectors = 2 | (3 << 4);
+ for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
+ sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
+
+}
+
+/*
+ * Sets up an egress interface for L3 actions
+ * Actions for ip4/6_icmp_redirect, ip4/6_pbr_icmp_redirect are:
+ * 0: FORWARD, 1: DROP, 2: TRAP2CPU, 3: COPY2CPU, 4: TRAP2MASTERCPU 5: COPY2MASTERCPU
+ * 6: HARDDROP
+ * idx is the index in the HW interface table: idx < 0x80
+ */
+static void rtl930x_set_l3_egress_intf(int idx, struct rtl838x_l3_intf *intf)
+{
+ u32 u, v;
+ // Read L3_EGR_INTF table (4) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 4);
+
+ // The table has 2 registers
+ u = (intf->vid & 0xfff) << 9;
+ u |= (intf->smac_idx & 0x3f) << 3;
+ u |= (intf->ip4_mtu_id & 0x7);
+
+ v = (intf->ip6_mtu_id & 0x7) << 28;
+ v |= (intf->ttl_scope & 0xff) << 20;
+ v |= (intf->hl_scope & 0xff) << 12;
+ v |= (intf->ip4_icmp_redirect & 0x7) << 9;
+	v |= (intf->ip6_icmp_redirect & 0x7) << 6;
+ v |= (intf->ip4_pbr_icmp_redirect & 0x7) << 3;
+ v |= (intf->ip6_pbr_icmp_redirect & 0x7);
+
+ sw_w32(u, rtl_table_data(r, 0));
+ sw_w32(v, rtl_table_data(r, 1));
+
+ pr_info("%s writing to index %d: %08x %08x\n", __func__, idx, u, v);
+ rtl_table_write(r, idx & 0x7f);
+ rtl_table_release(r);
+}
+
+/*
+ * Reads a MAC entry for L3 termination as entry point for routing
+ * from the hardware table
+ * idx is the index into the L3_ROUTER_MAC table
+ */
+static void rtl930x_get_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
+{
+ u32 v, w;
+ // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 7 registers, 64 entries
+ v = sw_r32(rtl_table_data(r, 0));
+ w = sw_r32(rtl_table_data(r, 3));
+ m->valid = !!(v & BIT(20));
+ if (!m->valid)
+ goto out;
+
+ m->p_type = !!(v & BIT(19));
+ m->p_id = (v >> 13) & 0x3f; // trunk id of port
+ m->vid = v & 0xfff;
+ m->vid_mask = w & 0xfff;
+ m->action = sw_r32(rtl_table_data(r, 6)) & 0x7;
+ m->mac_mask = ((((u64)sw_r32(rtl_table_data(r, 5))) << 32) & 0xffffffffffffULL)
+ | (sw_r32(rtl_table_data(r, 4)));
+ m->mac = ((((u64)sw_r32(rtl_table_data(r, 1))) << 32) & 0xffffffffffffULL)
+ | (sw_r32(rtl_table_data(r, 2)));
+ // Bits L3_INTF and BMSK_L3_INTF are 0
+
+out:
+ rtl_table_release(r);
+}
+
+/*
+ * Writes a MAC entry for L3 termination as entry point for routing
+ * into the hardware table
+ * idx is the index into the L3_ROUTER_MAC table
+ */
+static void rtl930x_set_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
+{
+ u32 v, w;
+ // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
+
+ // The table has a size of 7 registers, 64 entries
+ v = BIT(20); // mac entry valid, port type is 0: individual
+ v |= (m->p_id & 0x3f) << 13;
+ v |= (m->vid & 0xfff); // Set the interface_id to the vlan id
+
+ w = m->vid_mask;
+ w |= (m->p_id_mask & 0x3f) << 13;
+
+ sw_w32(v, rtl_table_data(r, 0));
+ sw_w32(w, rtl_table_data(r, 3));
+
+ // Set MAC address, L3_INTF (bit 12 in register 1) needs to be 0
+ sw_w32((u32)(m->mac), rtl_table_data(r, 2));
+ sw_w32(m->mac >> 32, rtl_table_data(r, 1));
+
+ // Set MAC address mask, BMSK_L3_INTF (bit 12 in register 5) needs to be 0
+ sw_w32((u32)(m->mac_mask >> 32), rtl_table_data(r, 4));
+ sw_w32((u32)m->mac_mask, rtl_table_data(r, 5));
+
+ sw_w32(m->action & 0x7, rtl_table_data(r, 6));
+
+ pr_info("%s writing index %d: %08x %08x %08x %08x %08x %08x %08x\n", __func__, idx,
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
+ sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)), sw_r32(rtl_table_data(r, 5)),
+ sw_r32(rtl_table_data(r, 6))
+ );
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+/*
+ * Get the Destination-MAC of an L3 egress interface or the Source MAC for routed packets
+ * from the SoC's L3_EGR_INTF_MAC table
+ * Indexes 0-2047 are DMACs, 2048+ are SMACs
+ */
+static u64 rtl930x_get_l3_egress_mac(u32 idx)
+{
+ u64 mac;
+ // Read L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
+
+ rtl_table_read(r, idx);
+ // The table has a size of 2 registers
+ mac = sw_r32(rtl_table_data(r, 0));
+ mac <<= 32;
+ mac |= sw_r32(rtl_table_data(r, 1));
+ rtl_table_release(r);
+
+ return mac;
+}
+/*
+ * Set the Destination-MAC of a route or the Source MAC of an L3 egress interface
+ * in the SoC's L3_EGR_INTF_MAC table
+ * Indexes 0-2047 are DMACs, 2048+ are SMACs
+ */
+static void rtl930x_set_l3_egress_mac(u32 idx, u64 mac)
+{
+ // Access L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
+
+ // The table has a size of 2 registers
+ sw_w32(mac >> 32, rtl_table_data(r, 0));
+ sw_w32(mac, rtl_table_data(r, 1));
+
+ pr_debug("%s: setting index %d to %016llx\n", __func__, idx, mac);
+ rtl_table_write(r, idx);
+ rtl_table_release(r);
+}
+
+/*
+ * Configure L3 routing settings of the device:
+ * - MTUs
+ * - Egress interface
+ * - The router's MAC address on which routed packets are expected
+ * - MAC addresses used as source macs of routed packets
+ */
+int rtl930x_l3_setup(struct rtl838x_switch_priv *priv)
+{
+ int i;
+
+ // Setup MTU with id 0 for default interface
+ for (i = 0; i < MAX_INTF_MTUS; i++)
+ priv->intf_mtu_count[i] = priv->intf_mtus[i] = 0;
+
+ priv->intf_mtu_count[0] = 0; // Needs to stay forever
+ priv->intf_mtus[0] = DEFAULT_MTU;
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(0));
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(0));
+ priv->intf_mtus[1] = DEFAULT_MTU;
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(0));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(0));
+
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(1));
+ sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(1));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(1));
+ sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(1));
+
+ // Clear all source port MACs
+ for (i = 0; i < MAX_SMACS; i++)
+ rtl930x_set_l3_egress_mac(L3_EGRESS_DMACS + i, 0ULL);
+
+ // Configure the default L3 hash algorithm
+ sw_w32_mask(BIT(2), 0, RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 0 = 0
+ sw_w32_mask(0, BIT(3), RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 1 = 1
+
+ pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n",
+ sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL),
+ sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL));
+ sw_w32_mask(0, 1, RTL930X_L3_IPUC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IP6UC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IPMC_ROUTE_CTRL);
+ sw_w32_mask(0, 1, RTL930X_L3_IP6MC_ROUTE_CTRL);
+
+ sw_w32(0x00002001, RTL930X_L3_IPUC_ROUTE_CTRL);
+ sw_w32(0x00014581, RTL930X_L3_IP6UC_ROUTE_CTRL);
+ sw_w32(0x00000501, RTL930X_L3_IPMC_ROUTE_CTRL);
+ sw_w32(0x00012881, RTL930X_L3_IP6MC_ROUTE_CTRL);
+
+ pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n",
+ sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL),
+ sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL));
+
+ // Trap non-ip traffic to the CPU-port (e.g. ARP so we stay reachable)
+ sw_w32_mask(0x3 << 8, 0x1 << 8, RTL930X_L3_IP_ROUTE_CTRL);
+ pr_info("L3_IP_ROUTE_CTRL %08x\n", sw_r32(RTL930X_L3_IP_ROUTE_CTRL));
+
+ // PORT_ISO_RESTRICT_ROUTE_CTRL ?
+
+ // Do not use prefix route 0 because of HW limitations
+ set_bit(0, priv->route_use_bm);
+
+ return 0;
+}
+
+static u32 rtl930x_packet_cntr_read(int counter)
+{
+ u32 v;
+
+ // Read LOG table (3) via register RTL9300_TBL_0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ rtl_table_read(r, counter / 2);
+
+ pr_info("Registers: %08x %08x\n",
+ sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
+ // The table has a size of 2 registers
+ if (counter % 2)
+ v = sw_r32(rtl_table_data(r, 0));
+ else
+ v = sw_r32(rtl_table_data(r, 1));
+
+ rtl_table_release(r);
+
+ return v;
+}
+
+static void rtl930x_packet_cntr_clear(int counter)
+{
+ // Access LOG table (3) via register RTL9300_TBL_0
+ struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
+
+ pr_info("In %s, id %d\n", __func__, counter);
+ // The table has a size of 2 registers
+ if (counter % 2)
+ sw_w32(0, rtl_table_data(r, 0));
+ else
+ sw_w32(0, rtl_table_data(r, 1));
+
+ rtl_table_write(r, counter / 2);
+
+ rtl_table_release(r);
+}
+
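+/*
+ * Worked example of the LOG-table layout used above: each table row holds two
+ * counters, the even-numbered one in the second data register and the odd-numbered
+ * one in the first, so for instance
+ *
+ *   u32 pkts = rtl930x_packet_cntr_read(5);   // row 5/2 = 2, odd counter -> data word 0
+ */
+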
+void rtl930x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ u32 l3shift = 0;
+ u32 newmask = 0;
+	/* Until it is clear how the algorithm index is configured, force it to 0 */
+	algoidx = 0;
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT;
+ }
+	if (algomsk & TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT) {
+		l3shift = 4;
+		newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT;
+	}
+ if (l3shift == 4)
+ {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT;
+ }
+ } else {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT;
+ }
+ }
+ sw_w32(newmask << l3shift, RTL930X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
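+/*
+ * Usage sketch (not part of the driver): selecting an L2 hash over source and
+ * destination MAC for a LAG would pass the corresponding mask bits; note that the
+ * algorithm index is currently forced to 0 regardless of the caller's value:
+ *
+ *   rtl930x_set_distribution_algorithm(0, 0,
+ *           TRUNK_DISTRIBUTION_ALGO_SMAC_BIT | TRUNK_DISTRIBUTION_ALGO_DMAC_BIT);
+ */
+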
+void rtl930x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ u32 value = 0;
+
+	switch (action) {
+ case FORWARD:
+ value = 0;
+ break;
+ case DROP:
+ value = 1;
+ break;
+ case TRAP2CPU:
+ value = 2;
+ break;
+ case TRAP2MASTERCPU:
+ value = 3;
+ break;
+ case FLOODALL:
+ value = 4;
+ break;
+ }
+	switch (type) {
+ case BPDU:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL930X_RMA_BPDU_CTRL + ((port / 10) << 2));
+ break;
+ case PTP:
+ //udp
+ sw_w32_mask(3 << 2, value << 2, RTL930X_RMA_PTP_CTRL + (port << 2));
+ //eth2
+ sw_w32_mask(3, value, RTL930X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_UDP:
+ sw_w32_mask(3 << 2, value << 2, RTL930X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_ETH2:
+ sw_w32_mask(3, value, RTL930X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL930X_RMA_LLTP_CTRL + ((port / 10) << 2));
+ break;
+ case EAPOL:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL930X_RMA_EAPOL_CTRL + ((port / 10) << 2));
+ break;
+ default:
+ break;
+ }
+}
+
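+/*
+ * Usage sketch (not part of the driver): trapping spanning-tree BPDUs arriving on
+ * port 5 to the CPU, using the enums handled above:
+ *
+ *   rtl930x_set_receive_management_action(5, BPDU, TRAP2CPU);
+ */
+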
const struct rtl838x_reg rtl930x_reg = {
.mask_port_reg_be = rtl838x_mask_port_reg,
.set_port_reg_be = rtl838x_set_port_reg,
@@ -582,6 +2446,8 @@ const struct rtl838x_reg rtl930x_reg = {
.vlan_set_tagged = rtl930x_vlan_set_tagged,
.vlan_set_untagged = rtl930x_vlan_set_untagged,
.vlan_profile_dump = rtl930x_vlan_profile_dump,
+ .vlan_profile_setup = rtl930x_vlan_profile_setup,
+ .vlan_fwd_on_inner = rtl930x_vlan_fwd_on_inner,
.stp_get = rtl930x_stp_get,
.stp_set = rtl930x_stp_set,
.mac_force_mode_ctrl = rtl930x_mac_force_mode_ctrl,
@@ -597,11 +2463,62 @@ const struct rtl838x_reg rtl930x_reg = {
.mac_rx_pause_sts = RTL930X_MAC_RX_PAUSE_STS,
.mac_tx_pause_sts = RTL930X_MAC_TX_PAUSE_STS,
.read_l2_entry_using_hash = rtl930x_read_l2_entry_using_hash,
+ .write_l2_entry_using_hash = rtl930x_write_l2_entry_using_hash,
.read_cam = rtl930x_read_cam,
+ .write_cam = rtl930x_write_cam,
.vlan_port_egr_filter = RTL930X_VLAN_PORT_EGR_FLTR,
- .vlan_port_igr_filter = RTL930X_VLAN_PORT_IGR_FLTR(0),
+ .vlan_port_igr_filter = RTL930X_VLAN_PORT_IGR_FLTR,
.vlan_port_pb = RTL930X_VLAN_PORT_PB_VLAN,
.vlan_port_tag_sts_ctrl = RTL930X_VLAN_PORT_TAG_STS_CTRL,
.trk_mbr_ctr = rtl930x_trk_mbr_ctr,
.rma_bpdu_fld_pmask = RTL930X_RMA_BPDU_FLD_PMSK,
+ .init_eee = rtl930x_init_eee,
+ .port_eee_set = rtl930x_port_eee_set,
+ .eee_port_ability = rtl930x_eee_port_ability,
+ .l2_hash_seed = rtl930x_l2_hash_seed,
+ .l2_hash_key = rtl930x_l2_hash_key,
+ .read_mcast_pmask = rtl930x_read_mcast_pmask,
+ .write_mcast_pmask = rtl930x_write_mcast_pmask,
+ .l2_learning_setup = rtl930x_l2_learning_setup,
+ .pie_init = rtl930x_pie_init,
+ .pie_rule_write = rtl930x_pie_rule_write,
+ .pie_rule_add = rtl930x_pie_rule_add,
+ .pie_rule_rm = rtl930x_pie_rule_rm,
+ .route_read = rtl930x_route_read,
+ .route_write = rtl930x_route_write,
+ .host_route_write = rtl930x_host_route_write,
+ .l3_setup = rtl930x_l3_setup,
+ .set_l3_nexthop = rtl930x_set_l3_nexthop,
+ .get_l3_nexthop = rtl930x_get_l3_nexthop,
+ .get_l3_egress_mac = rtl930x_get_l3_egress_mac,
+ .set_l3_egress_mac = rtl930x_set_l3_egress_mac,
+ .find_l3_slot = rtl930x_find_l3_slot,
+ .route_lookup_hw = rtl930x_route_lookup_hw,
+ .get_l3_router_mac = rtl930x_get_l3_router_mac,
+ .set_l3_router_mac = rtl930x_set_l3_router_mac,
+ .set_l3_egress_intf = rtl930x_set_l3_egress_intf,
+ .packet_cntr_read = rtl930x_packet_cntr_read,
+ .packet_cntr_clear = rtl930x_packet_cntr_clear,
+ .rma_bpdu_ctrl = RTL930X_RMA_BPDU_CTRL,
+ .rma_ptp_ctrl = RTL930X_RMA_PTP_CTRL,
+ .rma_lltp_ctrl = RTL930X_RMA_LLTP_CTRL,
+ .rma_eapol_ctrl = RTL930X_RMA_EAPOL_CTRL,
+ .rma_bpdu_ctrl_div = 10,
+ .rma_ptp_ctrl_div = 1,
+ .rma_lltp_ctrl_div = 10,
+ .rma_eapol_ctrl_div = 10,
+ .storm_ctrl_port_uc = RTL930X_STORM_CTRL_PORT_UC_0(0),
+ .storm_ctrl_port_bc = RTL930X_STORM_CTRL_PORT_BC_0(0),
+ .storm_ctrl_port_mc = RTL930X_STORM_CTRL_PORT_MC_0(0),
+ .storm_ctrl_port_uc_shift = 3,
+ .storm_ctrl_port_bc_shift = 3,
+ .storm_ctrl_port_mc_shift = 3,
+ .vlan_ctrl = RTL930X_VLAN_CTRL,
+ .sflow_ctrl = RTL930X_SFLOW_CTRL,
+ .sflow_port_rate_ctrl = RTL930X_SFLOW_PORT_RATE_CTRL,
+ .trk_hash_ctrl = RTL930X_TRK_HASH_CTRL,
+// .trk_hash_idx_ctrl = RTL930X_TRK_HASH_IDX_CTRL,
+ .set_distribution_algorithm = rtl930x_set_distribution_algorithm,
+ .set_receive_management_action = rtl930x_set_receive_management_action,
};
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl931x.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl931x.c
index a33941a0eb..9e0dee79a5 100644
--- a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl931x.c
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/rtl931x.c
@@ -175,6 +175,7 @@ static u64 rtl931x_read_cam(int idx, struct rtl838x_l2_entry *e)
// TODO: Implement
return entry;
}
+
irqreturn_t rtl931x_switch_irq(int irq, void *dev_id)
{
struct dsa_switch *ds = dev_id;
@@ -199,7 +200,6 @@ irqreturn_t rtl931x_switch_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val)
{
u32 v;
@@ -264,6 +264,73 @@ int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val)
return 0;
}
+/*
+ * Read an mmd register of the PHY
+ */
+int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ int err = 0;
+ u32 v;
+ int type = 1; // TODO: For C45 PHYs need to set to 2
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access via port-number
+ sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
+
+ v = type << 2 | BIT(0); // MMD-access-type | EXEC
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+ } while (v & BIT(0));
+
+ // There is no error-checking via BIT 1 of v, as it does not seem to be set correctly
+
+ *val = (sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_3) & 0xffff);
+
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
+
+ mutex_unlock(&smi_lock);
+
+ return err;
+}
+
+/*
+ * Write to an mmd register of the PHY
+ */
+int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 val)
+{
+ int err = 0;
+ u32 v;
+ int type = 1; // TODO: For C45 PHYs need to set to 2
+
+ mutex_lock(&smi_lock);
+
+ // Set PHY to access via port-number
+ sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL);
+
+ // Set data to write
+ sw_w32_mask(0xffff << 16, val << 16, RTL931X_SMI_INDRT_ACCESS_CTRL_3);
+
+ // Set MMD device number and register to write to
+ sw_w32(devnum << 16 | (regnum & 0xffff), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
+
+ v = BIT(4) | type << 2 | BIT(0); // WRITE | MMD-access-type | EXEC
+ sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+
+ do {
+ v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
+ } while (v & BIT(0));
+
+ pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, val, err);
+ mutex_unlock(&smi_lock);
+ return err;
+}
+
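+/*
+ * Usage sketch (not part of the driver): the accessors above take Clause-45 style
+ * device/register numbers, e.g. reading the EEE advertisement register 7.60 (numbers
+ * per IEEE 802.3, not driver-defined) of the PHY behind port 0:
+ *
+ *   u32 adv;
+ *   if (!rtl931x_read_mmd_phy(0, 7, 60, &adv))
+ *           pr_debug("EEE advertisement: %04x\n", adv);
+ */
+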
void rtl931x_print_matrix(void)
{
volatile u64 *ptr = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0);
@@ -275,6 +342,103 @@ void rtl931x_print_matrix(void)
pr_info("CPU_PORT> %16llx\n", ptr[52]);
}
+void rtl931x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
+{
+ u32 l3shift = 0;
+ u32 newmask = 0;
+	/* Until it is clear how the algorithm index is configured, force it to 0 */
+	algoidx = 0;
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DIP_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT) {
+ l3shift = 4;
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT;
+ }
+	if (algomsk & TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT) {
+		l3shift = 4;
+		newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT;
+	}
+ if (l3shift == 4)
+ {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT;
+ }
+ } else {
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_SMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT;
+ }
+ if (algomsk & TRUNK_DISTRIBUTION_ALGO_DMAC_BIT) {
+ newmask |= TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT;
+ }
+ }
+ sw_w32(newmask << l3shift, RTL931X_TRK_HASH_CTRL + (algoidx << 2));
+}
+
+void rtl931x_set_receive_management_action(int port, rma_ctrl_t type, action_type_t action)
+{
+ u32 value = 0;
+
+	/* In the GRATARP field COPY2CPU shares the encoding of TRAP2MASTERCPU (3), so remap it */
+ if (type == GRATARP && action == COPY2CPU)
+ action = TRAP2MASTERCPU;
+
+	switch (action) {
+ case FORWARD:
+ value = 0;
+ break;
+ case DROP:
+ value = 1;
+ break;
+ case TRAP2CPU:
+ value = 2;
+ break;
+ case TRAP2MASTERCPU:
+ value = 3;
+ break;
+ case FLOODALL:
+ value = 4;
+ break;
+ default:
+ break;
+ }
+
+	switch (type) {
+ case BPDU:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_BPDU_CTRL + ((port / 10) << 2));
+ break;
+ case PTP:
+ //udp
+ sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2));
+ //eth2
+ sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_UDP:
+ sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case PTP_ETH2:
+ sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2));
+ break;
+ case LLTP:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_LLTP_CTRL + ((port / 10) << 2));
+ break;
+ case EAPOL:
+ sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_EAPOL_CTRL + ((port / 10) << 2));
+ break;
+ case GRATARP:
+ sw_w32_mask(3 << ((port & 0xf) << 1), value << ((port & 0xf) << 1), RTL931X_TRAP_ARP_GRAT_PORT_ACT + ((port >> 4) << 2));
+ break;
+ }
+}
+
const struct rtl838x_reg rtl931x_reg = {
.mask_port_reg_be = rtl839x_mask_port_reg_be,
.set_port_reg_be = rtl839x_set_port_reg_be,
@@ -317,10 +481,31 @@ const struct rtl838x_reg rtl931x_reg = {
.mac_tx_pause_sts = RTL931X_MAC_TX_PAUSE_STS,
.read_l2_entry_using_hash = rtl931x_read_l2_entry_using_hash,
.read_cam = rtl931x_read_cam,
- .vlan_port_egr_filter = RTL931X_VLAN_PORT_EGR_FLTR(0),
- .vlan_port_igr_filter = RTL931X_VLAN_PORT_IGR_FLTR(0),
+ .vlan_port_egr_filter = RTL931X_VLAN_PORT_EGR_FLTR,
+ .vlan_port_igr_filter = RTL931X_VLAN_PORT_IGR_FLTR,
// .vlan_port_pb = does not exist
.vlan_port_tag_sts_ctrl = RTL931X_VLAN_PORT_TAG_CTRL,
.trk_mbr_ctr = rtl931x_trk_mbr_ctr,
+ .rma_bpdu_ctrl = RTL931X_RMA_BPDU_CTRL,
+ .rma_ptp_ctrl = RTL931X_RMA_PTP_CTRL,
+ .rma_lltp_ctrl = RTL931X_RMA_LLTP_CTRL,
+ .rma_eapol_ctrl = RTL931X_RMA_EAPOL_CTRL,
+ .rma_bpdu_ctrl_div = 10,
+ .rma_ptp_ctrl_div = 1,
+ .rma_lltp_ctrl_div = 10,
+ .rma_eapol_ctrl_div = 10,
+ .storm_ctrl_port_uc = RTL931X_STORM_CTRL_PORT_UC_0(0),
+ .storm_ctrl_port_bc = RTL931X_STORM_CTRL_PORT_BC_0(0),
+ .storm_ctrl_port_mc = RTL931X_STORM_CTRL_PORT_MC_0(0),
+ .storm_ctrl_port_uc_shift = 3,
+ .storm_ctrl_port_bc_shift = 3,
+ .storm_ctrl_port_mc_shift = 3,
+ .vlan_ctrl = RTL931X_VLAN_CTRL,
+ .sflow_ctrl = RTL931X_SFLOW_CTRL,
+ .sflow_port_rate_ctrl = RTL931X_SFLOW_PORT_RATE_CTRL,
+ .trk_hash_ctrl = RTL931X_TRK_HASH_CTRL,
+// .trk_hash_idx_ctrl = RTL931X_TRK_HASH_IDX_CTRL,
+ .set_distribution_algorithm = rtl931x_set_distribution_algorithm,
+ .set_receive_management_action = rtl931x_set_receive_management_action,
};
diff --git a/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/tc.c b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/tc.c
new file mode 100644
index 0000000000..eeba29231a
--- /dev/null
+++ b/target/linux/realtek/files-5.4/drivers/net/dsa/rtl83xx/tc.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/dsa.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include <asm/mach-rtl838x/mach-rtl83xx.h>
+#include "rtl83xx.h"
+
+/*
+ * Parse the flow rule for the matching conditions
+ */
+static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv,
+ struct flow_rule *rule, struct rtl83xx_flow *flow)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ pr_info("In %s\n", __func__);
+ /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
+ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+ (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+ pr_info("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ pr_info("%s: BASIC\n", __func__);
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto == htons(ETH_P_ARP))
+ flow->rule.frame_type = 0;
+ if (match.key->n_proto == htons(ETH_P_IP))
+ flow->rule.frame_type = 2;
+ if (match.key->n_proto == htons(ETH_P_IPV6))
+ flow->rule.frame_type = 3;
+ if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type)
+ flow->rule.frame_type_m = 3;
+ if (flow->rule.frame_type >= 2) {
+ if (match.key->ip_proto == IPPROTO_UDP)
+ flow->rule.frame_type_l4 = 0;
+ if (match.key->ip_proto == IPPROTO_TCP)
+ flow->rule.frame_type_l4 = 1;
+			if (match.key->ip_proto == IPPROTO_ICMP ||
+			    match.key->ip_proto == IPPROTO_ICMPV6)
+ flow->rule.frame_type_l4 = 2;
+ if (match.key->ip_proto == IPPROTO_TCP)
+ flow->rule.frame_type_l4 = 3;
+ if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4)
+ flow->rule.frame_type_l4_m = 7;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ pr_info("%s: ETH_ADDR\n", __func__);
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(flow->rule.dmac, match.key->dst);
+ ether_addr_copy(flow->rule.dmac_m, match.mask->dst);
+ ether_addr_copy(flow->rule.smac, match.key->src);
+ ether_addr_copy(flow->rule.smac_m, match.mask->src);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ pr_info("%s: VLAN\n", __func__);
+ flow_rule_match_vlan(rule, &match);
+ flow->rule.itag = match.key->vlan_id;
+ flow->rule.itag_m = match.mask->vlan_id;
+ // TODO: What about match.key->vlan_priority ?
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ pr_info("%s: IPV4\n", __func__);
+ flow_rule_match_ipv4_addrs(rule, &match);
+ flow->rule.is_ipv6 = false;
+ flow->rule.dip = match.key->dst;
+ flow->rule.dip_m = match.mask->dst;
+ flow->rule.sip = match.key->src;
+ flow->rule.sip_m = match.mask->src;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ pr_info("%s: IPV6\n", __func__);
+ flow->rule.is_ipv6 = true;
+ flow_rule_match_ipv6_addrs(rule, &match);
+ flow->rule.dip6 = match.key->dst;
+ flow->rule.dip6_m = match.mask->dst;
+ flow->rule.sip6 = match.key->src;
+ flow->rule.sip6_m = match.mask->src;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ pr_info("%s: PORTS\n", __func__);
+ flow_rule_match_ports(rule, &match);
+ flow->rule.dport = match.key->dst;
+ flow->rule.dport_m = match.mask->dst;
+ flow->rule.sport = match.key->src;
+ flow->rule.sport_m = match.mask->src;
+ }
+
+ // TODO: IPv6, ICMP
+ return 0;
+}
+
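+/*
+ * For reference, a flower rule from userspace that exercises the BASIC, ETH_ADDRS
+ * and IPV4_ADDRS keys parsed above might look like this (interface name and
+ * addresses are only examples):
+ *
+ *   tc filter add dev lan1 ingress protocol ip flower \
+ *           ip_proto tcp dst_ip 192.168.1.1 action drop
+ */
+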
+static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow)
+{
+ flow->rule.bypass_sel = true;
+ flow->rule.bypass_all = true;
+ flow->rule.bypass_igr_stp = true;
+ flow->rule.bypass_ibc_sc = true;
+}
+
+static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv,
+ const struct flow_action_entry *act, struct rtl83xx_flow *flow)
+{
+ struct net_device *dev = act->dev;
+ int port;
+
+ port = rtl83xx_port_is_under(dev, priv);
+ if (port < 0) {
+ netdev_info(dev, "%s: not a DSA device.\n", __func__);
+ return -EINVAL;
+ }
+
+ flow->rule.fwd_sel = true;
+ flow->rule.fwd_data = port;
+ pr_info("Using port index: %d\n", port);
+ rtl83xx_flow_bypass_all(flow);
+
+ pr_info("%s: data: %04x\n", __func__, flow->rule.fwd_data);
+ return 0;
+}
+
+static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f,
+ struct rtl83xx_flow *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ const struct flow_action_entry *act;
+ int i, err;
+
+ pr_info("%s\n", __func__);
+
+ rtl83xx_parse_flow_rule(priv, rule, flow);
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ pr_info("%s: DROP\n", __func__);
+ flow->rule.drop = true;
+ rtl83xx_flow_bypass_all(flow);
+ return 0;
+
+ case FLOW_ACTION_TRAP:
+ pr_info("%s: TRAP\n", __func__);
+ flow->rule.fwd_data = priv->cpu_port;
+ flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
+ rtl83xx_flow_bypass_all(flow);
+ break;
+
+ case FLOW_ACTION_MANGLE:
+ pr_info("%s: MANGLE\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_ADD:
+ pr_info("%s: ADD\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_VLAN_PUSH:
+ pr_info("%s: VLAN_PUSH\n", __func__);
+// TODO: act->vlan.proto
+ flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ivid_sel = true;
+ flow->rule.ivid_data = htons(act->vlan.vid);
+ flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ovid_sel = true;
+ flow->rule.ovid_data = htons(act->vlan.vid);
+ flow->rule.fwd_mod_to_cpu = true;
+ break;
+
+ case FLOW_ACTION_VLAN_POP:
+ pr_info("%s: VLAN_POP\n", __func__);
+ flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ivid_data = 0;
+ flow->rule.ivid_sel = true;
+ flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
+ flow->rule.ovid_data = 0;
+ flow->rule.ovid_sel = true;
+ flow->rule.fwd_mod_to_cpu = true;
+ break;
+
+ case FLOW_ACTION_CSUM:
+ pr_info("%s: CSUM\n", __func__);
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_REDIRECT:
+ pr_info("%s: REDIRECT\n", __func__);
+ err = rtl83xx_parse_fwd(priv, act, flow);
+ if (err)
+ return err;
+ flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
+ break;
+
+ case FLOW_ACTION_MIRRED:
+ pr_info("%s: MIRRED\n", __func__);
+ err = rtl83xx_parse_fwd(priv, act, flow);
+ if (err)
+ return err;
+ flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT;
+ break;
+
+ default:
+ pr_info("%s: Flow action not supported: %d\n", __func__, act->id);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct rhashtable_params tc_ht_params = {
+ .head_offset = offsetof(struct rtl83xx_flow, node),
+ .key_offset = offsetof(struct rtl83xx_flow, cookie),
+ .key_len = sizeof(((struct rtl83xx_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct rtl83xx_flow *flow;
+ int err = 0;
+
+ pr_info("In %s\n", __func__);
+
+ rcu_read_lock();
+ pr_info("Cookie %08lx\n", f->cookie);
+ flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params);
+ if (flow) {
+ pr_info("%s: Got flow\n", __func__);
+ err = -EEXIST;
+ goto rcu_unlock;
+ }
+
+rcu_unlock:
+ rcu_read_unlock();
+ if (flow)
+ goto out;
+ pr_info("%s: New flow\n", __func__);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ flow->cookie = f->cookie;
+ flow->priv = priv;
+
+ err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params);
+ if (err) {
+		pr_err("Could not insert new rule\n");
+ goto out_free;
+ }
+
+ rtl83xx_add_flow(priv, f, flow); // TODO: check error
+
+ // Add log action to flow
+ flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
+ if (flow->rule.packet_cntr >= 0) {
+ pr_info("Using packet counter %d\n", flow->rule.packet_cntr);
+ flow->rule.log_sel = true;
+ // BUG: Fix for other SoCs than 8380
+ flow->rule.log_data = flow->rule.packet_cntr;
+ }
+
+ return priv->r->pie_rule_add(priv, &flow->rule);
+out_free:
+ kfree(flow);
+out:
+ return err;
+}
+
+static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv,
+				  struct flow_cls_offload *cls_flower)
+{
+ struct rtl83xx_flow *flow;
+
+ pr_info("In %s\n", __func__);
+ flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ priv->r->pie_rule_rm(priv, &flow->rule);
+
+ // TODO: kfree
+ // TODO: Unlink entry from hash-table, free rule-allocation
+
+ return 0;
+}
+
+static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv,
+				 struct flow_cls_offload *cls_flower)
+{
+ struct rtl83xx_flow *flow;
+ unsigned long lastused = 0;
+	int total_packets, new_packets = 0;
+
+ pr_info("In %s\n", __func__);
+
+ flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
+ if (!flow)
+ return -1;
+
+ if (flow->rule.packet_cntr >= 0) {
+ total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr);
+ pr_info("Total packets: %d\n", total_packets);
+ new_packets = total_packets - flow->rule.last_packet_cnt;
+ flow->rule.last_packet_cnt = total_packets;
+ }
+
+ // TODO: We need a second PIE rule to count the bytes
+ flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, lastused);
+ return 0;
+}
+
+
+static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ pr_info("%s: %d\n", __func__, cls_flower->command);
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return rtl83xx_configure_flower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return rtl83xx_delete_flower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return rtl83xx_stats_flower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct rtl838x_switch_priv *priv = cb_priv;
+
+ pr_info("%s: %d\n", __func__, type);
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ pr_info("%s: TC_SETUP_CLSFLOWER\n", __func__);
+ return rtl83xx_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(rtl83xx_block_cb_list);
+
+int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+ struct rtl838x_switch_priv *priv;
+ struct flow_block_offload *f = type_data;
+ static bool first_time = true;
+ int err;
+
+ pr_info("%s: %d\n", __func__, type);
+
+	if (!netdev_uses_dsa(dev)) {
+ pr_info("%s: no DSA\n", __func__);
+ return 0;
+ }
+ priv = dev->dsa_ptr->ds->priv;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ pr_info("%s: setting up CB\n", __func__);
+
+ if (first_time) {
+ pr_info("Initializing rhash\n");
+ first_time = false;
+ err = rhashtable_init(&priv->tc_ht, &tc_ht_params);
+ if (err)
+				pr_err("Failed to initialize TC rhashtable: %d\n", err);
+ }
+
+ f->unlocked_driver_cb = true;
+ return flow_block_cb_setup_simple(type_data,
+ &rtl83xx_block_cb_list,
+ rtl83xx_setup_tc_block_cb,
+ priv, priv, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
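+
+/*
+ * Usage sketch: rtl83xx_setup_tc() is reached once a clsact qdisc and a flower
+ * filter are attached to the CPU-facing ethernet device, e.g. (device name is only
+ * an example):
+ *
+ *   tc qdisc add dev eth0 clsact
+ *   tc filter add dev eth0 ingress flower ip_proto udp action drop
+ */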
diff --git a/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.c b/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.c
index 2f678b7384..77438247ec 100644
--- a/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.c
+++ b/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.c
@@ -34,11 +34,11 @@ extern struct rtl83xx_soc_info soc_info;
* the memory used for the ring buffer.
*/
#define MAX_RXRINGS 32
-#define MAX_RXLEN 100
-#define MAX_ENTRIES (200 * 8)
+#define MAX_RXLEN 300
+#define MAX_ENTRIES (300 * 8)
#define TXRINGS 2
// BUG: TXRINGLEN can be 160
-#define TXRINGLEN 16
+#define TXRINGLEN 160
#define NOTIFY_EVENTS 10
#define NOTIFY_BLOCKS 10
#define TX_EN 0x8
@@ -47,12 +47,18 @@ extern struct rtl83xx_soc_info soc_info;
#define RX_EN_93XX 0x10
#define TX_DO 0x2
#define WRAP 0x2
+#define MAX_PORTS 57
+#define MAX_SMI_BUSSES 4
#define RING_BUFFER 1600
#define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
#define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
#define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
+
+#define RTL839X_STORM_CTRL_PORT_BC_EXCEED (0x180C)
+#define RTL839X_STORM_CTRL_PORT_MC_EXCEED (0x1814)
+#define RTL839X_STORM_CTRL_PORT_UC_EXCEED (0x181C)
#define RTL838X_ATK_PRVNT_STS (0x5B1C)
struct p_hdr {
@@ -95,7 +101,7 @@ struct notify_b {
u32 reserved2[8];
};
-void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
+static void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
{
prio &= 0x7;
@@ -112,7 +118,7 @@ void rtl838x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
}
}
-void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
+static void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
{
prio &= 0x7;
@@ -120,6 +126,7 @@ void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
// cpu_tag[0] is reserved on the RTL83XX SoCs
h->cpu_tag[1] = 0x0100;
h->cpu_tag[2] = h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
+ // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
if (dest_port >= 32) {
dest_port -= 32;
h->cpu_tag[2] = BIT(dest_port) >> 16;
@@ -128,35 +135,49 @@ void rtl839x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
h->cpu_tag[4] = BIT(dest_port) >> 16;
h->cpu_tag[5] = BIT(dest_port) & 0xffff;
}
- h->cpu_tag[6] |= BIT(21); // Enable destination port mask use
+ h->cpu_tag[2] |= BIT(5); // Enable destination port mask use
// Set internal priority and AS_PRIO
if (prio >= 0)
h->cpu_tag[1] |= prio | BIT(3);
}
}
-void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
+static void rtl930x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
{
h->cpu_tag[0] = 0x8000;
- h->cpu_tag[1] = 0; // TODO: Fill port and prio
- h->cpu_tag[2] = 0;
+ h->cpu_tag[1] = h->cpu_tag[2] = 0;
+ if (prio >= 0)
+ h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
h->cpu_tag[3] = 0;
h->cpu_tag[4] = 0;
h->cpu_tag[5] = 0;
- h->cpu_tag[6] = 0;
- h->cpu_tag[7] = 0xffff;
+ h->cpu_tag[6] = BIT(dest_port) >> 16;
+ h->cpu_tag[7] = BIT(dest_port) & 0xffff;
}
-void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
+static void rtl931x_create_tx_header(struct p_hdr *h, int dest_port, int prio)
{
h->cpu_tag[0] = 0x8000;
- h->cpu_tag[1] = 0; // TODO: Fill port and prio
- h->cpu_tag[2] = 0;
+ h->cpu_tag[1] = h->cpu_tag[2] = 0;
+ if (prio >= 0)
+ h->cpu_tag[2] = BIT(13) | prio << 8; // Enable and set Priority Queue
h->cpu_tag[3] = 0;
- h->cpu_tag[4] = 0;
- h->cpu_tag[5] = 0;
- h->cpu_tag[6] = 0;
- h->cpu_tag[7] = 0xffff;
+ h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
+ if (dest_port >= 32) {
+ dest_port -= 32;
+ h->cpu_tag[4] = BIT(dest_port) >> 16;
+ h->cpu_tag[5] = BIT(dest_port) & 0xffff;
+ } else {
+ h->cpu_tag[6] = BIT(dest_port) >> 16;
+ h->cpu_tag[7] = BIT(dest_port) & 0xffff;
+ }
+}
+
+static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
+{
+ h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
+ h->cpu_tag[2] |= (vlan >> 8) & 0xf;
+ h->cpu_tag[3] |= (vlan & 0xff) << 8;
}
struct rtl838x_rx_q {
@@ -181,6 +202,9 @@ struct rtl838x_eth_priv {
u32 lastEvent;
u16 rxrings;
u16 rxringlen;
+ u8 smi_bus[MAX_PORTS];
+ u8 smi_addr[MAX_PORTS];
+ bool smi_bus_isc45[MAX_SMI_BUSSES];
};
extern int rtl838x_phy_init(struct rtl838x_eth_priv *priv);
@@ -234,57 +258,71 @@ struct dsa_tag {
u16 port;
u8 l2_offloaded;
u8 prio;
+ bool crc_error;
};
bool rtl838x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
{
t->reason = h->cpu_tag[3] & 0xf;
- if (t->reason != 15)
- pr_debug("Reason: %d\n", t->reason);
t->queue = (h->cpu_tag[0] & 0xe0) >> 5;
+ t->port = h->cpu_tag[1] & 0x1f;
+ t->crc_error = t->reason == 13;
+
+ pr_debug("Reason: %d\n", t->reason);
if (t->reason != 4) // NIC_RX_REASON_SPECIAL_TRAP
t->l2_offloaded = 1;
else
t->l2_offloaded = 0;
- t->port = h->cpu_tag[1] & 0x1f;
return t->l2_offloaded;
}
bool rtl839x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
{
- t->reason = h->cpu_tag[4] & 0x1f;
- if (t->reason != 31)
+ t->reason = h->cpu_tag[5] & 0x1f;
+ t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
+ t->port = h->cpu_tag[1] & 0x3f;
+ t->crc_error = h->cpu_tag[3] & BIT(2);
+
pr_debug("Reason: %d\n", t->reason);
- t->queue = (h->cpu_tag[3] & 0xe000) >> 13;
- if ((t->reason != 7) && (t->reason != 8)) // NIC_RX_REASON_RMA_USR
- t->l2_offloaded = 1;
- else
+ if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
+ (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
t->l2_offloaded = 0;
-
- t->port = h->cpu_tag[1] & 0x3f;
+ else
+ t->l2_offloaded = 1;
return t->l2_offloaded;
}
-bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
+bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
{
t->reason = h->cpu_tag[7] & 0x3f;
- pr_debug("Reason %d\n", t->reason);
t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
+ t->port = (h->cpu_tag[0] >> 8) & 0x1f;
+ t->crc_error = h->cpu_tag[1] & BIT(6);
+
+ pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
if (t->reason >= 19 && t->reason <= 27)
t->l2_offloaded = 0;
else
t->l2_offloaded = 1;
- t->port = (h->cpu_tag[0] >> 8) & 0x3f;
return t->l2_offloaded;
}
-bool rtl930x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
+bool rtl931x_decode_tag(struct p_hdr *h, struct dsa_tag *t)
{
- rtl931x_decode_tag(h, t);
- t->port &= 0x1f;
+ t->reason = h->cpu_tag[7] & 0x3f;
+ t->queue = (h->cpu_tag[2] >> 11) & 0x1f;
+ t->port = (h->cpu_tag[0] >> 8) & 0x3f;
+ t->crc_error = h->cpu_tag[1] & BIT(6);
+
+ pr_debug("Reason %d, port %d, queue %d\n", t->reason, t->port, t->queue);
+ if (t->reason >= 19 && t->reason <= 27)
+ t->l2_offloaded = 0;
+ else
+ t->l2_offloaded = 1;
+
return t->l2_offloaded;
}
@@ -398,21 +436,48 @@ static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
bool triggered = false;
u32 atk = sw_r32(RTL838X_ATK_PRVNT_STS);
int i;
- u32 storm_uc = sw_r32(RTL838X_STORM_CTRL_PORT_UC_EXCEED);
- u32 storm_mc = sw_r32(RTL838X_STORM_CTRL_PORT_MC_EXCEED);
- u32 storm_bc = sw_r32(RTL838X_STORM_CTRL_PORT_BC_EXCEED);
-
- pr_debug("IRQ: %08x\n", status);
- if (storm_uc || storm_mc || storm_bc) {
- pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
- storm_uc, storm_mc, storm_bc);
-
- sw_w32(storm_uc, RTL838X_STORM_CTRL_PORT_UC_EXCEED);
- sw_w32(storm_mc, RTL838X_STORM_CTRL_PORT_MC_EXCEED);
- sw_w32(storm_bc, RTL838X_STORM_CTRL_PORT_BC_EXCEED);
-
- triggered = true;
+#if 0
+ /*
+	 * This code is wrong on several counts: the EXCEED registers have a per-port
+	 * offset, so reading only the base address checks port 0 alone, and the register
+	 * layout differs between chip families. Keep it disabled until that is sorted out.
+ */
+ u32 storm_uc = 0;
+ u32 storm_mc = 0;
+ u32 storm_bc = 0;
+ if (priv->family_id == RTL8390_FAMILY_ID) {
+ storm_uc = sw_r32(RTL839X_STORM_CTRL_PORT_UC_EXCEED);
+ storm_mc = sw_r32(RTL839X_STORM_CTRL_PORT_MC_EXCEED);
+ storm_bc = sw_r32(RTL839X_STORM_CTRL_PORT_BC_EXCEED);
+ pr_debug("IRQ: %08x\n", status);
+ if (storm_uc || storm_mc || storm_bc) {
+ pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
+ storm_uc, storm_mc, storm_bc);
+
+ sw_w32(storm_uc, RTL839X_STORM_CTRL_PORT_UC_EXCEED);
+ sw_w32(storm_mc, RTL839X_STORM_CTRL_PORT_MC_EXCEED);
+ sw_w32(storm_bc, RTL839X_STORM_CTRL_PORT_BC_EXCEED);
+
+ triggered = true;
+ }
}
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ storm_uc = sw_r32(RTL838X_STORM_CTRL_PORT_UC_EXCEED);
+ storm_mc = sw_r32(RTL838X_STORM_CTRL_PORT_MC_EXCEED);
+ storm_bc = sw_r32(RTL838X_STORM_CTRL_PORT_BC_EXCEED);
+ pr_debug("IRQ: %08x\n", status);
+ if (storm_uc || storm_mc || storm_bc) {
+ pr_warn("Storm control UC: %08x, MC: %08x, BC: %08x\n",
+ storm_uc, storm_mc, storm_bc);
+
+ sw_w32(storm_uc, RTL838X_STORM_CTRL_PORT_UC_EXCEED);
+ sw_w32(storm_mc, RTL838X_STORM_CTRL_PORT_MC_EXCEED);
+ sw_w32(storm_bc, RTL838X_STORM_CTRL_PORT_BC_EXCEED);
+
+ triggered = true;
+ }
+ }
+#endif
+
if (atk) {
pr_debug("Attack prevention triggered: %08x\n", atk);
@@ -441,7 +506,7 @@ static irqreturn_t rtl83xx_net_irq(int irq, void *dev_id)
/* RX buffer overrun */
if (status & 0x000ff) {
- pr_info("RX buffer overrun: status %x, mask: %x\n",
+ pr_debug("RX buffer overrun: status %x, mask: %x\n",
status, sw_r32(priv->r->dma_if_intr_msk));
sw_w32(status, priv->r->dma_if_intr_sts);
rtl838x_rb_cleanup(priv, status & 0xff);
@@ -731,8 +796,9 @@ static void rtl838x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
* | MEDIA_SEL
*/
sw_w32(0x6192F, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
- /* allow CRC errors on CPU-port */
- sw_w32_mask(0, 0x8, priv->r->mac_port_ctrl(priv->cpu_port));
+
+ /* Enable CRC checks on CPU-port */
+ sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
}
static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
@@ -746,8 +812,8 @@ static void rtl839x_hw_en_rxtx(struct rtl838x_eth_priv *priv)
/* Enable DMA */
sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
- /* Restart TX/RX to CPU port */
- sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
+ /* Restart TX/RX to CPU port, enable CRC checking */
+ sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
/* CPU port joins Lookup Miss Flooding Portmask */
// TODO: The code below should also work for the RTL838x
@@ -784,8 +850,8 @@ static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
/* Enable DMA */
sw_w32_mask(0, RX_EN_93XX | TX_EN_93XX, priv->r->dma_if_ctrl);
- /* Restart TX/RX to CPU port */
- sw_w32_mask(0x0, 0x3, priv->r->mac_port_ctrl(priv->cpu_port));
+ /* Restart TX/RX to CPU port, enable CRC checking */
+ sw_w32_mask(0x0, 0x3 | BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
sw_w32_mask(0, BIT(priv->cpu_port), RTL930X_L2_UNKN_UC_FLD_PMSK);
sw_w32(0x217, priv->r->mac_force_mode_ctrl + priv->cpu_port * 4);
@@ -883,28 +949,30 @@ static int rtl838x_eth_open(struct net_device *ndev)
switch (priv->family_id) {
case RTL8380_FAMILY_ID:
rtl838x_hw_en_rxtx(priv);
- /* Trap IGMP traffic to CPU-Port */
+ /* Trap IGMP/MLD traffic to CPU-Port */
sw_w32(0x3, RTL838X_SPCL_TRAP_IGMP_CTRL);
/* Flush learned FDB entries on link down of a port */
sw_w32_mask(0, BIT(7), RTL838X_L2_CTRL_0);
break;
+
case RTL8390_FAMILY_ID:
rtl839x_hw_en_rxtx(priv);
+ // Trap MLD and IGMP messages to CPU_PORT
sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
/* Flush learned FDB entries on link down of a port */
sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
break;
+
case RTL9300_FAMILY_ID:
rtl93xx_hw_en_rxtx(priv);
/* Flush learned FDB entries on link down of a port */
sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
- sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_SABLK_CTRL);
- sw_w32_mask(BIT(28), 0, RTL930X_L2_PORT_DABLK_CTRL);
+ // Trap MLD and IGMP messages to CPU_PORT
+ sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
break;
case RTL9310_FAMILY_ID:
rtl93xx_hw_en_rxtx(priv);
-// TODO: Add trapping of IGMP frames to CPU-port
break;
}
@@ -1111,19 +1179,22 @@ static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
len = skb->len;
+ pr_debug("SEND: %08x %08x %08x %08x %08x present %d, vp %04x vt %04x\n",
+ *((u32 *)skb->data), *((u32 *)skb->data + 1), *((u32 *)skb->data + 2),
+ *((u32 *)skb->data + 3), *((u32 *)skb->data + 4),
+ skb->vlan_present, skb->vlan_proto, skb->vlan_tci
+ );
/* Check for DSA tagging at the end of the buffer */
if (netdev_uses_dsa(dev) && skb->data[len-4] == 0x80 && skb->data[len-3] > 0
- && skb->data[len-3] < 28 && skb->data[len-2] == 0x10
+ && skb->data[len-3] < priv->cpu_port && skb->data[len-2] == 0x10
&& skb->data[len-1] == 0x00) {
- /* Reuse tag space for CRC */
+ /* Reuse tag space for CRC if possible */
dest_port = skb->data[len-3];
+ skb->data[len-4] = skb->data[len-3] = skb->data[len-2] = skb->data[len-1] = 0x00;
len -= 4;
}
- if (len < ETH_ZLEN)
- len = ETH_ZLEN;
- /* ASIC expects that packet includes CRC, so we extend by 4 bytes */
- len += 4;
+ len += 4; // Add space for CRC
if (skb_padto(skb, len)) {
ret = NETDEV_TX_OK;
@@ -1137,6 +1208,11 @@ static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
h = &ring->tx_header[q][ring->c_tx[q]];
h->size = len;
h->len = len;
+ // On RTL8380 SoCs, the packet length being sent needs adjustment
+ if (priv->family_id == RTL8380_FAMILY_ID) {
+ if (len < ETH_ZLEN - 4)
+ h->len -= 4;
+ }
priv->r->create_tx_header(h, dest_port, skb->priority >> 1);
@@ -1264,6 +1340,7 @@ static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
/* Make sure data is visible */
mb();
memcpy(skb->data, (u8 *)KSEG1ADDR(data), len);
+
/* Overwrite CRC with cpu_tag */
if (dsa) {
priv->r->decode_tag(h, &tag);
@@ -1280,6 +1357,12 @@ static int rtl838x_hw_receive(struct net_device *dev, int r, int budget)
tag.queue, len, tag.reason, tag.port);
skb->protocol = eth_type_trans(skb, dev);
+ if (dev->features & NETIF_F_RXCSUM) {
+ if (tag.crc_error)
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -1596,7 +1679,7 @@ static int rtl930x_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
// TODO: These are hard-coded for the 2 Fibre Ports of the XGS1210
if (mii_id >= 26 && mii_id <= 27)
- return rtl930x_read_sds_phy(mii_id - 18, 0, regnum);
+ return rtl930x_read_sds_phy(mii_id - 23, 0, regnum);
if (regnum & MII_ADDR_C45) {
regnum &= ~MII_ADDR_C45;
@@ -1719,52 +1802,54 @@ static int rtl930x_mdio_reset(struct mii_bus *bus)
{
int i;
int pos;
+ struct rtl838x_eth_priv *priv = bus->priv;
+ u32 c45_mask = 0;
+ u32 poll_sel[2];
+ u32 poll_ctrl = 0;
- pr_info("RTL930X_SMI_PORT0_15_POLLING_SEL %08x 16-27: %08x\n",
- sw_r32(RTL930X_SMI_PORT0_15_POLLING_SEL),
- sw_r32(RTL930X_SMI_PORT16_27_POLLING_SEL));
-
- pr_info("%s: Enable SMI polling on SMI bus 0, SMI1, SMI2, disable on SMI3\n", __func__);
- sw_w32_mask(BIT(20) | BIT(21) | BIT(22), BIT(23), RTL930X_SMI_GLB_CTRL);
-
- pr_info("RTL9300 Powering on SerDes ports\n");
- rtl9300_sds_power(24, 1);
- rtl9300_sds_power(25, 1);
- rtl9300_sds_power(26, 1);
- rtl9300_sds_power(27, 1);
- mdelay(200);
-
- // RTL930X_SMI_PORT0_15_POLLING_SEL 55550000 16-27: 00f9aaaa
- // i.e SMI=0 for all ports
- for (i = 0; i < 5; i++)
- pr_info("port phy: %08x\n", sw_r32(RTL930X_SMI_PORT0_5_ADDR + i *4));
-
- // 1-to-1 mapping of port to phy-address
- for (i = 0; i < 24; i++) {
+ // Mapping of port to phy-addresses on an SMI bus
+ poll_sel[0] = poll_sel[1] = 0;
+ for (i = 0; i < 28; i++) {
pos = (i % 6) * 5;
- sw_w32_mask(0x1f << pos, i << pos, RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
+ sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos,
+ RTL930X_SMI_PORT0_5_ADDR + (i / 6) * 4);
+
+ pos = (i * 2) % 32;
+ poll_sel[i / 16] |= priv->smi_bus[i] << pos;
+ poll_ctrl |= BIT(20 + priv->smi_bus[i]);
}
- // ports 24 and 25 have PHY addresses 8 and 9, ports 26/27 PHY 26/27
- sw_w32(8 | 9 << 5 | 26 << 10 | 27 << 15, RTL930X_SMI_PORT0_5_ADDR + 4 * 4);
+ // Configure which SMI bus is behind which port number
+ sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
+ sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
- // Ports 24 and 25 live on SMI bus 1 and 2
- sw_w32_mask(0x3 << 16, 0x1 << 16, RTL930X_SMI_PORT16_27_POLLING_SEL);
- sw_w32_mask(0x3 << 18, 0x2 << 18, RTL930X_SMI_PORT16_27_POLLING_SEL);
+ // Enable polling on the respective SMI busses
+ sw_w32_mask(0, poll_ctrl, RTL930X_SMI_GLB_CTRL);
- // SMI bus 1 and 2 speak Clause 45 TODO: Configure from .dts
- sw_w32_mask(0, BIT(17) | BIT(18), RTL930X_SMI_GLB_CTRL);
+ // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
+ for (i = 0; i < 4; i++)
+ if (priv->smi_bus_isc45[i])
+ c45_mask |= BIT(i + 16);
+
+ pr_info("c45_mask: %08x\n", c45_mask);
+ sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
// Ports 24 and 25 are 2.5 Gig, set this type (1)
sw_w32_mask(0x7 << 12, 1 << 12, RTL930X_SMI_MAC_TYPE_CTRL);
sw_w32_mask(0x7 << 15, 1 << 15, RTL930X_SMI_MAC_TYPE_CTRL);
+ // Ports 26 and 27 are 10 Gig SerDes, set this type (0)
+ sw_w32_mask(0x7 << 18, 0, RTL930X_SMI_MAC_TYPE_CTRL);
+ sw_w32_mask(0x7 << 21, 0, RTL930X_SMI_MAC_TYPE_CTRL);
+
+ // TODO: Set up RTL9300_SMI_10GPHY_POLLING_SEL_0_ADDR for Aquantia PHYs on 1250
return 0;
}
static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
{
- struct device_node *mii_np;
+ struct device_node *mii_np, *dn;
+ u32 pn;
int ret;
pr_debug("%s called\n", __func__);
@@ -1817,6 +1902,28 @@ static int rtl838x_mdio_init(struct rtl838x_eth_priv *priv)
priv->mii_bus->priv = priv;
priv->mii_bus->parent = &priv->pdev->dev;
+ for_each_node_by_name(dn, "ethernet-phy") {
+ u32 smi_addr[2];
+
+ if (of_property_read_u32(dn, "reg", &pn))
+ continue;
+
+ if (of_property_read_u32_array(dn, "rtl9300,smi-address", &smi_addr[0], 2)) {
+ smi_addr[0] = 0;
+ smi_addr[1] = pn;
+ }
+
+ if (pn < MAX_PORTS) {
+ priv->smi_bus[pn] = smi_addr[0];
+ priv->smi_addr[pn] = smi_addr[1];
+ } else {
+ pr_err("%s: illegal port number %d\n", __func__, pn);
+ }
+
+ if (of_device_is_compatible(dn, "ethernet-phy-ieee802.3-c45"))
+ priv->smi_bus_isc45[smi_addr[0]] = true;
+ }
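+	/*
+	 * The loop above expects ethernet-phy nodes of roughly this shape (values are
+	 * only illustrative; "rtl9300,smi-address" is the <smi-bus phy-address> pair and
+	 * defaults to bus 0 with the port number as address when absent):
+	 *
+	 *   phy24: ethernet-phy@24 {
+	 *           compatible = "ethernet-phy-ieee802.3-c45";
+	 *           reg = <24>;
+	 *           rtl9300,smi-address = <1 8>;
+	 *   };
+	 */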
+
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
ret = of_mdiobus_register(priv->mii_bus, mii_np);
@@ -1837,6 +1944,40 @@ static int rtl838x_mdio_remove(struct rtl838x_eth_priv *priv)
return 0;
}
+static netdev_features_t rtl838x_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static int rtl83xx_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct rtl838x_eth_priv *priv = netdev_priv(dev);
+
+ if ((features ^ dev->features) & NETIF_F_RXCSUM) {
+ if (!(features & NETIF_F_RXCSUM))
+ sw_w32_mask(BIT(3), 0, priv->r->mac_port_ctrl(priv->cpu_port));
+ else
+			sw_w32_mask(0, BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
+ }
+
+ return 0;
+}
+
+static int rtl93xx_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct rtl838x_eth_priv *priv = netdev_priv(dev);
+
+ if ((features ^ dev->features) & NETIF_F_RXCSUM) {
+ if (!(features & NETIF_F_RXCSUM))
+ sw_w32_mask(BIT(4), 0, priv->r->mac_port_ctrl(priv->cpu_port));
+ else
+ sw_w32_mask(0, BIT(4), priv->r->mac_port_ctrl(priv->cpu_port));
+ }
+
+ return 0;
+}
+
static const struct net_device_ops rtl838x_eth_netdev_ops = {
.ndo_open = rtl838x_eth_open,
.ndo_stop = rtl838x_eth_stop,
@@ -1846,6 +1987,9 @@ static const struct net_device_ops rtl838x_eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rtl838x_eth_set_multicast_list,
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
+ .ndo_set_features = rtl83xx_set_features,
+ .ndo_fix_features = rtl838x_fix_features,
+ .ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl839x_eth_netdev_ops = {
@@ -1857,6 +2001,9 @@ static const struct net_device_ops rtl839x_eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rtl839x_eth_set_multicast_list,
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
+ .ndo_set_features = rtl83xx_set_features,
+ .ndo_fix_features = rtl838x_fix_features,
+ .ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl930x_eth_netdev_ops = {
@@ -1868,6 +2015,9 @@ static const struct net_device_ops rtl930x_eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rtl930x_eth_set_multicast_list,
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
+ .ndo_set_features = rtl93xx_set_features,
+ .ndo_fix_features = rtl838x_fix_features,
+ .ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl931x_eth_netdev_ops = {
@@ -1879,6 +2029,8 @@ static const struct net_device_ops rtl931x_eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rtl931x_eth_set_multicast_list,
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
+ .ndo_set_features = rtl93xx_set_features,
+ .ndo_fix_features = rtl838x_fix_features,
};
static const struct phylink_mac_ops rtl838x_phylink_ops = {
@@ -1901,11 +2053,11 @@ static int __init rtl838x_eth_probe(struct platform_device *pdev)
struct device_node *dn = pdev->dev.of_node;
struct rtl838x_eth_priv *priv;
struct resource *res, *mem;
- const void *mac;
phy_interface_t phy_mode;
struct phylink *phylink;
int err = 0, i, rxrings, rxringlen;
struct ring_b *ring;
+ const void *mac;
pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
(u32)pdev, (u32)(&(pdev->dev)));
@@ -1947,7 +2099,7 @@ static int __init rtl838x_eth_probe(struct platform_device *pdev)
err = -ENXIO;
goto err_free;
}
-
+ pr_info("Allocate %ld bytes for DMA\n", rxrings * rxringlen * RING_BUFFER + sizeof(struct ring_b) + sizeof(struct notify_b));
/* Allocate buffer memory */
priv->membase = dmam_alloc_coherent(&pdev->dev, rxrings * rxringlen * RING_BUFFER
+ sizeof(struct ring_b) + sizeof(struct notify_b),
@@ -1975,6 +2127,8 @@ static int __init rtl838x_eth_probe(struct platform_device *pdev)
dev->ethtool_ops = &rtl838x_ethtool_ops;
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = 1536;
+ dev->features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
+ dev->hw_features = NETIF_F_RXCSUM;
priv->id = soc_info.id;
priv->family_id = soc_info.family;
diff --git a/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.h b/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.h
index d7b4317cbb..d2691bfaf5 100644
--- a/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.h
+++ b/target/linux/realtek/files-5.4/drivers/net/ethernet/rtl838x_eth.h
@@ -177,6 +177,7 @@
#define RTL839X_RMA_CTRL_2 (0x1208)
#define RTL839X_RMA_CTRL_3 (0x120C)
+#define RTL930X_VLAN_APP_PKT_CTRL (0xA23C)
#define RTL930X_RMA_CTRL_0 (0x9E60)
#define RTL930X_RMA_CTRL_1 (0x9E64)
#define RTL930X_RMA_CTRL_2 (0x9E68)
@@ -423,6 +424,6 @@ int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val);
int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val);
int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val);
int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val);
-void rtl9300_sds_power(int sds_num, int val);
+int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data);
#endif /* _RTL838X_ETH_H */
diff --git a/target/linux/realtek/files-5.4/drivers/net/phy/rtl83xx-phy.c b/target/linux/realtek/files-5.4/drivers/net/phy/rtl83xx-phy.c
index 78953c6d17..2143fbe44f 100644
--- a/target/linux/realtek/files-5.4/drivers/net/phy/rtl83xx-phy.c
+++ b/target/linux/realtek/files-5.4/drivers/net/phy/rtl83xx-phy.c
@@ -14,16 +14,38 @@
#include <asm/mach-rtl838x/mach-rtl83xx.h>
#include "rtl83xx-phy.h"
+#define PHY_CTRL_REG 0
+#define PHY_POWER_BIT 11
+
+#define PHY_PAGE_2 2
+#define PHY_PAGE_4 4
+#define PARK_PAGE 0x1f
+
+#define RTL9300_PHY_ID_MASK 0xf0ffffff
extern struct rtl83xx_soc_info soc_info;
extern struct mutex smi_lock;
+/*
+ * This lock protects the state of the SoC automatically polling the PHYs over the SMI
+ * bus to detect e.g. link and media changes. For operations on the PHYs such as
+ * patching or other configuration changes such as EEE, polling needs to be disabled
+ * since otherwise these operations may fail or lead to unpredictable results.
+ */
+DEFINE_MUTEX(poll_lock);
+
static const struct firmware rtl838x_8380_fw;
static const struct firmware rtl838x_8214fc_fw;
static const struct firmware rtl838x_8218b_fw;
+int rtl838x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
+int rtl838x_write_mmd_phy(u32 port, u32 devnum, u32 reg, u32 val);
+int rtl839x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
+int rtl839x_write_mmd_phy(u32 port, u32 devnum, u32 reg, u32 val);
int rtl930x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
-int rtl930x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val);
+int rtl930x_write_mmd_phy(u32 port, u32 devnum, u32 reg, u32 val);
+int rtl931x_read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val);
+int rtl931x_write_mmd_phy(u32 port, u32 devnum, u32 reg, u32 val);
static int read_phy(u32 port, u32 page, u32 reg, u32 *val)
{ switch (soc_info.family) {
@@ -54,6 +76,93 @@ static int write_phy(u32 port, u32 page, u32 reg, u32 val)
return -1;
}
+static int read_mmd_phy(u32 port, u32 devnum, u32 regnum, u32 *val)
+{
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ return rtl838x_read_mmd_phy(port, devnum, regnum, val);
+ case RTL8390_FAMILY_ID:
+ return rtl839x_read_mmd_phy(port, devnum, regnum, val);
+ case RTL9300_FAMILY_ID:
+ return rtl930x_read_mmd_phy(port, devnum, regnum, val);
+ case RTL9310_FAMILY_ID:
+ return rtl931x_read_mmd_phy(port, devnum, regnum, val);
+ }
+ return -1;
+}
+
+int write_mmd_phy(u32 port, u32 devnum, u32 reg, u32 val)
+{
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ return rtl838x_write_mmd_phy(port, devnum, reg, val);
+ case RTL8390_FAMILY_ID:
+ return rtl839x_write_mmd_phy(port, devnum, reg, val);
+ case RTL9300_FAMILY_ID:
+ return rtl930x_write_mmd_phy(port, devnum, reg, val);
+ case RTL9310_FAMILY_ID:
+ return rtl931x_write_mmd_phy(port, devnum, reg, val);
+ }
+ return -1;
+}
+
+static u64 disable_polling(int port)
+{
+ u64 saved_state;
+
+ mutex_lock(&poll_lock);
+
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ saved_state = sw_r32(RTL838X_SMI_POLL_CTRL);
+ sw_w32_mask(BIT(port), 0, RTL838X_SMI_POLL_CTRL);
+ break;
+ case RTL8390_FAMILY_ID:
+ saved_state = sw_r32(RTL839X_SMI_PORT_POLLING_CTRL + 4);
+ saved_state <<= 32;
+ saved_state |= sw_r32(RTL839X_SMI_PORT_POLLING_CTRL);
+ sw_w32_mask(BIT(port % 32), 0,
+ RTL839X_SMI_PORT_POLLING_CTRL + ((port >> 5) << 2));
+ break;
+ case RTL9300_FAMILY_ID:
+ saved_state = sw_r32(RTL930X_SMI_POLL_CTRL);
+ sw_w32_mask(BIT(port), 0, RTL930X_SMI_POLL_CTRL);
+ break;
+ case RTL9310_FAMILY_ID:
+ pr_warn("%s not implemented for RTL931X\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&poll_lock);
+
+ return saved_state;
+}
+
+static int resume_polling(u64 saved_state)
+{
+ mutex_lock(&poll_lock);
+
+ switch (soc_info.family) {
+ case RTL8380_FAMILY_ID:
+ sw_w32(saved_state, RTL838X_SMI_POLL_CTRL);
+ break;
+ case RTL8390_FAMILY_ID:
+ sw_w32(saved_state >> 32, RTL839X_SMI_PORT_POLLING_CTRL + 4);
+ sw_w32(saved_state, RTL839X_SMI_PORT_POLLING_CTRL);
+ break;
+ case RTL9300_FAMILY_ID:
+ sw_w32(saved_state, RTL930X_SMI_POLL_CTRL);
+ break;
+ case RTL9310_FAMILY_ID:
+ pr_warn("%s not implemented for RTL931X\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&poll_lock);
+
+ return 0;
+}
+
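+/*
+ * Typical usage pattern (sketch): suspend the SoC's PHY polling around direct
+ * page/MMD register accesses and restore the previous mask afterwards, as the EEE
+ * helpers below do:
+ *
+ *   u64 saved = disable_polling(port);
+ *   ... access PHY registers ...
+ *   resume_polling(saved);
+ */
+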
static void rtl8380_int_phy_on_off(int mac, bool on)
{
u32 val;
@@ -94,18 +203,6 @@ static void rtl8380_phy_reset(int mac)
write_phy(mac, 0, 0, val | BIT(15));
}
-static void rtl8380_sds_rst(int mac)
-{
- u32 offset = (mac == 24) ? 0 : 0x100;
-
- sw_w32_mask(1 << 11, 0, RTL8380_SDS4_FIB_REG0 + offset);
- sw_w32_mask(0x3, 0, RTL838X_SDS4_REG28 + offset);
- sw_w32_mask(0x3, 0x3, RTL838X_SDS4_REG28 + offset);
- sw_w32_mask(0, 0x1 << 6, RTL838X_SDS4_DUMMY0 + offset);
- sw_w32_mask(0x1 << 6, 0, RTL838X_SDS4_DUMMY0 + offset);
- pr_info("SERDES reset: %d\n", mac);
-}
-
/*
* Reset the SerDes by powering it off and set a new operations mode
* of the SerDes. 0x1f is off. Other modes are
@@ -134,7 +231,7 @@ void rtl9300_sds_rst(int sds_num, u32 mode)
sw_w32_mask(0x1f << lsb[sds_num], mode << lsb[sds_num], regs[sds_num]);
mdelay(10);
- pr_info("SDS: 194:%08x 198:%08x 2a0:%08x 2a4:%08x\n",
+ pr_info("SDS_MODE_SEL_0: %08x SDS_MODE_SEL_1: %08x SDS_MODE_SEL_2: %08x SDS_MODE_SEL_3: %08x\n",
sw_r32(0x194), sw_r32(0x198), sw_r32(0x2a0), sw_r32(0x2a4));
}
@@ -187,7 +284,7 @@ int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg)
int i;
u32 cmd = phy_addr << 2 | page << 7 | phy_reg << 13 | 1;
- pr_info("%s: phy_addr %d, phy_reg: %d\n", __func__, phy_addr, phy_reg);
+ pr_debug("%s: phy_addr(SDS-ID) %d, phy_reg: %d\n", __func__, phy_addr, phy_reg);
sw_w32(cmd, RTL930X_SDS_INDACS_CMD);
for (i = 0; i < 100; i++) {
@@ -199,7 +296,7 @@ int rtl930x_read_sds_phy(int phy_addr, int page, int phy_reg)
if (i >= 100)
return -EIO;
- pr_info("%s: returning %04x\n", __func__, sw_r32(RTL930X_SDS_INDACS_DATA) & 0xffff);
+ pr_debug("%s: returning %04x\n", __func__, sw_r32(RTL930X_SDS_INDACS_DATA) & 0xffff);
return sw_r32(RTL930X_SDS_INDACS_DATA) & 0xffff;
}
@@ -307,6 +404,7 @@ static int rtl8393_read_status(struct phy_device *phydev)
return err;
}
+
static int rtl8226_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, 0x1f);
@@ -331,20 +429,20 @@ static int rtl8226_read_status(struct phy_device *phydev)
// Link status must be read twice
for (i = 0; i < 2; i++) {
- rtl930x_read_mmd_phy(port, MMD_VEND2, 0xA402, &val);
+ read_mmd_phy(port, MMD_VEND2, 0xA402, &val);
}
phydev->link = val & BIT(2) ? 1 : 0;
if (!phydev->link)
goto out;
// Read duplex status
- ret = rtl930x_read_mmd_phy(port, MMD_VEND2, 0xA434, &val);
+ ret = read_mmd_phy(port, MMD_VEND2, 0xA434, &val);
if (ret)
goto out;
phydev->duplex = !!(val & BIT(3));
// Read speed
- ret = rtl930x_read_mmd_phy(port, MMD_VEND2, 0xA434, &val);
+ ret = read_mmd_phy(port, MMD_VEND2, 0xA434, &val);
switch (val & 0x0630) {
case 0x0000:
phydev->speed = SPEED_10;
@@ -371,7 +469,7 @@ out:
return ret;
}
-static int rtl8266_advertise_aneg(struct phy_device *phydev)
+static int rtl8226_advertise_aneg(struct phy_device *phydev)
{
int ret = 0;
u32 v;
@@ -379,7 +477,7 @@ static int rtl8266_advertise_aneg(struct phy_device *phydev)
pr_info("In %s\n", __func__);
- ret = rtl930x_read_mmd_phy(port, MMD_AN, 16, &v);
+ ret = read_mmd_phy(port, MMD_AN, 16, &v);
if (ret)
goto out;
@@ -388,31 +486,30 @@ static int rtl8266_advertise_aneg(struct phy_device *phydev)
v |= BIT(7); // HD 100M
v |= BIT(8); // FD 100M
- ret = rtl930x_write_mmd_phy(port, MMD_AN, 16, v);
+ ret = write_mmd_phy(port, MMD_AN, 16, v);
// Allow 1GBit
- ret = rtl930x_read_mmd_phy(port, MMD_VEND2, 0xA412, &v);
+ ret = read_mmd_phy(port, MMD_VEND2, 0xA412, &v);
if (ret)
goto out;
v |= BIT(9); // FD 1000M
- ret = rtl930x_write_mmd_phy(port, MMD_VEND2, 0xA412, v);
+ ret = write_mmd_phy(port, MMD_VEND2, 0xA412, v);
if (ret)
goto out;
// Allow 2.5G
- ret = rtl930x_read_mmd_phy(port, MMD_AN, 32, &v);
+ ret = read_mmd_phy(port, MMD_AN, 32, &v);
if (ret)
goto out;
v |= BIT(7);
- ret = rtl930x_write_mmd_phy(port, MMD_AN, 32, v);
+ ret = write_mmd_phy(port, MMD_AN, 32, v);
out:
return ret;
}
-
static int rtl8226_config_aneg(struct phy_device *phydev)
{
int ret = 0;
@@ -421,26 +518,26 @@ static int rtl8226_config_aneg(struct phy_device *phydev)
pr_info("In %s\n", __func__);
if (phydev->autoneg == AUTONEG_ENABLE) {
- ret = rtl8266_advertise_aneg(phydev);
+ ret = rtl8226_advertise_aneg(phydev);
if (ret)
goto out;
// AutoNegotiationEnable
- ret = rtl930x_read_mmd_phy(port, MMD_AN, 0, &v);
+ ret = read_mmd_phy(port, MMD_AN, 0, &v);
if (ret)
goto out;
v |= BIT(12); // Enable AN
- ret = rtl930x_write_mmd_phy(port, MMD_AN, 0, v);
+ ret = write_mmd_phy(port, MMD_AN, 0, v);
if (ret)
goto out;
// RestartAutoNegotiation
- ret = rtl930x_read_mmd_phy(port, MMD_VEND2, 0xA400, &v);
+ ret = read_mmd_phy(port, MMD_VEND2, 0xA400, &v);
if (ret)
goto out;
v |= BIT(9);
- ret = rtl930x_write_mmd_phy(port, MMD_VEND2, 0xA400, v);
+ ret = write_mmd_phy(port, MMD_VEND2, 0xA400, v);
}
pr_info("%s: Ret is already: %d\n", __func__, ret);
@@ -451,6 +548,68 @@ out:
return ret;
}
+static int rtl8226_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *e)
+{
+ u32 val;
+ int addr = phydev->mdio.addr;
+
+ pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled);
+
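+	/* MMD 7 (AN) register 60 is the standard 802.3 EEE advertisement
+	 * register (bit 1: 100BASE-TX EEE, bit 2: 1000BASE-T EEE) and
+	 * register 62 is the EEE advertisement 2 register added by 802.3bz
+	 * (bit 0: 2.5GBASE-T EEE), which matches the bits tested below. */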
+ read_mmd_phy(addr, MMD_AN, 60, &val);
+ if (e->eee_enabled) {
+ e->eee_enabled = !!(val & BIT(1));
+ if (!e->eee_enabled) {
+ read_mmd_phy(addr, MMD_AN, 62, &val);
+ e->eee_enabled = !!(val & BIT(0));
+ }
+ }
+ pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled);
+
+ return 0;
+}
+
+static int rtl8226_set_eee(struct phy_device *phydev, struct ethtool_eee *e)
+{
+ int port = phydev->mdio.addr;
+ u64 poll_state;
+ bool an_enabled;
+ u32 val;
+
+ pr_info("In %s, port %d, enabled %d\n", __func__, port, e->eee_enabled);
+
+ poll_state = disable_polling(port);
+
+ // Remember aneg state
+ read_mmd_phy(port, MMD_AN, 0, &val);
+ an_enabled = !!(val & BIT(12));
+
+ // Setup 100/1000MBit
+ read_mmd_phy(port, MMD_AN, 60, &val);
+ if (e->eee_enabled)
+ val |= 0x6;
+ else
+		val &= ~0x6;
+ write_mmd_phy(port, MMD_AN, 60, val);
+
+ // Setup 2.5GBit
+ read_mmd_phy(port, MMD_AN, 62, &val);
+ if (e->eee_enabled)
+ val |= 0x1;
+ else
+		val &= ~0x1;
+ write_mmd_phy(port, MMD_AN, 62, val);
+
+ // RestartAutoNegotiation
+ read_mmd_phy(port, MMD_VEND2, 0xA400, &val);
+ val |= BIT(9);
+ write_mmd_phy(port, MMD_VEND2, 0xA400, val);
+
+ resume_polling(poll_state);
+
+ return 0;
+}
+
static struct fw_header *rtl838x_request_fw(struct phy_device *phydev,
const struct firmware *fw,
const char *name)
@@ -750,79 +909,6 @@ static int rtl8218b_ext_match_phy_device(struct phy_device *phydev)
return phydev->phy_id == PHY_ID_RTL8218B_E;
}
-/*
- * Read an mmd register of the PHY
- */
-static int rtl83xx_read_mmd_phy(u32 port, u32 addr, u32 reg, u32 *val)
-{
- u32 v;
-
- mutex_lock(&smi_lock);
-
- if (rtl838x_smi_wait_op(10000))
- goto timeout;
-
- sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
- mdelay(10);
-
- sw_w32_mask(0xffff0000, port << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
-
- v = addr << 16 | reg;
- sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_3);
-
- /* mmd-access | read | cmd-start */
- v = 1 << 1 | 0 << 2 | 1;
- sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
-
- if (rtl838x_smi_wait_op(10000))
- goto timeout;
-
- *val = sw_r32(RTL838X_SMI_ACCESS_PHY_CTRL_2) & 0xffff;
-
- mutex_unlock(&smi_lock);
- return 0;
-
-timeout:
- mutex_unlock(&smi_lock);
- return -ETIMEDOUT;
-}
-
-/*
- * Write to an mmd register of the PHY
- */
-static int rtl838x_write_mmd_phy(u32 port, u32 addr, u32 reg, u32 val)
-{
- u32 v;
-
- pr_debug("MMD write: port %d, dev %d, reg %d, val %x\n", port, addr, reg, val);
- val &= 0xffff;
- mutex_lock(&smi_lock);
-
- if (rtl838x_smi_wait_op(10000))
- goto timeout;
-
- sw_w32(1 << port, RTL838X_SMI_ACCESS_PHY_CTRL_0);
- mdelay(10);
-
- sw_w32_mask(0xffff0000, val << 16, RTL838X_SMI_ACCESS_PHY_CTRL_2);
-
- sw_w32_mask(0x1f << 16, addr << 16, RTL838X_SMI_ACCESS_PHY_CTRL_3);
- sw_w32_mask(0xffff, reg, RTL838X_SMI_ACCESS_PHY_CTRL_3);
- /* mmd-access | write | cmd-start */
- v = 1 << 1 | 1 << 2 | 1;
- sw_w32(v, RTL838X_SMI_ACCESS_PHY_CTRL_1);
-
- if (rtl838x_smi_wait_op(10000))
- goto timeout;
-
- mutex_unlock(&smi_lock);
- return 0;
-
-timeout:
- mutex_unlock(&smi_lock);
- return -ETIMEDOUT;
-}
-
static int rtl8218b_read_mmd(struct phy_device *phydev,
int devnum, u16 regnum)
{
@@ -830,7 +916,7 @@ static int rtl8218b_read_mmd(struct phy_device *phydev,
u32 val;
int addr = phydev->mdio.addr;
- ret = rtl83xx_read_mmd_phy(addr, devnum, regnum, &val);
+ ret = read_mmd_phy(addr, devnum, regnum, &val);
if (ret)
return ret;
return val;
@@ -850,8 +936,7 @@ static int rtl8226_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
int err;
u32 val;
- err = rtl930x_read_mmd_phy(port, devnum, regnum, &val);
-
+ err = read_mmd_phy(port, devnum, regnum, &val);
if (err)
return err;
return val;
@@ -861,7 +946,7 @@ static int rtl8226_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int port = phydev->mdio.addr; // the SoC translates port addresses to PHY addr
- return rtl930x_write_mmd_phy(port, devnum, regnum, val);
+ return write_mmd_phy(port, devnum, regnum, val);
}
static void rtl8380_rtl8214fc_media_set(int mac, bool set_fibre)
@@ -957,90 +1042,85 @@ static int rtl8214fc_get_port(struct phy_device *phydev)
return PORT_MII;
}
-static void rtl8218b_eee_set_u_boot(int port, bool enable)
+void rtl8218d_reset(int port)
{
- u32 val;
- bool an_enabled;
+ u32 v, tmp;
- /* Set GPHY page to copper */
- write_phy(port, 0, 30, 0x0001);
- read_phy(port, 0, 0, &val);
- an_enabled = val & (1 << 12);
+ // Port must be a base port-id for the PHY-block
+ if (port % 8)
+ return;
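+	/* The RTL8218D is an 8-port PHY package, so only the base port of
+	 * each block (0, 8, 16, ...) is expected to trigger the reset here. */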
- if (enable) {
- /* 100/1000M EEE Capability */
- write_phy(port, 0, 13, 0x0007);
- write_phy(port, 0, 14, 0x003C);
- write_phy(port, 0, 13, 0x4007);
- write_phy(port, 0, 14, 0x0006);
+ if (read_phy(port, 0, 30, &v))
+ return;
- read_phy(port, 0x0A43, 25, &val);
- val |= 1 << 4;
- write_phy(port, 0x0A43, 25, val);
- } else {
- /* 100/1000M EEE Capability */
- write_phy(port, 0, 13, 0x0007);
- write_phy(port, 0, 14, 0x003C);
- write_phy(port, 0, 13, 0x0007);
- write_phy(port, 0, 14, 0x0000);
+ write_phy(port, 0, 30, 8);
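+	/* Writing 8 to register 30 apparently selects the PHY's internal
+	 * SerDes register space (the 0x400/0x500 pages used below); the
+	 * value v read above restores the original page at the end. */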
- read_phy(port, 0x0A43, 25, &val);
- val &= ~(1 << 4);
- write_phy(port, 0x0A43, 25, val);
- }
+ /* Reset SerDes 0 */
- /* Restart AN if enabled */
- if (an_enabled) {
- read_phy(port, 0, 0, &val);
- val |= (1 << 12) | (1 << 9);
- write_phy(port, 0, 0, val);
+ /* Bit 0: SP_SDS_EN_TX, Bit 1: SP_SDS_EN_RX; */
+ read_phy(port, 0x400, 0x10, &tmp);
+ tmp &= ~0x3;
+ write_phy(port, 0x400, 0x10, tmp);
+
+ read_phy(port, 0x400, 0x10, &tmp);
+	tmp |= 0x3;
+ write_phy(port, 0x400, 0x10, tmp);
+
+ // Check for QSGMII SerDes 1 reset
+ read_phy(port, 0x260, 18, &tmp);
+ if ((tmp & 0xf0) == 0xd0) {
+ read_phy(port, 0x500, 0x10, &tmp);
+ tmp &= ~0x3;
+ write_phy(port, 0x500, 0x10, tmp);
+
+ read_phy(port, 0x500, 0x10, &tmp);
+		tmp |= 0x3;
+ write_phy(port, 0x500, 0x10, tmp);
}
- /* GPHY page back to auto*/
- write_phy(port, 0xa42, 29, 0);
+ write_phy(port, 0, 30, v);
}
-// TODO: unused
-void rtl8380_rtl8218b_eee_set(int port, bool enable)
+/*
+ * Enable EEE on the RTL8218D PHYs
+ * The method used is not the preferred way (which would be based on the MAC-EEE state),
+ * but the only one that works, since the kernel first enables EEE in the MAC
+ * and then sets up the PHY. The MAC-based approach would require the opposite order.
+ */
+void rtl8218d_eee_set(int port, bool enable)
{
u32 val;
bool an_enabled;
pr_debug("In %s %d, enable %d\n", __func__, port, enable);
/* Set GPHY page to copper */
- write_phy(port, 0xa42, 29, 0x0001);
+ write_phy(port, 0xa42, 30, 0x0001);
read_phy(port, 0, 0, &val);
- an_enabled = val & (1 << 12);
-
- /* MAC based EEE */
- read_phy(port, 0xa43, 25, &val);
- val &= ~(1 << 5);
- write_phy(port, 0xa43, 25, val);
+ an_enabled = val & BIT(12);
- /* 100M / 1000M EEE */
- if (enable)
- rtl838x_write_mmd_phy(port, 7, 60, 0x6);
- else
- rtl838x_write_mmd_phy(port, 7, 60, 0);
+ /* Enable 100M (bit 1) / 1000M (bit 2) EEE */
+ read_mmd_phy(port, 7, 60, &val);
+ val |= BIT(2) | BIT(1);
+ write_mmd_phy(port, 7, 60, enable ? 0x6 : 0);
/* 500M EEE ability */
read_phy(port, 0xa42, 20, &val);
if (enable)
- val |= 1 << 7;
+ val |= BIT(7);
else
- val &= ~(1 << 7);
+ val &= ~BIT(7);
write_phy(port, 0xa42, 20, val);
/* Restart AN if enabled */
if (an_enabled) {
read_phy(port, 0, 0, &val);
- val |= (1 << 12) | (1 << 9);
+ val |= BIT(9);
write_phy(port, 0, 0, val);
}
/* GPHY page back to auto*/
- write_phy(port, 0xa42, 29, 0);
+ write_phy(port, 0xa42, 30, 0);
}
static int rtl8218b_get_eee(struct phy_device *phydev,
@@ -1049,16 +1129,21 @@ static int rtl8218b_get_eee(struct phy_device *phydev,
u32 val;
int addr = phydev->mdio.addr;
- pr_debug("In %s, port %d\n", __func__, addr);
+ pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled);
/* Set GPHY page to copper */
write_phy(addr, 0xa42, 29, 0x0001);
- rtl83xx_read_mmd_phy(addr, 7, 60, &val);
- if (e->eee_enabled && (!!(val & (1 << 7))))
- e->eee_enabled = !!(val & (1 << 7));
- else
- e->eee_enabled = 0;
+ read_phy(addr, 7, 60, &val);
+ if (e->eee_enabled) {
+ // Verify vs MAC-based EEE
+ e->eee_enabled = !!(val & BIT(7));
+ if (!e->eee_enabled) {
+ read_phy(addr, 0x0A43, 25, &val);
+ e->eee_enabled = !!(val & BIT(4));
+ }
+ }
+ pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled);
/* GPHY page to auto */
write_phy(addr, 0xa42, 29, 0x0000);
@@ -1066,49 +1151,24 @@ static int rtl8218b_get_eee(struct phy_device *phydev,
return 0;
}
-// TODO: unused
-void rtl8380_rtl8218b_green_set(int mac, bool enable)
-{
- u32 val;
-
- /* Set GPHY page to copper */
- write_phy(mac, 0xa42, 29, 0x0001);
-
- write_phy(mac, 0, 27, 0x8011);
- read_phy(mac, 0, 28, &val);
- if (enable) {
- val |= 1 << 9;
- write_phy(mac, 0, 27, 0x8011);
- write_phy(mac, 0, 28, val);
- } else {
- val &= ~(1 << 9);
- write_phy(mac, 0, 27, 0x8011);
- write_phy(mac, 0, 28, val);
- }
-
- /* GPHY page to auto */
- write_phy(mac, 0xa42, 29, 0x0000);
-}
-
-// TODO: unused
-int rtl8380_rtl8214fc_get_green(struct phy_device *phydev, struct ethtool_eee *e)
+static int rtl8218d_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *e)
{
u32 val;
int addr = phydev->mdio.addr;
- pr_debug("In %s %d\n", __func__, addr);
+ pr_debug("In %s, port %d, was enabled: %d\n", __func__, addr, e->eee_enabled);
+
/* Set GPHY page to copper */
- write_phy(addr, 0xa42, 29, 0x0001);
+ write_phy(addr, 0xa42, 30, 0x0001);
- write_phy(addr, 0, 27, 0x8011);
- read_phy(addr, 0, 28, &val);
- if (e->eee_enabled && (!!(val & (1 << 9))))
- e->eee_enabled = !!(val & (1 << 9));
- else
- e->eee_enabled = 0;
+ read_phy(addr, 7, 60, &val);
+ if (e->eee_enabled)
+ e->eee_enabled = !!(val & BIT(7));
+ pr_debug("%s: enabled: %d\n", __func__, e->eee_enabled);
/* GPHY page to auto */
- write_phy(addr, 0xa42, 29, 0x0000);
+ write_phy(addr, 0xa42, 30, 0x0000);
return 0;
}
@@ -1116,20 +1176,56 @@ int rtl8380_rtl8214fc_get_green(struct phy_device *phydev, struct ethtool_eee *e
static int rtl8214fc_set_eee(struct phy_device *phydev,
struct ethtool_eee *e)
{
- u32 pollMask;
- int addr = phydev->mdio.addr;
+ u32 poll_state;
+ int port = phydev->mdio.addr;
+ bool an_enabled;
+ u32 val;
- pr_debug("In %s port %d, enabled %d\n", __func__, addr, e->eee_enabled);
+ pr_debug("In %s port %d, enabled %d\n", __func__, port, e->eee_enabled);
- if (rtl8380_rtl8214fc_media_is_fibre(addr)) {
- netdev_err(phydev->attached_dev, "Port %d configured for FIBRE", addr);
+ if (rtl8380_rtl8214fc_media_is_fibre(port)) {
+ netdev_err(phydev->attached_dev, "Port %d configured for FIBRE", port);
return -ENOTSUPP;
}
- pollMask = sw_r32(RTL838X_SMI_POLL_CTRL);
- sw_w32(0, RTL838X_SMI_POLL_CTRL);
- rtl8218b_eee_set_u_boot(addr, (bool) e->eee_enabled);
- sw_w32(pollMask, RTL838X_SMI_POLL_CTRL);
+ poll_state = disable_polling(port);
+
+ /* Set GPHY page to copper */
+ write_phy(port, 0xa42, 29, 0x0001);
+
+ // Get auto-negotiation status
+ read_phy(port, 0, 0, &val);
+ an_enabled = val & BIT(12);
+
+ pr_info("%s: aneg: %d\n", __func__, an_enabled);
+ read_phy(port, 0x0A43, 25, &val);
+ val &= ~BIT(5); // Use MAC-based EEE
+ write_phy(port, 0x0A43, 25, val);
+
+ /* Enable 100M (bit 1) / 1000M (bit 2) EEE */
+ write_phy(port, 7, 60, e->eee_enabled ? 0x6 : 0);
+
+ /* 500M EEE ability */
+ read_phy(port, 0xa42, 20, &val);
+ if (e->eee_enabled)
+ val |= BIT(7);
+ else
+ val &= ~BIT(7);
+ write_phy(port, 0xa42, 20, val);
+
+ /* Restart AN if enabled */
+ if (an_enabled) {
+ pr_info("%s: doing aneg\n", __func__);
+ read_phy(port, 0, 0, &val);
+ val |= BIT(9);
+ write_phy(port, 0, 0, val);
+ }
+
+ /* GPHY page back to auto*/
+ write_phy(port, 0xa42, 29, 0);
+
+ resume_polling(poll_state);
+
return 0;
}
@@ -1147,18 +1243,72 @@ static int rtl8214fc_get_eee(struct phy_device *phydev,
return rtl8218b_get_eee(phydev, e);
}
-static int rtl8218b_set_eee(struct phy_device *phydev,
- struct ethtool_eee *e)
+static int rtl8218b_set_eee(struct phy_device *phydev, struct ethtool_eee *e)
+{
+ int port = phydev->mdio.addr;
+ u64 poll_state;
+ u32 val;
+ bool an_enabled;
+
+ pr_info("In %s, port %d, enabled %d\n", __func__, port, e->eee_enabled);
+
+ poll_state = disable_polling(port);
+
+ /* Set GPHY page to copper */
+ write_phy(port, 0, 30, 0x0001);
+ read_phy(port, 0, 0, &val);
+ an_enabled = val & BIT(12);
+
+ if (e->eee_enabled) {
+ /* 100/1000M EEE Capability */
+ write_phy(port, 0, 13, 0x0007);
+ write_phy(port, 0, 14, 0x003C);
+ write_phy(port, 0, 13, 0x4007);
+ write_phy(port, 0, 14, 0x0006);
+
+ read_phy(port, 0x0A43, 25, &val);
+ val |= BIT(4);
+ write_phy(port, 0x0A43, 25, val);
+ } else {
+ /* 100/1000M EEE Capability */
+ write_phy(port, 0, 13, 0x0007);
+ write_phy(port, 0, 14, 0x003C);
+ write_phy(port, 0, 13, 0x0007);
+ write_phy(port, 0, 14, 0x0000);
+
+ read_phy(port, 0x0A43, 25, &val);
+ val &= ~BIT(4);
+ write_phy(port, 0x0A43, 25, val);
+ }
+
+ /* Restart AN if enabled */
+ if (an_enabled) {
+ read_phy(port, 0, 0, &val);
+ val |= BIT(9);
+ write_phy(port, 0, 0, val);
+ }
+
+ /* GPHY page back to auto*/
+ write_phy(port, 0xa42, 30, 0);
+
+ pr_info("%s done\n", __func__);
+ resume_polling(poll_state);
+
+ return 0;
+}
+
+static int rtl8218d_set_eee(struct phy_device *phydev, struct ethtool_eee *e)
{
- u32 pollMask;
int addr = phydev->mdio.addr;
+ u64 poll_state;
- pr_debug("In %s, port %d, enabled %d\n", __func__, addr, e->eee_enabled);
+ pr_info("In %s, port %d, enabled %d\n", __func__, addr, e->eee_enabled);
- pollMask = sw_r32(RTL838X_SMI_POLL_CTRL);
- sw_w32(0, RTL838X_SMI_POLL_CTRL);
- rtl8218b_eee_set_u_boot(addr, (bool) e->eee_enabled);
- sw_w32(pollMask, RTL838X_SMI_POLL_CTRL);
+ poll_state = disable_polling(addr);
+
+ rtl8218d_eee_set(addr, (bool) e->eee_enabled);
+
+ resume_polling(poll_state);
return 0;
}
@@ -1490,14 +1640,441 @@ static int rtl8390_configure_serdes(struct phy_device *phydev)
return 0;
}
+void rtl9300_sds_field_w(int sds, u32 page, u32 reg, int end_bit, int start_bit, u32 v)
+{
+	int l = end_bit - start_bit + 1;
+ u32 data = v;
+
+ if (l < 32) {
+ u32 mask = BIT(l) - 1;
+
+ data = rtl930x_read_sds_phy(sds, page, reg);
+ data &= ~(mask << start_bit);
+ data |= (v & mask) << start_bit;
+ }
+
+ rtl930x_write_sds_phy(sds, page, reg, data);
+}
+
+
+u32 rtl9300_sds_field_r(int sds, u32 page, u32 reg, int end_bit, int start_bit)
+{
+	int l = end_bit - start_bit + 1;
+ u32 v = rtl930x_read_sds_phy(sds, page, reg);
+
+ if (l >= 32)
+ return v;
+
+ return (v >> start_bit) & (BIT(l) - 1);
+}
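+
+/*
+ * Illustrative example (not called anywhere): rtl9300_sds_field_w(sds, 0x1f,
+ * 9, 11, 7, 0x16) read-modify-writes SerDes page 0x1f, register 9 and places
+ * 0x16 into bits 11..7, i.e. the 5-bit SDS mode field used by the code below;
+ * rtl9300_sds_field_r() with the same bit arguments returns just that field.
+ */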
+
+/*
+ * Force PHY modes on 10GBit Serdes
+ */
+void rtl9300_force_sds_mode(int sds, phy_interface_t phy_if)
+{
+ int sds_mode;
+ bool lc_on;
+ int i, lc_value;
+ int lane_0 = (sds % 2) ? sds - 1 : sds;
+ u32 v, cr_0, cr_1, cr_2;
+ u32 m_bit, l_bit;
+
+ pr_info("%s: SDS: %d, mode %d\n", __func__, sds, phy_if);
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_SGMII:
+ sds_mode = 0x2;
+ lc_on = false;
+ lc_value = 0x1;
+ break;
+
+ case PHY_INTERFACE_MODE_HSGMII:
+ sds_mode = 0x12;
+ lc_value = 0x3;
+ // Configure LC
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ sds_mode = 0x04;
+ lc_on = false;
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ sds_mode = 0x16;
+ lc_value = 0x3;
+ // Configure LC
+ break;
+
+ case PHY_INTERFACE_MODE_10GKR:
+ sds_mode = 0x1a;
+ lc_on = true;
+ lc_value = 0x5;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ // This will disable SerDes
+ break;
+
+ default:
+ pr_err("%s: unknown serdes mode: %s\n",
+ __func__, phy_modes(phy_if));
+ return;
+ }
+
+ // Power down SerDes
+ rtl9300_sds_field_w(sds, 0x0, 0, 7, 6, 0x3);
+
+ // Force mode enable
+ rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 0x1);
+
+ /* SerDes off */
+ rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, 0x1f);
+
+ if (phy_if == PHY_INTERFACE_MODE_NA)
+ return;
+
+ // Enable LC and ring
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 3, 0, 0xf);
+
+ if (sds == lane_0)
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 5, 4, 0x1);
+ else
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 7, 6, 0x1);
+
+ rtl9300_sds_field_w(sds, 0x20, 0, 5, 4, 0x3);
+
+	if (lc_on)
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 11, 8, lc_value);
+ else
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 15, 12, lc_value);
+
+ // Force analog LC & ring on
+ rtl9300_sds_field_w(lane_0, 0x21, 11, 3, 0, 0xf);
+
+ v = lc_on ? 0x3 : 0x1;
+
+	if (sds == lane_0)
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 5, 4, v);
+ else
+ rtl9300_sds_field_w(lane_0, 0x20, 18, 7, 6, v);
+
+ // Force SerDes mode
+ rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 1);
+ rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, sds_mode);
+
+ // Toggle LC or Ring
+ for (i = 0; i < 20; i++) {
+ mdelay(200);
+
+ rtl930x_write_sds_phy(lane_0, 0x1f, 2, 53);
+
+ m_bit = (lane_0 == sds) ? (4) : (5);
+ l_bit = (lane_0 == sds) ? (4) : (5);
+
+ cr_0 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit);
+ mdelay(10);
+ cr_1 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit);
+ mdelay(10);
+ cr_2 = rtl9300_sds_field_r(lane_0, 0x1f, 20, m_bit, l_bit);
+
+		if (cr_0 && cr_1 && cr_2) {
+ u32 t;
+ if (phy_if != PHY_INTERFACE_MODE_10GKR)
+ break;
+
+ t = rtl9300_sds_field_r(sds, 0x6, 0x1, 2, 2);
+ rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, 0x1);
+
+ // Reset FSM
+ rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0);
+ mdelay(10);
+
+ // Need to read this twice
+ v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12);
+ v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12);
+
+ rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, t);
+
+ // Reset FSM again
+ rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0);
+ mdelay(10);
+
+ if (v == 1)
+ break;
+ }
+
+ m_bit = (phy_if == PHY_INTERFACE_MODE_10GKR) ? 3 : 1;
+ l_bit = (phy_if == PHY_INTERFACE_MODE_10GKR) ? 2 : 0;
+
+ rtl9300_sds_field_w(lane_0, 0x21, 11, m_bit, l_bit, 0x2);
+ mdelay(10);
+ rtl9300_sds_field_w(lane_0, 0x21, 11, m_bit, l_bit, 0x3);
+ }
+
+	/* Release power-down */
+ rtl9300_sds_field_w(sds, 0x20, 0, 7, 6, 0);
+
+	/* SerDes RX reset */
+ rtl9300_sds_field_w(sds, 0x2e, 0x15, 4, 4, 0x1);
+ mdelay(5);
+ rtl9300_sds_field_w(sds, 0x2e, 0x15, 4, 4, 0x0);
+}
+
+#define RTL930X_MAC_FORCE_MODE_CTRL 0xCA1C
+
+void rtl9300_do_rx_calibration(int sds)
+{
+	int tap0_init_val = 0x1f; // Initial Decision Feedback Equalizer (DFE) tap 0 value
+ int vth_min = 0x0;
+
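+	/* The start_x.y.z / end_x.y.z markers below appear to follow the step
+	 * numbering of the vendor SDK's RX calibration routine and are kept
+	 * to ease comparison with that code. */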
+ pr_info("start_1.1.1 initial value for sds %d\n", sds);
+ rtl930x_write_sds_phy(sds, 6, 0, 0);
+
+ // FGCAL
+ rtl9300_sds_field_w(sds, 0x2e, 0x01, 14, 14, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x1c, 10, 5, 0x20);
+ rtl9300_sds_field_w(sds, 0x2f, 0x02, 0, 0, 0x1);
+
+ // DCVS
+ rtl9300_sds_field_w(sds, 0x2e, 0x1e, 14, 11, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x01, 15, 15, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x02, 11, 11, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x1c, 4, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x1d, 15, 11, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x1d, 10, 6, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x1d, 5, 1, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x02, 10, 6, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x11, 4, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2f, 0x00, 3, 0, 0xf);
+ rtl9300_sds_field_w(sds, 0x2e, 0x04, 6, 6, 0x1);
+ rtl9300_sds_field_w(sds, 0x2e, 0x04, 7, 7, 0x1);
+
+ // LEQ (Long Term Equivalent signal level)
+ rtl9300_sds_field_w(sds, 0x2e, 0x16, 14, 8, 0x0);
+
+	// DFE (Decision Feedback Equalizer)
+ rtl9300_sds_field_w(sds, 0x2f, 0x03, 5, 0, tap0_init_val);
+ rtl9300_sds_field_w(sds, 0x2e, 0x09, 11, 6, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x09, 5, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x0a, 5, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2f, 0x01, 5, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2f, 0x12, 5, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x0a, 11, 6, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x06, 5, 0, 0x0);
+ rtl9300_sds_field_w(sds, 0x2f, 0x01, 5, 0, 0x0);
+
+ // Vth
+ rtl9300_sds_field_w(sds, 0x2e, 0x13, 5, 3, 0x7);
+ rtl9300_sds_field_w(sds, 0x2e, 0x13, 2, 0, 0x7);
+ rtl9300_sds_field_w(sds, 0x2f, 0x0b, 5, 3, vth_min);
+
+ pr_info("end_1.1.1 --\n");
+
+ pr_info("start_1.1.2 Load DFE init. value\n");
+
+ rtl9300_sds_field_w(sds, 0x2e, 0x0f, 13, 7, 0x7f);
+
+ pr_info("end_1.1.2\n");
+
+ pr_info("start_1.1.3 disable LEQ training,enable DFE clock\n");
+
+ rtl9300_sds_field_w(sds, 0x2e, 0x17, 7, 7, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x17, 6, 2, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x0c, 8, 8, 0x0);
+ rtl9300_sds_field_w(sds, 0x2e, 0x0b, 4, 4, 0x1);
+ rtl9300_sds_field_w(sds, 0x2e, 0x12, 14, 14, 0x0);
+ rtl9300_sds_field_w(sds, 0x2f, 0x02, 15, 15, 0x0);
+
+ pr_info("end_1.1.3 --\n");
+
+ pr_info("start_1.1.4 offset cali setting\n");
+
+ rtl9300_sds_field_w(sds, 0x2e, 0x0f, 15, 14, 0x3);
+
+ pr_info("end_1.1.4\n");
+
+ pr_info("start_1.1.5 LEQ and DFE setting\n");
+
+ // TODO: make this work for DAC cables of different lengths
+	// For a 10GBit SerDes with Fibre, SDS 8 or 9
+ rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x2);
+
+ // No serdes, check for Aquantia PHYs
+ rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x2);
+
+ rtl9300_sds_field_w(sds, 0x2e, 0x0f, 6, 0, 0x5f);
+ rtl9300_sds_field_w(sds, 0x2f, 0x05, 7, 2, 0x1f);
+ rtl9300_sds_field_w(sds, 0x2e, 0x19, 9, 5, 0x1f);
+ rtl9300_sds_field_w(sds, 0x2f, 0x0b, 15, 9, 0x3c);
+ rtl9300_sds_field_w(sds, 0x2e, 0x0b, 1, 0, 0x3);
+
+ pr_info("end_1.1.5\n");
+}
+
+void rtl9300_sds_tx_config(int sds, phy_interface_t phy_if)
+{
+ // parameters: rtl9303_80G_txParam_s2
+ int impedance = 0x8;
+ int pre_amp = 0x2;
+ int main_amp = 0x9;
+ int post_amp = 0x2;
+ int pre_en = 0x1;
+ int post_en = 0x1;
+ int page;
+
+	switch (phy_if) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ page = 0x25;
+ break;
+ case PHY_INTERFACE_MODE_HSGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ page = 0x29;
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ page = 0x2f;
+ break;
+ default:
+ pr_err("%s: unsupported PHY mode\n", __func__);
+ return;
+ }
+
+ rtl9300_sds_field_w(sds, page, 0x1, 15, 11, pre_amp);
+ rtl9300_sds_field_w(sds, page, 0x7, 0, 0, pre_en);
+ rtl9300_sds_field_w(sds, page, 0x7, 8, 4, main_amp);
+ rtl9300_sds_field_w(sds, page, 0x6, 4, 0, post_amp);
+ rtl9300_sds_field_w(sds, page, 0x7, 3, 3, post_en);
+ rtl9300_sds_field_w(sds, page, 0x18, 15, 12, impedance);
+}
+
+/*
+ * Wait for clock ready, this assumes the SerDes is in XGMII mode
+ * timeout is in ms
+ */
+int rtl9300_sds_clock_wait(int timeout)
+{
+ u32 v;
+ unsigned long start = jiffies;
+
+ do {
+ rtl9300_sds_field_w(2, 0x1f, 0x2, 15, 0, 53);
+ v = rtl9300_sds_field_r(2, 0x1f, 20, 5, 4);
+ if (v == 3)
+ return 0;
+ } while (jiffies < start + (HZ / 1000) * timeout);
+
+ return 1;
+}
+
+// dal_longan_sds_clk_routine
+void rtl9300_sds_clock_config(void)
+{
+ // Power down SDS
+ rtl9300_sds_field_w(2, 0x20, 0, 7, 6, 0x3);
+ rtl9300_sds_field_w(3, 0x20, 0, 7, 6, 0x3);
+
+	// Wait for the clock on the first 10GBit SerDes, timeout 2 million microseconds (2 s)
+	if (rtl9300_sds_clock_wait(2 * 1000)) {
+ rtl9300_sds_field_w(2, 0x20, 0, 7, 6, 0);
+ rtl9300_sds_field_w(3, 0x20, 0, 7, 6, 0);
+ pr_err("%s: unable to configure SerDes clock\n", __func__);
+ return;
+ }
+
+ rtl9300_sds_field_w(2, 0x20, 0x2, 11, 10, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x20, 0x2, 11, 10, 0x0);
+ mdelay(10);
+ rtl9300_sds_field_w(10, 0x20, 2, 11, 10, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(10, 0x20, 2, 11, 10, 0x0);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x1f, 15, 5, 4, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x20, 0x2, 11, 10, 0x3);
+ mdelay(10);
+ rtl9300_sds_field_w(10, 0x20, 0x2, 11, 10, 0x3);
+ mdelay(10);
+
+ /* Turn up the clock to 320MHz */
+ rtl9300_sds_field_w(2, 0x2f, 23, 1, 0, 0x1);
+ rtl9300_sds_field_w(2, 0x2f, 29, 11, 2, 0x190);
+ rtl9300_sds_field_w(2, 0x2f, 30, 15, 6, 0x190);
+ rtl9300_sds_field_w(2, 0x2f, 31, 15, 8, 0x30);
+ rtl9300_sds_field_w(2, 0x2f, 29, 15, 14, 0x0);
+ rtl9300_sds_field_w(2, 0x2f, 26, 8, 8, 0x0);
+
+ /* Toggle CMU */
+ rtl9300_sds_field_w(2, 0x21, 11, 3, 3, 1);
+ rtl9300_sds_field_w(2, 0x21, 11, 2, 2, 0);
+ rtl9300_sds_field_w(2, 0x21, 11, 2, 2, 1);
+ rtl9300_sds_field_w(2, 0x21, 11, 3, 3, 0);
+
+ mdelay(500);
+
+ /* Reset ICG */
+ rtl9300_sds_field_w(2, 0x1f, 15, 5, 4, 0);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x1f, 15, 5, 4, 1);
+ mdelay(10);
+
+ /* Turn down clock to 257MHz */
+ rtl9300_sds_field_w(2, 0x2f, 23, 1, 0, 0x0);
+ rtl9300_sds_field_w(2, 0x2f, 29, 11, 2, 0x1b8);
+ rtl9300_sds_field_w(2, 0x2f, 30, 15, 6, 0x1b8);
+ rtl9300_sds_field_w(2, 0x2f, 31, 15, 8, 0x35);
+ rtl9300_sds_field_w(2, 0x2f, 29, 15, 14, 0x1);
+ rtl9300_sds_field_w(2, 0x2f, 26, 8, 8, 0x1);
+
+ /* Toggle CMU again */
+ rtl9300_sds_field_w(2, 0x21, 11, 3, 3, 1);
+ rtl9300_sds_field_w(2, 0x21, 11, 2, 2, 0);
+ rtl9300_sds_field_w(2, 0x21, 11, 2, 2, 1);
+ rtl9300_sds_field_w(2, 0x21, 11, 3, 3, 0);
+ mdelay(500);
+
+	/* Reset ICG */
+ rtl9300_sds_field_w(2, 0x1f, 15, 5, 4, 0);
+ mdelay(10);
+
+	/* Toggle ber-notify */
+ rtl9300_sds_field_w(2, 0x20, 2, 13, 12, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x20, 2, 13, 12, 0x0);
+ mdelay(10);
+ rtl9300_sds_field_w(10, 0x20, 2, 13, 12, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(10, 0x20, 2, 13, 12, 0x0);
+ mdelay(10);
+
+ /* Power up SerDes again */
+ rtl9300_sds_field_w(2, 0x20, 0, 7, 6, 0);
+ rtl9300_sds_field_w(3, 0x20, 0, 7, 6, 0);
+
+ rtl8218d_reset(8);
+ rtl8218d_reset(16);
+
+ /* SerDes RX reset */
+ rtl9300_sds_field_w(2, 0x2e, 0x15, 4, 4, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(2, 0x2e, 0x15, 4, 4, 0x0);
+
+ mdelay(10);
+
+ rtl9300_sds_field_w(3, 0x2e, 0x15, 4, 4, 0x1);
+ mdelay(10);
+ rtl9300_sds_field_w(3, 0x2e, 0x15, 4, 4, 0x0);
+}
+
int rtl9300_configure_serdes(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
int phy_addr = phydev->mdio.addr;
int sds_num = 0;
- int v;
-
- phydev_info(phydev, "Configuring internal RTL9300 SERDES\n");
+ u32 mode, v;
switch (phy_addr) {
case 26:
@@ -1511,6 +2088,48 @@ int rtl9300_configure_serdes(struct phy_device *phydev)
return -EINVAL;
}
+ // Maybe use dal_longan_sds_init
+
+ // ----> dal_longan_sds_mode_set
+ phydev_info(phydev, "Configuring internal RTL9300 SERDES %d\n", sds_num);
+
+ pr_info("%s: enabling link as speed 1G, link down\n", __func__);
+ mode = sw_r32(RTL930X_MAC_FORCE_MODE_CTRL + 4 * phy_addr);
+	pr_info("%s, RTL930X_MAC_FORCE_MODE_CTRL : %08x\n", __func__, mode);
+ mode |= BIT(0); // MAC enabled
+ mode &= ~(7 << 3);
+ mode |= 2 << 3; // Speed = 1G
+ mode &= ~BIT(1); // Link is down
+ sw_w32(mode, RTL930X_MAC_FORCE_MODE_CTRL + 4 * phy_addr);
+ mdelay(20);
+
+ rtl9300_sds_rst(sds_num, 0x04);
+
+ // Enable 1GBit PHY
+ v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG);
+ v |= BIT(PHY_POWER_BIT);
+ rtl930x_write_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG, v);
+
+ // Enable 10GBit PHY
+ v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG);
+ v |= BIT(PHY_POWER_BIT);
+ rtl930x_write_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG, v);
+
+ rtl9300_force_sds_mode(sds_num, PHY_INTERFACE_MODE_NA);
+
+ // Do RX calibration
+ // Select rtl9300_rxCaliConf_serdes_myParam if SERDES
+ // otherwise rtl9300_rxCaliConf_phy_myParam
+ rtl9300_do_rx_calibration(sds_num);
+
+ rtl9300_sds_tx_config(sds_num, PHY_INTERFACE_MODE_1000BASEX);
+
+ rtl9300_force_sds_mode(sds_num, PHY_INTERFACE_MODE_1000BASEX);
+
+ rtl9300_sds_clock_config();
+
+ // <----- dal_longan_sds_mode_set
+
/* Set default Medium to fibre */
v = rtl930x_read_sds_phy(sds_num, 0x1f, 11);
if (v < 0) {
@@ -1521,8 +2140,9 @@ int rtl9300_configure_serdes(struct phy_device *phydev)
rtl930x_write_sds_phy(sds_num, 0x1f, 11, v);
// TODO: this needs to be configurable via ethtool/.dts
- pr_info("Setting 10G/1000BX auto fibre medium\n");
- rtl9300_sds_rst(sds_num, 0x1b);
+ pr_info("%s: setting 10G/1000BX auto fibre medium\n", __func__);
+// rtl9300_sds_rst(sds_num, 0x1b);
+ // Set to 1000BX
// TODO: Apply patch set for fibre type
@@ -1624,7 +2244,7 @@ static int rtl8218d_phy_probe(struct phy_device *phydev)
struct rtl838x_phy_priv *priv;
int addr = phydev->mdio.addr;
- pr_info("%s: id: %d\n", __func__, addr);
+ pr_debug("%s: id: %d\n", __func__, addr);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1652,6 +2272,8 @@ static int rtl8226_phy_probe(struct phy_device *phydev)
priv->name = "RTL8226";
+ // TODO: configure PHY, in particular MDI pin swap
+
return 0;
}
@@ -1791,7 +2413,10 @@ static struct phy_driver rtl83xx_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
- }, {
+ .set_eee = rtl8218d_set_eee,
+ .get_eee = rtl8218d_get_eee,
+ },
+ {
PHY_ID_MATCH_MODEL(PHY_ID_RTL8226),
.name = "REALTEK RTL8226",
.features = PHY_GBIT_FEATURES,
@@ -1805,6 +2430,8 @@ static struct phy_driver rtl83xx_phy_driver[] = {
.write_page = rtl8226_write_page,
.read_status = rtl8226_read_status,
.config_aneg = rtl8226_config_aneg,
+ .set_eee = rtl8226_set_eee,
+ .get_eee = rtl8226_get_eee,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_RTL8218B_I),
@@ -1853,6 +2480,7 @@ static struct phy_driver rtl83xx_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_RTL9300_I),
.name = "REALTEK RTL9300 SERDES",
+ .phy_id_mask = RTL9300_PHY_ID_MASK,
.features = PHY_GBIT_FIBRE_FEATURES,
.probe = rtl9300_serdes_probe,
.suspend = genphy_suspend,
diff --git a/target/linux/realtek/files-5.4/include/linux/rtl838x.h b/target/linux/realtek/files-5.4/include/linux/rtl838x.h
new file mode 100644
index 0000000000..b5d9bfba12
--- /dev/null
+++ b/target/linux/realtek/files-5.4/include/linux/rtl838x.h
@@ -0,0 +1,1072 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _RTL838X_H
+#define _RTL838X_H
+
+#include <net/dsa.h>
+#include <linux/rhashtable.h>
+
+/*
+ * Register definition
+ */
+#define RTL838X_MAC_PORT_CTRL(port) (0xd560 + (((port) << 7)))
+#define RTL839X_MAC_PORT_CTRL(port) (0x8004 + (((port) << 7)))
+#define RTL930X_MAC_PORT_CTRL(port) (0x3260 + (((port) << 6)))
+#define RTL930X_MAC_L2_PORT_CTRL(port) (0x3268 + (((port) << 6)))
+#define RTL931X_MAC_PORT_CTRL(port) (0x6004 + (((port) << 7)))
+
+#define RTL838X_RST_GLB_CTRL_0 (0x003c)
+
+#define RTL838X_MAC_FORCE_MODE_CTRL (0xa104)
+#define RTL839X_MAC_FORCE_MODE_CTRL (0x02bc)
+#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C)
+#define RTL931X_MAC_FORCE_MODE_CTRL (0x0DCC)
+
+#define RTL838X_DMY_REG31 (0x3b28)
+#define RTL838X_SDS_MODE_SEL (0x0028)
+#define RTL838X_SDS_CFG_REG (0x0034)
+#define RTL838X_INT_MODE_CTRL (0x005c)
+#define RTL838X_CHIP_INFO (0x00d8)
+#define RTL839X_CHIP_INFO (0x0ff4)
+#define RTL838X_PORT_ISO_CTRL(port) (0x4100 + ((port) << 2))
+#define RTL839X_PORT_ISO_CTRL(port) (0x1400 + ((port) << 3))
+
+/* Packet statistics */
+#define RTL838X_STAT_PORT_STD_MIB (0x1200)
+#define RTL839X_STAT_PORT_STD_MIB (0xC000)
+#define RTL930X_STAT_PORT_MIB_CNTR (0x0664)
+#define RTL838X_STAT_RST (0x3100)
+#define RTL839X_STAT_RST (0xF504)
+#define RTL930X_STAT_RST (0x3240)
+#define RTL931X_STAT_RST (0x7ef4)
+#define RTL838X_STAT_PORT_RST (0x3104)
+#define RTL839X_STAT_PORT_RST (0xF508)
+#define RTL930X_STAT_PORT_RST (0x3244)
+#define RTL931X_STAT_PORT_RST (0x7ef8)
+#define RTL838X_STAT_CTRL (0x3108)
+#define RTL839X_STAT_CTRL (0x04cc)
+#define RTL930X_STAT_CTRL (0x3248)
+#define RTL931X_STAT_CTRL (0x5720)
+
+/* Registers of the internal Serdes of the 8390 */
+#define RTL8390_SDS0_1_XSG0 (0xA000)
+#define RTL8390_SDS0_1_XSG1 (0xA100)
+#define RTL839X_SDS12_13_XSG0 (0xB800)
+#define RTL839X_SDS12_13_XSG1 (0xB900)
+#define RTL839X_SDS12_13_PWR0 (0xb880)
+#define RTL839X_SDS12_13_PWR1 (0xb980)
+
+/* Registers of the internal Serdes of the 8380 */
+#define RTL838X_SDS4_FIB_REG0 (0xF800)
+#define RTL838X_SDS4_REG28 (0xef80)
+#define RTL838X_SDS4_DUMMY0 (0xef8c)
+#define RTL838X_SDS5_EXT_REG6 (0xf18c)
+
+/* VLAN registers */
+#define RTL838X_VLAN_CTRL (0x3A74)
+#define RTL838X_VLAN_PROFILE(idx) (0x3A88 + ((idx) << 2))
+#define RTL838X_VLAN_PORT_EGR_FLTR (0x3A84)
+#define RTL838X_VLAN_PORT_PB_VLAN (0x3C00)
+#define RTL838X_VLAN_PORT_IGR_FLTR (0x3A7C)
+#define RTL838X_VLAN_PORT_TAG_STS_CTRL (0xA530)
+
+#define RTL839X_VLAN_PROFILE(idx) (0x25C0 + (((idx) << 3)))
+#define RTL839X_VLAN_CTRL (0x26D4)
+#define RTL839X_VLAN_PORT_PB_VLAN (0x26D8)
+#define RTL839X_VLAN_PORT_IGR_FLTR (0x27B4)
+#define RTL839X_VLAN_PORT_EGR_FLTR (0x27C4)
+#define RTL839X_VLAN_PORT_TAG_STS_CTRL (0x6828)
+
+#define RTL930X_VLAN_PROFILE_SET(idx) (0x9c60 + (((idx) * 20)))
+#define RTL930X_VLAN_CTRL (0x82D4)
+#define RTL930X_VLAN_PORT_PB_VLAN (0x82D8)
+#define RTL930X_VLAN_PORT_IGR_FLTR (0x83C0)
+#define RTL930X_VLAN_PORT_EGR_FLTR (0x83C8)
+#define RTL930X_VLAN_PORT_TAG_STS_CTRL (0xCE24)
+
+#define RTL931X_VLAN_PROFILE_SET(idx) (0x9800 + (((idx) * 28)))
+#define RTL931X_VLAN_CTRL (0x94E4)
+#define RTL931X_VLAN_PORT_IGR_FLTR (0x96B4)
+#define RTL931X_VLAN_PORT_EGR_FLTR (0x96C4)
+#define RTL931X_VLAN_PORT_TAG_CTRL (0x4860)
+
+/* Table access registers */
+#define RTL838X_TBL_ACCESS_CTRL_0 (0x6914)
+#define RTL838X_TBL_ACCESS_DATA_0(idx) (0x6918 + ((idx) << 2))
+#define RTL838X_TBL_ACCESS_CTRL_1 (0xA4C8)
+#define RTL838X_TBL_ACCESS_DATA_1(idx) (0xA4CC + ((idx) << 2))
+
+#define RTL839X_TBL_ACCESS_CTRL_0 (0x1190)
+#define RTL839X_TBL_ACCESS_DATA_0(idx) (0x1194 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_CTRL_1 (0x6b80)
+#define RTL839X_TBL_ACCESS_DATA_1(idx) (0x6b84 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
+#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
+
+#define RTL930X_TBL_ACCESS_CTRL_0 (0xB340)
+#define RTL930X_TBL_ACCESS_DATA_0(idx) (0xB344 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_CTRL_1 (0xB3A0)
+#define RTL930X_TBL_ACCESS_DATA_1(idx) (0xB3A4 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_CTRL_2 (0xCE04)
+#define RTL930X_TBL_ACCESS_DATA_2(i) (0xCE08 + (((i) << 2)))
+
+#define RTL931X_TBL_ACCESS_CTRL_0 (0x8500)
+#define RTL931X_TBL_ACCESS_DATA_0(idx) (0x8508 + ((idx) << 2))
+#define RTL931X_TBL_ACCESS_CTRL_1 (0x40C0)
+#define RTL931X_TBL_ACCESS_DATA_1(idx) (0x40C4 + ((idx) << 2))
+#define RTL931X_TBL_ACCESS_CTRL_2 (0x8528)
+#define RTL931X_TBL_ACCESS_DATA_2(i) (0x852C + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_3 (0x0200)
+#define RTL931X_TBL_ACCESS_DATA_3(i) (0x0204 + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_4 (0x20DC)
+#define RTL931X_TBL_ACCESS_DATA_4(i) (0x20E0 + (((i) << 2)))
+#define RTL931X_TBL_ACCESS_CTRL_5 (0x7E1C)
+#define RTL931X_TBL_ACCESS_DATA_5(i) (0x7E20 + (((i) << 2)))
+
+/* MAC handling */
+#define RTL838X_MAC_LINK_STS (0xa188)
+#define RTL839X_MAC_LINK_STS (0x0390)
+#define RTL930X_MAC_LINK_STS (0xCB10)
+#define RTL931X_MAC_LINK_STS (0x0EC0)
+#define RTL838X_MAC_LINK_SPD_STS(p) (0xa190 + (((p >> 4) << 2)))
+#define RTL839X_MAC_LINK_SPD_STS(p) (0x03a0 + (((p >> 4) << 2)))
+#define RTL930X_MAC_LINK_SPD_STS(p) (0xCB18 + (((p >> 3) << 2)))
+#define RTL931X_MAC_LINK_SPD_STS(p) (0x0ED0 + (((p >> 3) << 2)))
+#define RTL838X_MAC_LINK_DUP_STS (0xa19c)
+#define RTL839X_MAC_LINK_DUP_STS (0x03b0)
+#define RTL930X_MAC_LINK_DUP_STS (0xCB28)
+#define RTL931X_MAC_LINK_DUP_STS (0x0EF0)
+#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0)
+#define RTL839X_MAC_TX_PAUSE_STS (0x03b8)
+#define RTL930X_MAC_TX_PAUSE_STS (0xCB2C)
+#define RTL931X_MAC_TX_PAUSE_STS (0x0EF8)
+#define RTL838X_MAC_RX_PAUSE_STS (0xa1a4)
+#define RTL839X_MAC_RX_PAUSE_STS (0x03c0)
+#define RTL930X_MAC_RX_PAUSE_STS (0xCB30)
+#define RTL931X_MAC_RX_PAUSE_STS (0x0F00)
+#define RTL930X_MAC_LINK_MEDIA_STS (0xCB14)
+
+
+/* MAC link state bits */
+#define RTL830X_FORCE_EN (1 << 0)
+#define RTL830X_FORCE_LINK_EN (1 << 1)
+#define RTL830X_NWAY_EN (1 << 2)
+#define RTL830X_DUPLEX_MODE (1 << 3)
+#define RTL830X_TX_PAUSE_EN (1 << 6)
+#define RTL830X_RX_PAUSE_EN (1 << 7)
+#define RTL830X_MAC_FORCE_FC_EN (1 << 8)
+
+#define RTL839X_FORCE_EN (1 << 0)
+#define RTL839X_FORCE_LINK_EN (1 << 1)
+#define RTL839X_DUPLEX_MODE (1 << 2)
+#define RTL839X_TX_PAUSE_EN (1 << 5)
+#define RTL839X_RX_PAUSE_EN (1 << 6)
+#define RTL839X_MAC_FORCE_FC_EN (1 << 7)
+
+#define RTL930X_FORCE_EN (1 << 0)
+#define RTL930X_FORCE_LINK_EN (1 << 1)
+#define RTL930X_DUPLEX_MODE (1 << 2)
+#define RTL930X_TX_PAUSE_EN (1 << 7)
+#define RTL930X_RX_PAUSE_EN (1 << 8)
+#define RTL930X_MAC_FORCE_FC_EN (1 << 9)
+
+#define RTL931X_FORCE_EN (1 << 0)
+#define RTL931X_FORCE_LINK_EN (1 << 1)
+#define RTL931X_DUPLEX_MODE (1 << 2)
+#define RTL931X_MAC_FORCE_FC_EN (1 << 4)
+#define RTL931X_TX_PAUSE_EN (1 << 16)
+#define RTL931X_RX_PAUSE_EN (1 << 17)
+
+/* EEE */
+#define RTL838X_MAC_EEE_ABLTY (0xa1a8)
+#define RTL838X_EEE_PORT_TX_EN (0x014c)
+#define RTL838X_EEE_PORT_RX_EN (0x0150)
+#define RTL838X_EEE_CLK_STOP_CTRL (0x0148)
+#define RTL838X_EEE_TX_TIMER_GIGA_CTRL (0xaa04)
+#define RTL838X_EEE_TX_TIMER_GELITE_CTRL (0xaa08)
+
+#define RTL839X_EEE_TX_TIMER_GELITE_CTRL (0x042C)
+#define RTL839X_EEE_TX_TIMER_GIGA_CTRL (0x0430)
+#define RTL839X_EEE_TX_TIMER_10G_CTRL (0x0434)
+#define RTL839X_EEE_CTRL(p) (0x8008 + ((p) << 7))
+#define RTL839X_MAC_EEE_ABLTY (0x03C8)
+
+#define RTL930X_MAC_EEE_ABLTY (0xCB34)
+#define RTL930X_EEE_CTRL(p) (0x3274 + ((p) << 6))
+#define RTL930X_EEEP_PORT_CTRL(p) (0x3278 + ((p) << 6))
+
+/* L2 functionality */
+#define RTL838X_L2_CTRL_0 (0x3200)
+#define RTL839X_L2_CTRL_0 (0x3800)
+#define RTL930X_L2_CTRL (0x8FD8)
+#define RTL931X_L2_CTRL (0xC800)
+#define RTL838X_L2_CTRL_1 (0x3204)
+#define RTL839X_L2_CTRL_1 (0x3804)
+#define RTL930X_L2_AGE_CTRL (0x8FDC)
+#define RTL931X_L2_AGE_CTRL (0xC804)
+#define RTL838X_L2_PORT_AGING_OUT (0x3358)
+#define RTL839X_L2_PORT_AGING_OUT (0x3b74)
+#define RTL930X_L2_PORT_AGE_CTRL (0x8FE0)
+#define RTL931X_L2_PORT_AGE_CTRL (0xc808)
+#define RTL838X_TBL_ACCESS_L2_CTRL (0x6900)
+#define RTL839X_TBL_ACCESS_L2_CTRL (0x1180)
+#define RTL930X_TBL_ACCESS_L2_CTRL (0xB320)
+#define RTL930X_TBL_ACCESS_L2_METHOD_CTRL (0xB324)
+#define RTL838X_TBL_ACCESS_L2_DATA(idx) (0x6908 + ((idx) << 2))
+#define RTL839X_TBL_ACCESS_L2_DATA(idx) (0x1184 + ((idx) << 2))
+#define RTL930X_TBL_ACCESS_L2_DATA(idx) (0xab08 + ((idx) << 2))
+#define RTL838X_L2_TBL_FLUSH_CTRL (0x3370)
+#define RTL839X_L2_TBL_FLUSH_CTRL (0x3ba0)
+#define RTL930X_L2_TBL_FLUSH_CTRL (0x9404)
+#define RTL931X_L2_TBL_FLUSH_CTRL (0xCD9C)
+
+#define RTL838X_L2_LRN_CONSTRT (0x329C)
+#define RTL838X_L2_PORT_LRN_CONSTRT (0x32A0)
+#define RTL839X_L2_LRN_CONSTRT (0x3910)
+#define RTL839X_L2_PORT_LRN_CONSTRT (0x3914)
+#define RTL930X_L2_LRN_CONSTRT_CTRL (0x909c)
+#define RTL838X_L2_FLD_PMSK (0x3288)
+#define RTL839X_L2_FLD_PMSK (0x38EC)
+#define RTL930X_L2_BC_FLD_PMSK (0x9068)
+#define RTL930X_L2_UNKN_UC_FLD_PMSK (0x9064)
+#define RTL838X_L2_LRN_CONSTRT_EN (0x3368)
+
+#define RTL838X_L2_PORT_NEW_SALRN(p) (0x328c + (((p >> 4) << 2)))
+#define RTL839X_L2_PORT_NEW_SALRN(p) (0x38F0 + (((p >> 4) << 2)))
+#define RTL930X_L2_PORT_SALRN(p) (0x8FEC + (((p >> 4) << 2)))
+#define RTL931X_L2_PORT_NEW_SALRN(p) (0xC820 + (((p >> 4) << 2)))
+#define RTL838X_L2_PORT_NEW_SA_FWD(p) (0x3294 + (((p >> 4) << 2)))
+#define RTL839X_L2_PORT_NEW_SA_FWD(p) (0x3900 + (((p >> 4) << 2)))
+#define RTL930X_L2_PORT_NEW_SA_FWD(p) (0x8FF4 + (((p / 10) << 2)))
+#define RTL931X_L2_PORT_NEW_SA_FWD(p) (0xC830 + (((p / 10) << 2)))
+
+#define RTL930X_ST_CTRL (0x8798)
+
+#define RTL930X_L2_PORT_SABLK_CTRL (0x905c)
+#define RTL930X_L2_PORT_DABLK_CTRL (0x9060)
+
+#define RTL838X_RMA_BPDU_FLD_PMSK (0x4348)
+#define RTL930X_RMA_BPDU_FLD_PMSK (0x9F18)
+#define RTL931X_RMA_BPDU_FLD_PMSK (0x8950)
+#define RTL839X_RMA_BPDU_FLD_PMSK (0x125C)
+
+#define RTL838X_L2_PORT_LM_ACT(p) (0x3208 + ((p) << 2))
+#define RTL838X_VLAN_PORT_FWD (0x3A78)
+#define RTL839X_VLAN_PORT_FWD (0x27AC)
+#define RTL930X_VLAN_PORT_FWD (0x834C)
+#define RTL838X_VLAN_FID_CTRL (0x3aa8)
+
+/* Port Mirroring */
+#define RTL838X_MIR_CTRL (0x5D00)
+#define RTL838X_MIR_DPM_CTRL (0x5D20)
+#define RTL838X_MIR_SPM_CTRL (0x5D10)
+
+#define RTL839X_MIR_CTRL (0x2500)
+#define RTL839X_MIR_DPM_CTRL (0x2530)
+#define RTL839X_MIR_SPM_CTRL (0x2510)
+#define RTL839X_MIR_SAMPLE_RATE_CTRL (0x2558)
+#define RTL839X_SFLOW_CTRL (0x2400)
+#define RTL839X_SFLOW_PORT_RATE_CTRL (0x2404)
+
+#define RTL930X_MIR_CTRL (0xA2A0)
+#define RTL930X_MIR_DPM_CTRL (0xA2C0)
+#define RTL930X_MIR_SPM_CTRL (0xA2B0)
+#define RTL930X_MIR_SAMPLE_RATE_CTRL (0xA2D0)
+#define RTL930X_SFLOW_CTRL (0xBEA0)
+#define RTL930X_SFLOW_PORT_RATE_CTRL (0xBEA4)
+
+#define RTL931X_MIR_CTRL (0xAF00)
+#define RTL931X_MIR_DPM_CTRL (0xAF30)
+#define RTL931X_MIR_SPM_CTRL (0xAF10)
+#define RTL931X_MIR_SAMPLE_RATE_CTRL (0xAF50)
+#define RTL931X_SFLOW_CTRL (0x8400)
+#define RTL931X_SFLOW_PORT_RATE_CTRL (0x8404)
+
+#define RTL931X_MIR_RSPAN_VLAN_CTRL (0x69A0)
+#define RTL931X_MIR_RSPAN_TX_CTRL (0x69B0)
+#define RTL931X_MIR_RSPAN_RX_TAG_RM_CTRL (0xAF5C)
+#define RTL931X_MIR_RSPAN_RX_TAG_EN_CTRL (0x2554)
+
+
+
+/* Storm/rate control and scheduling */
+#define RTL838X_STORM_CTRL (0x4700)
+#define RTL839X_STORM_CTRL (0x1800)
+#define RTL838X_STORM_CTRL_LB_CTRL(p) (0x4884 + (((p) << 2)))
+#define RTL838X_STORM_CTRL_BURST_PPS_0 (0x4874)
+#define RTL838X_STORM_CTRL_BURST_PPS_1 (0x4878)
+#define RTL838X_STORM_CTRL_BURST_0 (0x487c)
+#define RTL838X_STORM_CTRL_BURST_1 (0x4880)
+#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_0 (0x1804)
+#define RTL839X_STORM_CTRL_LB_TICK_TKN_CTRL_1 (0x1808)
+#define RTL838X_SCHED_CTRL (0xB980)
+#define RTL839X_SCHED_CTRL (0x60F4)
+#define RTL838X_SCHED_LB_TICK_TKN_CTRL_0 (0xAD58)
+#define RTL838X_SCHED_LB_TICK_TKN_CTRL_1 (0xAD5C)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL_0 (0x1804)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL_1 (0x1808)
+#define RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL (0x2000)
+#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0 (0x1604)
+#define RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1 (0x1608)
+#define RTL839X_SCHED_LB_TICK_TKN_CTRL (0x60F8)
+#define RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL (0x6200)
+#define RTL838X_SCHED_LB_THR (0xB984)
+#define RTL839X_SCHED_LB_THR (0x60FC)
+#define RTL838X_SCHED_P_EGR_RATE_CTRL(p) (0xC008 + (((p) << 7)))
+#define RTL838X_SCHED_Q_EGR_RATE_CTRL(p, q) (0xC00C + (p << 7) + (((q) << 2)))
+#define RTL838X_STORM_CTRL_PORT_BC_EXCEED (0x470C)
+#define RTL838X_STORM_CTRL_PORT_MC_EXCEED (0x4710)
+#define RTL838X_STORM_CTRL_PORT_UC_EXCEED (0x4714)
+#define RTL839X_STORM_CTRL_PORT_BC_EXCEED(p) (0x180c + (((p >> 5) << 2)))
+#define RTL839X_STORM_CTRL_PORT_MC_EXCEED(p) (0x1814 + (((p >> 5) << 2)))
+#define RTL839X_STORM_CTRL_PORT_UC_EXCEED(p) (0x181c + (((p >> 5) << 2)))
+#define RTL838X_STORM_CTRL_PORT_UC(p) (0x4718 + (((p) << 2)))
+#define RTL838X_STORM_CTRL_PORT_MC(p) (0x478c + (((p) << 2)))
+#define RTL838X_STORM_CTRL_PORT_BC(p) (0x4800 + (((p) << 2)))
+
+#define RTL839X_STORM_CTRL_PORT_UC_0(p) (0x185C + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_UC_1(p) (0x1860 + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_MC_0(p) (0x19FC + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_MC_1(p) (0x1a00 + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_BC_0(p) (0x1B9C + (((p) << 3)))
+#define RTL839X_STORM_CTRL_PORT_BC_1(p) (0x1BA0 + (((p) << 3)))
+
+#define RTL930X_STORM_CTRL_PORT_UC_0(p) (0x1A70 + (((p) << 3)))
+#define RTL930X_STORM_CTRL_PORT_UC_1(p) (0x1A74 + (((p) << 3)))
+#define RTL930X_STORM_CTRL_PORT_MC_0(p) (0x8B60 + (((p) << 3)))
+#define RTL930X_STORM_CTRL_PORT_MC_1(p) (0x8B64 + (((p) << 3)))
+#define RTL930X_STORM_CTRL_PORT_BC_0(p) (0x8C50 + (((p) << 3)))
+#define RTL930X_STORM_CTRL_PORT_BC_1(p) (0x8C54 + (((p) << 3)))
+
+#define RTL931X_STORM_CTRL_PORT_UC_0(p) (0xB014 + (((p) << 3)))
+#define RTL931X_STORM_CTRL_PORT_UC_1(p) (0xB018 + (((p) << 3)))
+#define RTL931X_STORM_CTRL_PORT_MC_0(p) (0xB1EC + (((p) << 3)))
+#define RTL931X_STORM_CTRL_PORT_MC_1(p) (0xB1F0 + (((p) << 3)))
+#define RTL931X_STORM_CTRL_PORT_BC_0(p) (0xB3C4 + (((p) << 3)))
+#define RTL931X_STORM_CTRL_PORT_BC_1(p) (0xB3C4 + (((p) << 3)))
+
+#define RTL839X_TBL_ACCESS_CTRL_2 (0x611C)
+#define RTL839X_TBL_ACCESS_DATA_2(i) (0x6120 + (((i) << 2)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p) (0x1618 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_10G_1(p) (0x161C + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_0(p) (0x1640 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_PORT_CTRL_1(p) (0x1644 + (((p) << 3)))
+#define RTL839X_IGR_BWCTRL_CTRL_LB_THR (0x1614)
+
+/* Link aggregation (Trunking) */
+
+#define TRUNK_DISTRIBUTION_ALGO_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_SIP_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_DIP_BIT 0x10
+#define TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT 0x20
+#define TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT 0x40
+#define TRUNK_DISTRIBUTION_ALGO_MASKALL 0x7F
+
+#define TRUNK_DISTRIBUTION_ALGO_L2_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_L2_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_L2_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_L2_VLAN_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_L2_MASKALL 0xF
+
+#define TRUNK_DISTRIBUTION_ALGO_L3_SPA_BIT 0x01
+#define TRUNK_DISTRIBUTION_ALGO_L3_SMAC_BIT 0x02
+#define TRUNK_DISTRIBUTION_ALGO_L3_DMAC_BIT 0x04
+#define TRUNK_DISTRIBUTION_ALGO_L3_VLAN_BIT 0x08
+#define TRUNK_DISTRIBUTION_ALGO_L3_SIP_BIT 0x10
+#define TRUNK_DISTRIBUTION_ALGO_L3_DIP_BIT 0x20
+#define TRUNK_DISTRIBUTION_ALGO_L3_SRC_L4PORT_BIT 0x40
+#define TRUNK_DISTRIBUTION_ALGO_L3_DST_L4PORT_BIT 0x80
+#define TRUNK_DISTRIBUTION_ALGO_L3_PROTO_BIT 0x100
+#define TRUNK_DISTRIBUTION_ALGO_L3_FLOW_LABEL_BIT 0x200
+#define TRUNK_DISTRIBUTION_ALGO_L3_MASKALL 0x3FF
+
+
+#define RTL930X_TRK_MBR_CTRL (0xA41C)
+//#define RTL930X_TRK_HASH_IDX_CTRL ()
+#define RTL930X_TRK_HASH_CTRL (0x9F80)
+
+
+#define RTL931X_TRK_MBR_CTRL (0xB8D0)
+//#define RTL931X_TRK_HASH_IDX_CTRL (0x3E20)
+#define RTL931X_TRK_HASH_CTRL (0xBA70)
+
+#define RTL838X_TRK_MBR_CTR (0x3E00)
+#define RTL838X_TRK_HASH_IDX_CTRL (0x3E20)
+#define RTL838X_TRK_HASH_CTRL (0x3E24)
+
+#define RTL839X_TRK_MBR_CTR (0x2200)
+#define RTL839X_TRK_HASH_IDX_CTRL (0x2280)
+#define RTL839X_TRK_HASH_CTRL (0x2284)
+
+/* Attack prevention */
+#define RTL838X_ATK_PRVNT_PORT_EN (0x5B00)
+#define RTL838X_ATK_PRVNT_CTRL (0x5B04)
+#define RTL838X_ATK_PRVNT_ACT (0x5B08)
+#define RTL838X_ATK_PRVNT_STS (0x5B1C)
+
+/* 802.1X */
+#define RTL838X_SPCL_TRAP_EAPOL_CTRL (0x6988)
+#define RTL838X_SPCL_TRAP_ARP_CTRL (0x698C)
+#define RTL838X_SPCL_TRAP_IGMP_CTRL (0x6984)
+#define RTL838X_SPCL_TRAP_IPV6_CTRL (0x6994)
+#define RTL838X_SPCL_TRAP_SWITCH_MAC_CTRL (0x6998)
+#define RTL838X_SPCL_TRAP_CTRL (0x6980)
+
+#define RTL839X_SPCL_TRAP_CTRL (0x1054)
+#define RTL839X_SPCL_TRAP_EAPOL_CTRL (0x105C)
+#define RTL839X_SPCL_TRAP_ARP_CTRL (0x1060)
+#define RTL839X_SPCL_TRAP_IGMP_CTRL (0x1058)
+#define RTL839X_SPCL_TRAP_IPV6_CTRL (0x1064)
+#define RTL839X_SPCL_TRAP_SWITCH_MAC_CTRL (0x1068)
+#define RTL839X_SPCL_TRAP_SWITCH_IPV4_ADDR_CTRL (0x106C)
+#define RTL839X_SPCL_TRAP_CRC_CTRL (0x1070)
+
+/* special port action controls */
+/*
+ values:
+ 0 = FORWARD (default)
+ 1 = DROP
+ 2 = TRAP2CPU
+   3 = FLOOD TO ALL PORTS
+
+ Register encoding.
+  offset = CTRL + ((port >> 4) << 2)
+ value/mask = 3 << ((port&0xF) << 1)
+*/
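+
+/*
+ * A minimal sketch of that encoding (illustrative only, not used by the
+ * driver); for a hypothetical helper taking one of the CTRL registers above,
+ *
+ *	u32 reg = ctrl + ((port >> 4) << 2);
+ *	u32 shift = (port & 0xf) << 1;
+ *	sw_w32_mask(3 << shift, action << shift, reg);
+ *
+ * would set the 2-bit action field for a given port.
+ */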
+
+typedef enum {
+ BPDU = 0,
+ PTP,
+ PTP_UDP,
+ PTP_ETH2,
+ LLTP,
+ EAPOL,
+ GRATARP,
+} rma_ctrl_t;
+
+typedef enum {
+ FORWARD = 0,
+ DROP,
+ TRAP2CPU,
+ FLOODALL,
+ TRAP2MASTERCPU,
+ COPY2CPU,
+} action_type_t;
+
+
+#define RTL838X_RMA_BPDU_CTRL (0x4330)
+#define RTL839X_RMA_BPDU_CTRL (0x122C)
+#define RTL930X_RMA_BPDU_CTRL (0x9E7C)
+#define RTL931X_RMA_BPDU_CTRL (0x881C)
+
+#define RTL838X_RMA_PTP_CTRL (0x4338)
+#define RTL839X_RMA_PTP_CTRL (0x123C)
+#define RTL930X_RMA_PTP_CTRL (0x9E88)
+#define RTL931X_RMA_PTP_CTRL (0x8834)
+
+#define RTL838X_RMA_LLTP_CTRL (0x4340)
+#define RTL839X_RMA_LLTP_CTRL (0x124C)
+#define RTL930X_RMA_LLTP_CTRL (0x9EFC)
+#define RTL931X_RMA_LLTP_CTRL (0x8918)
+
+#define RTL930X_RMA_EAPOL_CTRL (0x9F08)
+#define RTL931X_RMA_EAPOL_CTRL (0x8930)
+#define RTL931X_TRAP_ARP_GRAT_PORT_ACT (0x8C04)
+
+/* QoS */
+#define RTL838X_QM_INTPRI2QID_CTRL (0x5F00)
+#define RTL839X_QM_INTPRI2QID_CTRL(q) (0x1110 + (q << 2))
+#define RTL839X_QM_PORT_QNUM(p) (0x1130 + (((p / 10) << 2)))
+#define RTL838X_PRI_SEL_PORT_PRI(p) (0x5FB8 + (((p / 10) << 2)))
+#define RTL839X_PRI_SEL_PORT_PRI(p) (0x10A8 + (((p / 10) << 2)))
+#define RTL838X_QM_PKT2CPU_INTPRI_MAP (0x5F10)
+#define RTL839X_QM_PKT2CPU_INTPRI_MAP (0x1154)
+#define RTL838X_PRI_SEL_CTRL (0x10E0)
+#define RTL839X_PRI_SEL_CTRL (0x10E0)
+#define RTL838X_PRI_SEL_TBL_CTRL(i) (0x5FD8 + (((i) << 2)))
+#define RTL839X_PRI_SEL_TBL_CTRL(i) (0x10D0 + (((i) << 2)))
+#define RTL838X_QM_PKT2CPU_INTPRI_0 (0x5F04)
+#define RTL838X_QM_PKT2CPU_INTPRI_1 (0x5F08)
+#define RTL838X_QM_PKT2CPU_INTPRI_2 (0x5F0C)
+#define RTL839X_OAM_CTRL (0x2100)
+#define RTL839X_OAM_PORT_ACT_CTRL(p) (0x2104 + (((p) << 2)))
+#define RTL839X_RMK_PORT_DEI_TAG_CTRL(p) (0x6A9C + (((p >> 5) << 2)))
+#define RTL839X_PRI_SEL_IPRI_REMAP (0x1080)
+#define RTL838X_PRI_SEL_IPRI_REMAP (0x5F8C)
+#define RTL839X_PRI_SEL_DEI2DP_REMAP (0x10EC)
+#define RTL839X_PRI_SEL_DSCP2DP_REMAP_ADDR(i) (0x10F0 + (((i >> 4) << 2)))
+#define RTL839X_RMK_DEI_CTRL (0x6AA4)
+#define RTL839X_WRED_PORT_THR_CTRL(i) (0x6084 + ((i) << 2))
+#define RTL839X_WRED_QUEUE_THR_CTRL(q, i) (0x6090 + ((q) * 12) + ((i) << 2))
+#define RTL838X_PRI_DSCP_INVLD_CTRL0 (0x5FE8)
+#define RTL838X_RMK_IPRI_CTRL (0xA460)
+#define RTL838X_RMK_OPRI_CTRL (0xA464)
+#define RTL838X_SCHED_P_TYPE_CTRL(p) (0xC04C + ((p) << 7))
+#define RTL838X_SCHED_LB_CTRL(p) (0xC004 + ((p) << 7))
+#define RTL838X_FC_P_EGR_DROP_CTRL(p) (0x6B1C + ((p) << 2))
+
+/* Debug features */
+#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8)
+
+/* Packet Inspection Engine */
+#define RTL838X_METER_GLB_CTRL (0x4B08)
+#define RTL839X_METER_GLB_CTRL (0x1300)
+#define RTL930X_METER_GLB_CTRL (0xa0a0)
+#define RTL839X_ACL_CTRL (0x1288)
+#define RTL838X_ACL_BLK_LOOKUP_CTRL (0x6100)
+#define RTL839X_ACL_BLK_LOOKUP_CTRL (0x1280)
+#define RTL930X_PIE_BLK_LOOKUP_CTRL (0xa5a0)
+#define RTL838X_ACL_BLK_PWR_CTRL (0x6104)
+#define RTL839X_PS_ACL_PWR_CTRL (0x049c)
+#define RTL838X_ACL_BLK_TMPLTE_CTRL(block) (0x6108 + ((block) << 2))
+#define RTL839X_ACL_BLK_TMPLTE_CTRL(block) (0x128c + ((block) << 2))
+#define RTL930X_PIE_BLK_TMPLTE_CTRL(block) (0xa624 + ((block) << 2))
+#define RTL838X_ACL_BLK_GROUP_CTRL (0x615C)
+#define RTL839X_ACL_BLK_GROUP_CTRL (0x12ec)
+#define RTL838X_ACL_CLR_CTRL (0x6168)
+#define RTL839X_ACL_CLR_CTRL (0x12fc)
+#define RTL930X_PIE_CLR_CTRL (0xa66c)
+#define RTL838X_DMY_REG27 (0x3378)
+#define RTL838X_ACL_PORT_LOOKUP_CTRL(p) (0x616C + (((p) << 2)))
+#define RTL930X_ACL_PORT_LOOKUP_CTRL(p) (0xA784 + (((p) << 2)))
+#define RTL930X_PIE_BLK_PHASE_CTRL (0xA5A4)
+
+// PIE actions
+#define PIE_ACT_COPY_TO_PORT 2
+#define PIE_ACT_REDIRECT_TO_PORT 4
+#define PIE_ACT_ROUTE_UC 6
+#define PIE_ACT_VID_ASSIGN 0
+
+// L3 actions
+#define L3_FORWARD 0
+#define L3_DROP 1
+#define L3_TRAP2CPU 2
+#define L3_COPY2CPU 3
+#define L3_TRAP2MASTERCPU 4
+#define L3_COPY2MASTERCPU 5
+#define L3_HARDDROP 6
+
+// Route actions
+#define ROUTE_ACT_FORWARD 0
+#define ROUTE_ACT_TRAP2CPU 1
+#define ROUTE_ACT_COPY2CPU 2
+#define ROUTE_ACT_DROP 3
+
+/* L3 Routing */
+#define RTL839X_ROUTING_SA_CTRL 0x6afc
+#define RTL930X_L3_HOST_TBL_CTRL (0xAB48)
+#define RTL930X_L3_IPUC_ROUTE_CTRL (0xAB4C)
+#define RTL930X_L3_IP6UC_ROUTE_CTRL (0xAB50)
+#define RTL930X_L3_IPMC_ROUTE_CTRL (0xAB54)
+#define RTL930X_L3_IP6MC_ROUTE_CTRL (0xAB58)
+#define RTL930X_L3_IP_MTU_CTRL(i) (0xAB5C + ((i >> 1) << 2))
+#define RTL930X_L3_IP6_MTU_CTRL(i) (0xAB6C + ((i >> 1) << 2))
+#define RTL930X_L3_HW_LU_KEY_CTRL (0xAC9C)
+#define RTL930X_L3_HW_LU_KEY_IP_CTRL (0xACA0)
+#define RTL930X_L3_HW_LU_CTRL (0xACC0)
+#define RTL930X_L3_IP_ROUTE_CTRL 0xab44
+
+#define MAX_VLANS 4096
+#define MAX_LAGS 16
+#define MAX_PRIOS 8
+#define RTL930X_PORT_IGNORE 0x3f
+#define MAX_MC_GROUPS 512
+#define UNKNOWN_MC_PMASK (MAX_MC_GROUPS - 1)
+#define PIE_BLOCK_SIZE 128
+#define MAX_PIE_ENTRIES (18 * PIE_BLOCK_SIZE)
+#define N_FIXED_FIELDS 12
+#define MAX_ROUTES 512
+#define MAX_HOST_ROUTES 1536
+#define MAX_COUNTERS 2048
+#define MAX_INTF_MTUS 8
+#define DEFAULT_MTU 1536
+#define MAX_INTERFACES 100
+#define MAX_ROUTER_MACS 64
+#define L3_EGRESS_DMACS 2048
+#define MAX_SMACS 64
+
+enum phy_type {
+ PHY_NONE = 0,
+ PHY_RTL838X_SDS = 1,
+ PHY_RTL8218B_INT = 2,
+ PHY_RTL8218B_EXT = 3,
+ PHY_RTL8214FC = 4,
+ PHY_RTL839X_SDS = 5,
+};
+
+struct rtl838x_port {
+ bool enable;
+ u64 pm;
+ u16 pvid;
+ bool eee_enabled;
+ enum phy_type phy;
+ bool is10G;
+ bool is2G5;
+ const struct dsa_port *dp;
+};
+
+struct rtl838x_vlan_info {
+ u64 untagged_ports;
+ u64 tagged_ports;
+ u8 profile_id;
+ bool hash_mc_fid;
+ bool hash_uc_fid;
+ u8 fid;
+};
+
+enum l2_entry_type {
+ L2_INVALID = 0,
+ L2_UNICAST = 1,
+ L2_MULTICAST = 2,
+ IP4_MULTICAST = 3,
+ IP6_MULTICAST = 4,
+};
+
+struct rtl838x_l2_entry {
+ u8 mac[6];
+ u16 vid;
+ u16 rvid;
+ u8 port;
+ bool valid;
+ enum l2_entry_type type;
+ bool is_static;
+ bool is_ip_mc;
+ bool is_ipv6_mc;
+ bool block_da;
+ bool block_sa;
+ bool suspended;
+ bool next_hop;
+ int age;
+ u8 trunk;
+ bool is_trunk;
+ u8 stack_dev;
+ u16 mc_portmask_index;
+ u32 mc_gip;
+ u32 mc_sip;
+ u16 mc_mac_index;
+ u16 nh_route_id;
+ bool nh_vlan_target; // Only RTL83xx: VLAN used for next hop: 0: inner, 1: outer
+};
+
+enum fwd_rule_action {
+ FWD_RULE_ACTION_NONE = 0,
+ FWD_RULE_ACTION_FWD = 1,
+};
+
+enum pie_phase {
+ PHASE_VACL = 0,
+ PHASE_IACL = 1,
+};
+
+/* Intermediate representation of a Packet Inspection Engine Rule
+ * as suggested by the Kernel's tc flower offload subsystem
+ * Field meaning is universal across SoC families, but data content is specific
+ * to SoC family (e.g. because of different port ranges) */
+struct pie_rule {
+ int id;
+ enum pie_phase phase; // Phase in which this template is applied
+ int packet_cntr; // ID of a packet counter assigned to this rule
+ int octet_cntr; // ID of a byte counter assigned to this rule
+ u32 last_packet_cnt;
+ u64 last_octet_cnt;
+
+ // The following are requirements for the pie template
+ bool is_egress;
+ bool is_ipv6; // This is a rule with IPv6 fields
+
+ // Fixed fields that are always matched against on RTL8380
+ u8 spmmask_fix;
+ u8 spn; // Source port number
+ bool stacking_port; // Source port is stacking port
+ bool mgnt_vlan; // Packet arrived on management VLAN
+ bool dmac_hit_sw; // The packet's destination MAC matches one of the device's
+ bool content_too_deep; // The content of the packet cannot be parsed: too many layers
+	bool not_first_frag;	// Not the first IP fragment
+ u8 frame_type_l4; // 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP
+ u8 frame_type; // 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6
+ bool otag_fmt; // 0: outer tag packet, 1: outer priority tag or untagged
+ bool itag_fmt; // 0: inner tag packet, 1: inner priority tag or untagged
+ bool otag_exist; // packet with outer tag
+ bool itag_exist; // packet with inner tag
+ bool frame_type_l2; // 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved
+ bool igr_normal_port; // Ingress port is not cpu or stacking port
+	u8 tid;			// The template ID defining what the templated fields mean
+
+ // Masks for the fields that are always matched against on RTL8380
+ u8 spmmask_fix_m;
+ u8 spn_m;
+ bool stacking_port_m;
+ bool mgnt_vlan_m;
+ bool dmac_hit_sw_m;
+ bool content_too_deep_m;
+ bool not_first_frag_m;
+ u8 frame_type_l4_m;
+ u8 frame_type_m;
+ bool otag_fmt_m;
+ bool itag_fmt_m;
+ bool otag_exist_m;
+ bool itag_exist_m;
+ bool frame_type_l2_m;
+ bool igr_normal_port_m;
+ u8 tid_m;
+
+ // Logical operations between rules, special rules for rule numbers apply
+ bool valid;
+ bool cond_not; // Matches when conditions not match
+ bool cond_and1; // And this rule 2n with the next rule 2n+1 in same block
+ bool cond_and2; // And this rule m in block 2n with rule m in block 2n+1
+ bool ivalid;
+
+ // Actions to be performed
+ bool drop; // Drop the packet
+ bool fwd_sel; // Forward packet: to port, portmask, dest route, next rule, drop
+	bool ovid_sel;		// Do something to outer vlan-id: shift, re-assign
+ bool ivid_sel; // Do something to inner vlan-id: shift, re-assign
+ bool flt_sel; // Filter the packet when sending to certain ports
+ bool log_sel; // Log the packet in one of the LOG-table counters
+ bool rmk_sel; // Re-mark the packet, i.e. change the priority-tag
+ bool meter_sel; // Meter the packet, i.e. limit rate of this type of packet
+	bool tagst_sel;		// Change the egress tag
+ bool mir_sel; // Mirror the packet to a Link Aggregation Group
+ bool nopri_sel; // Change the normal priority
+ bool cpupri_sel; // Change the CPU priority
+ bool otpid_sel; // Change Outer Tag Protocol Identifier (802.1q)
+ bool itpid_sel; // Change Inner Tag Protocol Identifier (802.1q)
+ bool shaper_sel; // Apply traffic shaper
+ bool mpls_sel; // MPLS actions
+ bool bypass_sel; // Bypass actions
+ bool fwd_sa_lrn; // Learn the source address when forwarding
+ bool fwd_mod_to_cpu; // Forward the modified VLAN tag format to CPU-port
+
+ // Fields used in predefined templates 0-2 on RTL8380 / 90 / 9300
+ u64 spm; // Source Port Matrix
+ u16 otag; // Outer VLAN-ID
+ u8 smac[ETH_ALEN]; // Source MAC address
+ u8 dmac[ETH_ALEN]; // Destination MAC address
+ u16 ethertype; // Ethernet frame type field in ethernet header
+ u16 itag; // Inner VLAN-ID
+ u16 field_range_check;
+ u32 sip; // Source IP
+ struct in6_addr sip6; // IPv6 Source IP
+ u32 dip; // Destination IP
+ struct in6_addr dip6; // IPv6 Destination IP
+ u16 tos_proto; // IPv4: TOS + Protocol fields, IPv6: Traffic class + next header
+ u16 sport; // TCP/UDP source port
+ u16 dport; // TCP/UDP destination port
+ u16 icmp_igmp;
+ u16 tcp_info;
+ u16 dsap_ssap; // Destination / Source Service Access Point bytes (802.3)
+
+ u64 spm_m;
+ u16 otag_m;
+ u8 smac_m[ETH_ALEN];
+ u8 dmac_m[ETH_ALEN];
+ u8 ethertype_m;
+ u16 itag_m;
+ u16 field_range_check_m;
+ u32 sip_m;
+ struct in6_addr sip6_m; // IPv6 Source IP mask
+ u32 dip_m;
+ struct in6_addr dip6_m; // IPv6 Destination IP mask
+ u16 tos_proto_m;
+ u16 sport_m;
+ u16 dport_m;
+ u16 icmp_igmp_m;
+ u16 tcp_info_m;
+ u16 dsap_ssap_m;
+
+ // Data associated with actions
+ u8 fwd_act; // Type of forwarding action
+			// 0: permit, 1: drop, 2: copy to port id, 3: copy to portmask
+ // 4: redirect to portid, 5: redirect to portmask
+ // 6: route, 7: vlan leaky (only 8380)
+ u16 fwd_data; // Additional data for forwarding action, e.g. destination port
+ u8 ovid_act;
+ u16 ovid_data; // Outer VLAN ID
+ u8 ivid_act;
+ u16 ivid_data; // Inner VLAN ID
+ u16 flt_data; // Filtering data
+ u16 log_data; // ID of packet or octet counter in LOG table, on RTL93xx
+ // unnecessary since PIE-Rule-ID == LOG-counter-ID
+ bool log_octets;
+ u8 mpls_act; // MPLS action type
+ u16 mpls_lib_idx; // MPLS action data
+
+ u16 rmk_data; // Data for remarking
+ u16 meter_data; // ID of meter for bandwidth control
+ u16 tagst_data;
+ u16 mir_data;
+ u16 nopri_data;
+ u16 cpupri_data;
+ u16 otpid_data;
+ u16 itpid_data;
+ u16 shaper_data;
+
+ // Bypass actions, ignored on RTL8380
+ bool bypass_all; // Not clear
+ bool bypass_igr_stp; // Bypass Ingress STP state
+ bool bypass_ibc_sc; // Bypass Ingress Bandwidth Control and Storm Control
+};
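+
+/* Minimal illustrative sketch, not part of the driver API contract: a VACL
+ * rule dropping all IPv4 traffic from 10.0.0.0/8 could be built roughly as
+ *
+ *	struct pie_rule pr = {
+ *		.phase = PHASE_VACL,
+ *		.frame_type = 2,	// IPv4, see frame_type above
+ *		.frame_type_m = 3,	// assumed: full mask on the 2-bit field
+ *		.sip = 0x0a000000,	// 10.0.0.0
+ *		.sip_m = 0xff000000,	// /8
+ *		.drop = true,
+ *	};
+ *
+ * and then installed via priv->r->pie_rule_add(priv, &pr).
+ */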
+
+struct rtl838x_l3_intf {
+ u16 vid;
+ u8 smac_idx;
+ u8 ip4_mtu_id;
+ u8 ip6_mtu_id;
+ u16 ip4_mtu;
+ u16 ip6_mtu;
+ u8 ttl_scope;
+ u8 hl_scope;
+ u8 ip4_icmp_redirect;
+ u8 ip6_icmp_redirect;
+ u8 ip4_pbr_icmp_redirect;
+ u8 ip6_pbr_icmp_redirect;
+};
+
+/*
+ * An entry in the RTL93XX SoC's ROUTER_MAC tables setting up a termination point
+ * for the L3 routing system. Packets arriving and matching an entry in this table
+ * will be considered for routing.
+ * Mask fields state whether the corresponding data fields matter for matching
+ */
+struct rtl93xx_rt_mac {
+ bool valid; // Valid or not
+ bool p_type; // Individual (0) or trunk (1) port
+ bool p_mask; // Whether the port type is used
+ u8 p_id;
+ u8 p_id_mask; // Mask for the port
+ u8 action; // Routing action performed: 0: FORWARD, 1: DROP, 2: TRAP2CPU
+ // 3: COPY2CPU, 4: TRAP2MASTERCPU, 5: COPY2MASTERCPU, 6: HARDDROP
+ u16 vid;
+ u16 vid_mask;
+ u64 mac; // MAC address used as source MAC in the routed packet
+ u64 mac_mask;
+};
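+
+/* Illustrative sketch only: an entry terminating routing for the switch MAC
+ * 00:11:22:33:44:55 on any port and any VLAN sets the address with a full
+ * mask and leaves p_mask and vid_mask zeroed so port and VLAN do not
+ * constrain the match:
+ *
+ *	struct rtl93xx_rt_mac m = {
+ *		.valid = true,
+ *		.action = 0,			// FORWARD
+ *		.mac = 0x001122334455ULL,
+ *		.mac_mask = 0xffffffffffffULL,
+ *	};
+ *	priv->r->set_l3_router_mac(idx, &m);	// idx: a free ROUTER_MAC slot (assumed)
+ */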
+
+struct rtl83xx_nexthop {
+ u16 id; // ID: L3_NEXT_HOP table-index or route-index set in L2_NEXT_HOP
+ u32 dev_id;
+ u16 port;
+ u16 vid; // VLAN-ID for L2 table entry (saved from L2-UC entry)
+ u16 rvid; // Relay VID/FID for the L2 table entry
+ u64 mac; // The MAC address of the entry in the L2_NEXT_HOP table
+ u16 mac_id;
+ u16 l2_id; // Index of this next hop forwarding entry in L2 FIB table
+ u64 gw; // The gateway MAC address packets are forwarded to
+ int if_id; // Interface (into L3_EGR_INTF_IDX)
+};
+
+struct rtl838x_switch_priv;
+
+struct rtl83xx_flow {
+ unsigned long cookie;
+ struct rhash_head node;
+ struct rtl838x_switch_priv *priv;
+ struct pie_rule rule;
+ u32 flags;
+};
+
+struct rtl93xx_route_attr {
+ bool valid;
+ bool hit;
+ bool ttl_dec;
+ bool ttl_check;
+ bool dst_null;
+ bool qos_as;
+ u8 qos_prio;
+ u8 type;
+ u8 action;
+};
+
+struct rtl83xx_route {
+ u32 gw_ip; // IP of the route's gateway
+ u32 dst_ip; // IP of the destination net
+ struct in6_addr dst_ip6;
+ int prefix_len; // Network prefix len of the destination net
+ bool is_host_route;
+ int id; // ID number of this route
+ struct rhlist_head linkage;
+ u16 switch_mac_id; // Index into switch's own MACs, RTL839X only
+ struct rtl83xx_nexthop nh;
+ struct pie_rule pr;
+ struct rtl93xx_route_attr attr;
+};
+
+struct rtl838x_reg {
+ void (*mask_port_reg_be)(u64 clear, u64 set, int reg);
+ void (*set_port_reg_be)(u64 set, int reg);
+ u64 (*get_port_reg_be)(int reg);
+ void (*mask_port_reg_le)(u64 clear, u64 set, int reg);
+ void (*set_port_reg_le)(u64 set, int reg);
+ u64 (*get_port_reg_le)(int reg);
+ int (*port_iso_ctrl)(int p);
+ void (*traffic_enable)(int source, int dest);
+ void (*traffic_disable)(int source, int dest);
+ void (*traffic_set)(int source, u64 dest_matrix);
+ u64 (*traffic_get)(int source);
+ void (*exec_tbl0_cmd)(u32 cmd);
+ void (*exec_tbl1_cmd)(u32 cmd);
+ int (*tbl_access_data_0)(int i);
+ void (*vlan_tables_read)(u32 vlan, struct rtl838x_vlan_info *info);
+ void (*vlan_set_tagged)(u32 vlan, struct rtl838x_vlan_info *info);
+ void (*vlan_set_untagged)(u32 vlan, u64 portmask);
+ void (*vlan_profile_dump)(int index);
+ void (*vlan_profile_setup)(int profile);
+ void (*stp_get)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
+ void (*stp_set)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
+ int (*mac_force_mode_ctrl)(int port);
+ int (*mac_port_ctrl)(int port);
+ int (*l2_port_new_salrn)(int port);
+ int (*l2_port_new_sa_fwd)(int port);
+ int (*mac_link_spd_sts)(int port);
+ u64 (*read_l2_entry_using_hash)(u32 hash, u32 position, struct rtl838x_l2_entry *e);
+ void (*write_l2_entry_using_hash)(u32 hash, u32 pos, struct rtl838x_l2_entry *e);
+ u64 (*read_cam)(int idx, struct rtl838x_l2_entry *e);
+ void (*write_cam)(int idx, struct rtl838x_l2_entry *e);
+ int (*rtl838x_vlan_port_tag_sts_ctrl)(int port);
+ int (*trk_mbr_ctr)(int group);
+ void (*init_eee)(struct rtl838x_switch_priv *priv, bool enable);
+ void (*port_eee_set)(struct rtl838x_switch_priv *priv, int port, bool enable);
+ int (*eee_port_ability)(struct rtl838x_switch_priv *priv,
+ struct ethtool_eee *e, int port);
+ u64 (*l2_hash_seed)(u64 mac, u32 vid);
+ u32 (*l2_hash_key)(struct rtl838x_switch_priv *priv, u64 seed);
+ u64 (*read_mcast_pmask)(int idx);
+ void (*write_mcast_pmask)(int idx, u64 portmask);
+ void (*vlan_fwd_on_inner)(int port, bool is_set);
+ void (*pie_init)(struct rtl838x_switch_priv *priv);
+ int (*pie_rule_read)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
+ int (*pie_rule_write)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
+ int (*pie_rule_add)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
+ void (*pie_rule_rm)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
+ void (*route_read)(int idx, struct rtl83xx_route *rt);
+ void (*route_write)(int idx, struct rtl83xx_route *rt);
+ void (*host_route_write)(int idx, struct rtl83xx_route *rt);
+ int (*l3_setup)(struct rtl838x_switch_priv *priv);
+ void (*set_l3_nexthop)(int idx, u16 dmac_id, u16 interface);
+ void (*get_l3_nexthop)(int idx, u16 *dmac_id, u16 *interface);
+ void (*l2_learning_setup)(void);
+ u64 (*get_l3_egress_mac)(u32 idx);
+ void (*set_l3_egress_mac)(u32 idx, u64 mac);
+ int (*find_l3_slot)(struct rtl83xx_route *rt, bool must_exist);
+ int (*route_lookup_hw)(struct rtl83xx_route *rt);
+ void (*get_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m);
+ void (*set_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m);
+ void (*set_l3_egress_intf)(int idx, struct rtl838x_l3_intf *intf);
+ u32 (*packet_cntr_read)(int counter);
+ void (*packet_cntr_clear)(int counter);
+ void (*enable_learning)(int port, bool enable);
+ void (*enable_flood)(int port, bool enable);
+ void (*enable_mcast_flood)(int port, bool enable);
+ void (*enable_bcast_flood)(int port, bool enable);
+ void (*set_distribution_algorithm)(int group, int algoidx, u32 algomask);
+ void (*set_receive_management_action)(int port, rma_ctrl_t type, action_type_t action);
+ u32 stat_port_rst;
+ u32 stat_rst;
+ u32 stat_port_std_mib;
+ u32 l2_ctrl_0;
+ u32 l2_ctrl_1;
+ u32 l2_port_aging_out;
+ u32 smi_poll_ctrl;
+ u32 l2_tbl_flush_ctrl;
+ u32 isr_glb_src;
+ u32 isr_port_link_sts_chg;
+ u32 imr_port_link_sts_chg;
+ u32 imr_glb;
+ u32 mir_ctrl;
+ u32 mir_dpm;
+ u32 mir_spm;
+ u32 mac_link_sts;
+ u32 mac_link_dup_sts;
+ u32 mac_rx_pause_sts;
+ u32 mac_tx_pause_sts;
+ u32 vlan_port_egr_filter;
+ u32 vlan_port_igr_filter;
+ u32 vlan_port_pb;
+ u32 vlan_port_tag_sts_ctrl;
+ u32 rma_bpdu_fld_pmask;
+ u32 rma_bpdu_ctrl;
+ u32 rma_ptp_ctrl;
+ u32 rma_lltp_ctrl;
+ u32 rma_eapol_ctrl;
+ u32 rma_bpdu_ctrl_div;
+ u32 rma_ptp_ctrl_div;
+ u32 rma_lltp_ctrl_div;
+ u32 rma_eapol_ctrl_div;
+ u32 storm_ctrl_port_uc;
+ u32 storm_ctrl_port_mc;
+ u32 storm_ctrl_port_bc;
+ u32 storm_ctrl_port_uc_shift;
+ u32 storm_ctrl_port_mc_shift;
+ u32 storm_ctrl_port_bc_shift;
+ u32 vlan_ctrl;
+ u32 spcl_trap_eapol_ctrl;
+ u32 spcl_trap_arp_ctrl;
+ u32 spcl_trap_igmp_ctrl;
+ u32 spcl_trap_ipv6_ctrl;
+ u32 spcl_trap_switch_mac_ctrl;
+ u32 spcl_trap_switch_ipv4_addr_ctrl;
+ u32 spcl_trap_crc_ctrl;
+ u32 spcl_trap_ctrl;
+ u32 sflow_ctrl;
+ u32 sflow_port_rate_ctrl;
+ u32 trk_hash_idx_ctrl;
+ u32 trk_hash_ctrl;
+};
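+
+/* Illustrative sketch: family-independent switch code is expected to touch
+ * the hardware only through the callbacks above, e.g. restricting a port to
+ * forward to the CPU port only:
+ *
+ *	priv->r->traffic_set(port, BIT_ULL(priv->cpu_port));
+ */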
+
+struct rtl838x_switch_priv {
+ /* Switch operation */
+ struct dsa_switch *ds;
+ struct device *dev;
+ u16 id;
+ u16 family_id;
+ int mc_group_saves[MAX_MC_GROUPS];
+ char version;
+ struct rtl838x_port ports[57];
+ struct mutex reg_mutex; // Mutex for individual register manipulations
+	struct mutex pie_mutex; // Mutex for Packet Inspection Engine
+ int link_state_irq;
+ int mirror_group_ports[4];
+ struct mii_bus *mii_bus;
+ const struct rtl838x_reg *r;
+ u8 cpu_port;
+ u8 port_mask;
+ u8 port_width;
+ u8 port_ignore;
+ u64 irq_mask;
+ u32 fib_entries;
+ int l2_bucket_size;
+ struct dentry *dbgfs_dir;
+ int n_lags;
+ u64 lags_port_members[MAX_LAGS];
+ struct net_device *lag_devs[MAX_LAGS];
+ u32 lag_primary[MAX_LAGS];
+ u32 is_lagmember[57];
+ u64 lagmembers;
+ struct notifier_block nb; // TODO: change to different name
+ struct notifier_block ne_nb;
+ struct notifier_block fib_nb;
+ bool eee_enabled;
+ unsigned long int mc_group_bm[MAX_MC_GROUPS >> 5];
+ int n_pie_blocks;
+ struct rhashtable tc_ht;
+ unsigned long int pie_use_bm[MAX_PIE_ENTRIES >> 5];
+ struct rhltable routes;
+ unsigned long int route_use_bm[MAX_ROUTES >> 5];
+ unsigned long int host_route_use_bm[MAX_HOST_ROUTES >> 5];
+ int n_counters;
+ unsigned long int octet_cntr_use_bm[MAX_COUNTERS >> 5];
+ unsigned long int packet_cntr_use_bm[MAX_COUNTERS >> 4];
+ struct rtl838x_l3_intf *interfaces[MAX_INTERFACES];
+ u16 intf_mtus[MAX_INTF_MTUS];
+ int intf_mtu_count[MAX_INTF_MTUS];
+};
+
+void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv);
+void rtl930x_dbgfs_init(struct rtl838x_switch_priv *priv);
+
+#endif /* _RTL838X_H */
diff --git a/target/linux/realtek/files-5.4/net/dsa/tag_rtl83xx.c b/target/linux/realtek/files-5.4/net/dsa/tag_rtl83xx.c
new file mode 100644
index 0000000000..7c6d060210
--- /dev/null
+++ b/target/linux/realtek/files-5.4/net/dsa/tag_rtl83xx.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/tag_rtl83xx.c - Realtek RTL83xx trailer tag handling,
+ * based on net/dsa/tag_trailer.c
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/rtl838x.h>
+
+#include "dsa_priv.h"
+
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct sk_buff *nskb;
+ int padlen;
+ u8 *trailer;
+
+ /*
+ * We have to make sure that the trailer ends up as the very
+ * last 4 bytes of the packet. This means that we have to pad
+ * the packet to the minimum ethernet frame size, if necessary,
+ * before adding the trailer.
+ */
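+	/*
+	 * Illustrative example: a 42 byte ARP request egressing user port 3 is
+	 * first padded to 60 bytes, then the trailer 0x80 0x03 0x10 0x00 is
+	 * appended, so 64 bytes are handed to the CPU port.
+	 */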
+ padlen = 0;
+ if (skb->len < 60)
+ padlen = 60 - skb->len;
+
+ nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+ skb_reserve(nskb, NET_IP_ALIGN);
+
+ skb_reset_mac_header(nskb);
+ skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
+ skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
+ skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
+ consume_skb(skb);
+
+ if (padlen) {
+ skb_put_zero(nskb, padlen);
+ }
+
+ trailer = skb_put(nskb, 4);
+ trailer[0] = 0x80;
+
+ trailer[1] = dp->index;
+ trailer[2] = 0x10;
+ trailer[3] = 0x00;
+
+ return nskb;
+}
+
+static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+	struct dsa_port *cpu_dp = dev->dsa_ptr;
+	struct dsa_switch *ds = cpu_dp->ds;
+	struct rtl838x_switch_priv *priv = ds->priv;
+	u8 *trailer;
+	bool trunk = false;
+	int source_port;
+
+	if (skb_linearize(skb))
+		return NULL;
+
+	trailer = skb_tail_pointer(skb) - 4;
+
+	if (trailer[0] != 0x80 || (trailer[1] & 0x80) != 0x00 ||
+	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
+		return NULL;
+
+	/* The source port must be known before the LAG-membership check below */
+	source_port = trailer[1] & 0x3f;
+
+	if (trailer[1] & 0x40) { // forward
+		skb->offload_fwd_mark = 1;
+		if (priv->lagmembers & (1ULL << source_port)) {
+			pr_debug("lag member %d found\n", source_port);
+			trunk = true;
+		}
+	}
+
+	if (trunk) {
+ /* The exact source port is not available in the tag,
+ * so we inject the frame directly on the upper
+ * team/bond.
+ */
+ skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
+ } else {
+ skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ }
+ if (!skb->dev)
+ return NULL;
+
+ if (pskb_trim_rcsum(skb, skb->len - 4))
+ return NULL;
+
+ return skb;
+}
+
+static const struct dsa_device_ops trailer_netdev_ops = {
+ .name = "rtl83xx",
+ .proto = DSA_TAG_PROTO_RTL83XX,
+ .xmit = trailer_xmit,
+ .rcv = trailer_rcv,
+ .overhead = 4,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL83XX);
+
+module_dsa_tag_driver(trailer_netdev_ops);
diff --git a/target/linux/realtek/image/Makefile b/target/linux/realtek/image/Makefile
index 18e5fedb9b..a4b2ea892c 100644
--- a/target/linux/realtek/image/Makefile
+++ b/target/linux/realtek/image/Makefile
@@ -6,8 +6,6 @@ include $(INCLUDE_DIR)/image.mk
KERNEL_LOADADDR = 0x80000000
KERNEL_ENTRY = 0x80000400
-DEVICE_VARS += ZYXEL_VERS
-
define Build/zyxel-vers
( echo VERS;\
for hw in $(1); do\
@@ -86,45 +84,47 @@ define Device/netgear_gs110tpp-v1
endef
TARGET_DEVICES += netgear_gs110tpp-v1
-define Device/zyxel_gs1900
+define Device/zyxel_gs1900-10hp
SOC := rtl8380
IMAGE_SIZE := 6976k
DEVICE_VENDOR := ZyXEL
- UIMAGE_MAGIC := 0x83800000
- KERNEL_INITRAMFS := kernel-bin | append-dtb | gzip | zyxel-vers $$$$(ZYXEL_VERS) | \
- uImage gzip
-endef
-
-define Device/zyxel_gs1900-10hp
- $(Device/zyxel_gs1900)
DEVICE_MODEL := GS1900-10HP
- ZYXEL_VERS := AAZI
+ UIMAGE_MAGIC := 0x83800000
+ KERNEL_INITRAMFS := kernel-bin | append-dtb | gzip | zyxel-vers AAZI | uImage gzip
endef
TARGET_DEVICES += zyxel_gs1900-10hp
-define Device/zyxel_gs1900-8
- $(Device/zyxel_gs1900)
- DEVICE_MODEL := GS1900-8
- ZYXEL_VERS := AAHH
-endef
-TARGET_DEVICES += zyxel_gs1900-8
-
define Device/zyxel_gs1900-8hp-v1
- $(Device/zyxel_gs1900)
+ SOC := rtl8380
+ IMAGE_SIZE := 6976k
+ DEVICE_VENDOR := ZyXEL
DEVICE_MODEL := GS1900-8HP
DEVICE_VARIANT := v1
- ZYXEL_VERS := AAHI
DEVICE_PACKAGES += lua-rs232
+ UIMAGE_MAGIC := 0x83800000
+ KERNEL_INITRAMFS := kernel-bin | append-dtb | gzip | zyxel-vers AAHI | uImage gzip
endef
TARGET_DEVICES += zyxel_gs1900-8hp-v1
define Device/zyxel_gs1900-8hp-v2
- $(Device/zyxel_gs1900)
+ SOC := rtl8380
+ IMAGE_SIZE := 6976k
+ DEVICE_VENDOR := ZyXEL
DEVICE_MODEL := GS1900-8HP
DEVICE_VARIANT := v2
- ZYXEL_VERS := AAHI
DEVICE_PACKAGES += lua-rs232
+ UIMAGE_MAGIC := 0x83800000
+ KERNEL_INITRAMFS := kernel-bin | append-dtb | gzip | zyxel-vers AAHI | uImage gzip
endef
TARGET_DEVICES += zyxel_gs1900-8hp-v2
+define Device/edgecore_ecs4100-12ph
+ SOC := rtl8392
+ IMAGE_SIZE := 14336k
+ DEVICE_VENDOR := Edgecore
+ DEVICE_MODEL := ECS4100-12PH
+ DEVICE_PACKAGES += lua-rs232
+endef
+TARGET_DEVICES += edgecore_ecs4100-12ph
+
$(eval $(call BuildImage))
diff --git a/target/linux/realtek/patches-5.4/100-dsa-lag.patch b/target/linux/realtek/patches-5.4/100-dsa-lag.patch
new file mode 100644
index 0000000000..3d1992e4cb
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/100-dsa-lag.patch
@@ -0,0 +1,3123 @@
+diff -urpN linux-5.4.137.old/drivers/net/bonding/bond_main.c linux-5.4.137/drivers/net/bonding/bond_main.c
+--- linux-5.4.137.old/drivers/net/bonding/bond_main.c 2021-08-04 14:05:38.055697349 +0700
++++ linux-5.4.137/drivers/net/bonding/bond_main.c 2021-08-04 14:05:53.887713713 +0700
+@@ -1753,6 +1753,8 @@ int bond_enslave(struct net_device *bond
+ goto err_unregister;
+ }
+
++ bond_lower_state_changed(new_slave);
++
+ res = bond_sysfs_slave_add(new_slave);
+ if (res) {
+ slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
+diff -urpN linux-5.4.137.old/drivers/net/dsa/b53/b53_common.c linux-5.4.137/drivers/net/dsa/b53/b53_common.c
+--- linux-5.4.137.old/drivers/net/dsa/b53/b53_common.c 2021-08-04 14:05:38.055697349 +0700
++++ linux-5.4.137/drivers/net/dsa/b53/b53_common.c 2021-08-04 14:05:53.887713713 +0700
+@@ -537,7 +537,7 @@ int b53_enable_port(struct dsa_switch *d
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+- cpu_port = ds->ports[port].cpu_dp->index;
++ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ b53_br_egress_floods(ds, port, true, true);
+ b53_port_set_learning(dev, port, false);
+@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(b53_fdb_dump);
+ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
+ {
+ struct b53_device *dev = ds->priv;
+- s8 cpu_port = ds->ports[port].cpu_dp->index;
++ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ u16 pvlan, reg;
+ unsigned int i;
+
+@@ -1722,7 +1722,7 @@ void b53_br_leave(struct dsa_switch *ds,
+ {
+ struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl = &dev->vlans[0];
+- s8 cpu_port = ds->ports[port].cpu_dp->index;
++ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+
+@@ -2396,10 +2396,13 @@ struct b53_device *b53_switch_alloc(stru
+ struct dsa_switch *ds;
+ struct b53_device *dev;
+
+- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
++ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
++ ds->dev = base;
++ ds->num_ports = DSA_MAX_PORTS;
++
+ dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/bcm_sf2.c linux-5.4.137/drivers/net/dsa/bcm_sf2.c
+--- linux-5.4.137.old/drivers/net/dsa/bcm_sf2.c 2021-08-04 14:05:38.055697349 +0700
++++ linux-5.4.137/drivers/net/dsa/bcm_sf2.c 2021-08-04 14:05:53.887713713 +0700
+@@ -670,7 +670,7 @@ static void bcm_sf2_sw_fixed_state(struc
+ * state machine and make it go in PHY_FORCING state instead.
+ */
+ if (!status->link)
+- netif_carrier_off(ds->ports[port].slave);
++ netif_carrier_off(dsa_to_port(ds, port)->slave);
+ status->duplex = DUPLEX_FULL;
+ } else {
+ status->link = true;
+@@ -736,7 +736,7 @@ static int bcm_sf2_sw_resume(struct dsa_
+ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+ {
+- struct net_device *p = ds->ports[port].cpu_dp->master;
++ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_wolinfo pwol = { };
+
+@@ -760,9 +760,9 @@ static void bcm_sf2_sw_get_wol(struct ds
+ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+ {
+- struct net_device *p = ds->ports[port].cpu_dp->master;
++ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+- s8 cpu_port = ds->ports[port].cpu_dp->index;
++ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ struct ethtool_wolinfo pwol = { };
+
+ if (p->ethtool_ops->get_wol)
+diff -urpN linux-5.4.137.old/drivers/net/dsa/bcm_sf2_cfp.c linux-5.4.137/drivers/net/dsa/bcm_sf2_cfp.c
+--- linux-5.4.137.old/drivers/net/dsa/bcm_sf2_cfp.c 2021-08-04 14:05:38.055697349 +0700
++++ linux-5.4.137/drivers/net/dsa/bcm_sf2_cfp.c 2021-08-04 14:05:53.887713713 +0700
+@@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struc
+ struct ethtool_rx_flow_spec *fs)
+ {
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+- s8 cpu_port = ds->ports[port].cpu_dp->index;
++ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ __u64 ring_cookie = fs->ring_cookie;
+ unsigned int queue_num, port_num;
+ int ret;
+@@ -1046,7 +1046,7 @@ static int bcm_sf2_cfp_rule_get_all(stru
+ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+ {
+- struct net_device *p = ds->ports[port].cpu_dp->master;
++ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+@@ -1089,7 +1089,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch
+ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc)
+ {
+- struct net_device *p = ds->ports[port].cpu_dp->master;
++ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+diff -urpN linux-5.4.137.old/drivers/net/dsa/dsa_loop.c linux-5.4.137/drivers/net/dsa/dsa_loop.c
+--- linux-5.4.137.old/drivers/net/dsa/dsa_loop.c 2021-08-04 14:05:38.055697349 +0700
++++ linux-5.4.137/drivers/net/dsa/dsa_loop.c 2021-08-04 14:05:53.887713713 +0700
+@@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdi
+ dev_info(&mdiodev->dev, "%s: 0x%0x\n",
+ pdata->name, pdata->enabled_ports);
+
+- ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
++ ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
++ ds->dev = &mdiodev->dev;
++ ds->num_ports = DSA_MAX_PORTS;
++
+ ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/lan9303-core.c linux-5.4.137/drivers/net/dsa/lan9303-core.c
+--- linux-5.4.137.old/drivers/net/dsa/lan9303-core.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/lan9303-core.c 2021-08-04 14:05:53.887713713 +0700
+@@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struc
+ {
+ int base;
+
+- chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
++ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
+ if (!chip->ds)
+ return -ENOMEM;
+
++ chip->ds->dev = chip->dev;
++ chip->ds->num_ports = LAN9303_NUM_PORTS;
+ chip->ds->priv = chip;
+ chip->ds->ops = &lan9303_switch_ops;
+ base = chip->phy_addr_base;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/lantiq_gswip.c linux-5.4.137/drivers/net/dsa/lantiq_gswip.c
+--- linux-5.4.137.old/drivers/net/dsa/lantiq_gswip.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/lantiq_gswip.c 2021-08-04 14:05:53.887713713 +0700
+@@ -2006,10 +2006,12 @@ static int gswip_probe(struct platform_d
+ if (!priv->hw_info)
+ return -EINVAL;
+
+- priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
++ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
++ priv->ds->dev = dev;
++ priv->ds->num_ports = priv->hw_info->max_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = &gswip_switch_ops;
+ priv->dev = dev;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/microchip/ksz_common.c linux-5.4.137/drivers/net/dsa/microchip/ksz_common.c
+--- linux-5.4.137.old/drivers/net/dsa/microchip/ksz_common.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/microchip/ksz_common.c 2021-08-04 14:05:53.891713717 +0700
+@@ -396,10 +396,13 @@ struct ksz_device *ksz_switch_alloc(stru
+ struct dsa_switch *ds;
+ struct ksz_device *swdev;
+
+- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
++ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
++ ds->dev = base;
++ ds->num_ports = DSA_MAX_PORTS;
++
+ swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return NULL;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mt7530.c linux-5.4.137/drivers/net/dsa/mt7530.c
+--- linux-5.4.137.old/drivers/net/dsa/mt7530.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mt7530.c 2021-08-04 14:05:53.891713717 +0700
+@@ -785,7 +785,7 @@ mt7530_port_set_vlan_unaware(struct dsa_
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ if (dsa_is_user_port(ds, i) &&
+- dsa_port_is_vlan_filtering(&ds->ports[i])) {
++ dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
+ all_user_ports_removed = false;
+ break;
+ }
+@@ -843,7 +843,7 @@ mt7530_port_bridge_leave(struct dsa_swit
+ * other port is still a VLAN-aware port.
+ */
+ if (dsa_is_user_port(ds, i) && i != port &&
+- !dsa_port_is_vlan_filtering(&ds->ports[i])) {
++ !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ if (priv->ports[i].enable)
+@@ -1219,7 +1219,7 @@ mt7530_setup(struct dsa_switch *ds)
+ * controller also is the container for two GMACs nodes representing
+ * as two netdev instances.
+ */
+- dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
++	dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ ds->configure_vlan_while_not_filtering = true;
+
+ if (priv->id == ID_MT7530) {
+@@ -1306,7 +1306,7 @@ mt7530_setup(struct dsa_switch *ds)
+
+ if (!dsa_is_unused_port(ds, 5)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+- interface = of_get_phy_mode(ds->ports[5].dn);
++ interface = of_get_phy_mode(dsa_to_port(ds, 5)->dn);
+ } else {
+ /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ for_each_child_of_node(dn, mac_np) {
+@@ -1649,10 +1649,13 @@ mt7530_probe(struct mdio_device *mdiodev
+ if (!priv)
+ return -ENOMEM;
+
+- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
++ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
++ priv->ds->dev = &mdiodev->dev;
++ priv->ds->num_ports = DSA_MAX_PORTS;
++
+ /* Use medatek,mcm property to distinguish hardware type that would
+ * casues a little bit differences on power-on sequence.
+ */
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6060.c linux-5.4.137/drivers/net/dsa/mv88e6060.c
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6060.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6060.c 2021-08-04 14:05:53.891713717 +0700
+@@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_d
+
+ dev_info(dev, "switch %s detected\n", name);
+
+- ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
++ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
++ ds->dev = dev;
++ ds->num_ports = MV88E6060_PORTS;
+ ds->priv = priv;
+ ds->dev = dev;
+ ds->ops = &mv88e6060_switch_ops;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/chip.c linux-5.4.137/drivers/net/dsa/mv88e6xxx/chip.c
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/chip.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6xxx/chip.c 2021-08-04 14:05:57.643717592 +0700
+@@ -1075,7 +1075,7 @@ static u16 mv88e6xxx_port_vlan(struct mv
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ return mv88e6xxx_port_mask(chip);
+
+- br = ds->ports[port].bridge_dev;
++ br = dsa_to_port(ds, port)->bridge_dev;
+ pvlan = 0;
+
+ /* Frames from user ports can egress any local DSA links and CPU ports,
+@@ -1135,6 +1135,7 @@ static int mv88e6xxx_pri_setup(struct mv
+
+ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
+ {
++ struct dsa_switch *ds = chip->ds;
+ int target, port;
+ int err;
+
+@@ -1143,10 +1144,9 @@ static int mv88e6xxx_devmap_setup(struct
+
+ /* Initialize the routing port to the 32 possible target devices */
+ for (target = 0; target < 32; target++) {
+- port = 0x1f;
+- if (target < DSA_MAX_SWITCHES)
+- if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
+- port = chip->ds->rtable[target];
++ port = dsa_routing_port(ds, target);
++ if (port == ds->num_ports)
++ port = 0x1f;
+
+ err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
+ if (err)
+@@ -1250,14 +1250,30 @@ static int mv88e6xxx_mac_setup(struct mv
+
+ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
+ {
++ struct dsa_switch_tree *dst = chip->ds->dst;
++ struct dsa_switch *ds;
++ struct dsa_port *dp;
+ u16 pvlan = 0;
+
+ if (!mv88e6xxx_has_pvt(chip))
+ return -EOPNOTSUPP;
+
+ /* Skip the local source device, which uses in-chip port VLAN */
+- if (dev != chip->ds->index)
++ if (dev != chip->ds->index) {
+ pvlan = mv88e6xxx_port_vlan(chip, dev, port);
++ ds = dsa_switch_find(dst->index, dev);
++ dp = ds ? dsa_to_port(ds, port) : NULL;
++ if (dp && dp->lag_dev) {
++ /* As the PVT is used to limit flooding of
++ * FORWARD frames, which use the LAG ID as the
++ * source port, we must translate dev/port to
++ * the special "LAG device" in the PVT, using
++ * the LAG ID as the port number.
++ */
++ dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK;
++ port = dsa_lag_id(dst, dp->lag_dev);
++ }
++ }
+
+ return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
+ }
+@@ -1402,7 +1418,7 @@ static int mv88e6xxx_port_check_hw_vlan(
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
+
+- if (!ds->ports[i].slave)
++ if (!dsa_to_port(ds, i)->slave)
+ continue;
+
+ if (vlan.member[i] ==
+@@ -1410,7 +1426,7 @@ static int mv88e6xxx_port_check_hw_vlan(
+ continue;
+
+ if (dsa_to_port(ds, i)->bridge_dev ==
+- ds->ports[port].bridge_dev)
++ dsa_to_port(ds, port)->bridge_dev)
+ break; /* same bridge, check next VLAN */
+
+ if (!dsa_to_port(ds, i)->bridge_dev)
+@@ -2048,7 +2064,7 @@ static int mv88e6xxx_bridge_map(struct m
+
+ /* Remap the Port VLAN of each local bridge group member */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
+- if (chip->ds->ports[port].bridge_dev == br) {
++ if (dsa_to_port(chip->ds, port)->bridge_dev == br) {
+ err = mv88e6xxx_port_vlan_map(chip, port);
+ if (err)
+ return err;
+@@ -2065,7 +2081,7 @@ static int mv88e6xxx_bridge_map(struct m
+ break;
+
+ for (port = 0; port < ds->num_ports; ++port) {
+- if (ds->ports[port].bridge_dev == br) {
++ if (dsa_to_port(ds, port)->bridge_dev == br) {
+ err = mv88e6xxx_pvt_map(chip, dev, port);
+ if (err)
+ return err;
+@@ -5022,6 +5038,271 @@ static int mv88e6xxx_port_egress_floods(
+ return err;
+ }
+
++static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ struct dsa_port *dp;
++ int id, members = 0;
++
++ id = dsa_lag_id(ds->dst, lag);
++ if (id < 0 || id >= ds->num_lag_ids)
++ return false;
++
++ dsa_lag_foreach_port(dp, ds->dst, lag)
++ /* Includes the port joining the LAG */
++ members++;
++
++ if (members > 8)
++ return false;
++
++ /* We could potentially relax this to include active
++ * backup in the future.
++ */
++ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
++ return false;
++
++ /* Ideally we would also validate that the hash type matches
++ * the hardware. Alas, this is always set to unknown on team
++ * interfaces.
++ */
++ return true;
++}
++
++static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ struct dsa_port *dp;
++ u16 map = 0;
++ int id;
++
++ id = dsa_lag_id(ds->dst, lag);
++
++ /* Build the map of all ports to distribute flows destined for
++ * this LAG. This can be either a local user port, or a DSA
++ * port if the LAG port is on a remote chip.
++ */
++ dsa_lag_foreach_port(dp, ds->dst, lag)
++ map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
++
++ return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
++}
++
++static const u8 mv88e6xxx_lag_mask_table[8][8] = {
++ /* Row number corresponds to the number of active members in a
++ * LAG. Each column states which of the eight hash buckets are
++ * mapped to the column:th port in the LAG.
++ *
++ * Example: In a LAG with three active ports, the second port
++ * ([2][1]) would be selected for traffic mapped to buckets
++ * 3,4,5 (0x38).
++ */
++ { 0xff, 0, 0, 0, 0, 0, 0, 0 },
++ { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
++ { 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
++ { 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
++ { 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
++ { 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
++ { 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
++ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
++};
++
++static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
++ int num_tx, int nth)
++{
++ u8 active = 0;
++ int i;
++
++ num_tx = num_tx <= 8 ? num_tx : 8;
++ if (nth < num_tx)
++ active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
++
++ for (i = 0; i < 8; i++) {
++ if (BIT(i) & active)
++ mask[i] |= BIT(port);
++ }
++}
++
++static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ unsigned int id, num_tx;
++ struct net_device *lag;
++ struct dsa_port *dp;
++ int i, err, nth;
++ u16 mask[8];
++ u16 ivec;
++
++ /* Assume no port is a member of any LAG. */
++ ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
++
++ /* Disable all masks for ports that _are_ members of a LAG. */
++ list_for_each_entry(dp, &ds->dst->ports, list) {
++ if (!dp->lag_dev || dp->ds != ds)
++ continue;
++
++ ivec &= ~BIT(dp->index);
++ }
++
++ for (i = 0; i < 8; i++)
++ mask[i] = ivec;
++
++ /* Enable the correct subset of masks for all LAG ports that
++ * are in the Tx set.
++ */
++ dsa_lags_foreach_id(id, ds->dst) {
++ lag = dsa_lag_dev(ds->dst, id);
++ if (!lag)
++ continue;
++
++ num_tx = 0;
++ dsa_lag_foreach_port(dp, ds->dst, lag) {
++ if (dp->lag_tx_enabled)
++ num_tx++;
++ }
++
++ if (!num_tx)
++ continue;
++
++ nth = 0;
++ dsa_lag_foreach_port(dp, ds->dst, lag) {
++ if (!dp->lag_tx_enabled)
++ continue;
++
++ if (dp->ds == ds)
++ mv88e6xxx_lag_set_port_mask(mask, dp->index,
++ num_tx, nth);
++
++ nth++;
++ }
++ }
++
++ for (i = 0; i < 8; i++) {
++ err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
++ struct net_device *lag)
++{
++ int err;
++
++ err = mv88e6xxx_lag_sync_masks(ds);
++
++ if (!err)
++ err = mv88e6xxx_lag_sync_map(ds, lag);
++
++ return err;
++}
++
++static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err;
++
++ mv88e6xxx_reg_lock(chip);
++ err = mv88e6xxx_lag_sync_masks(ds);
++ mv88e6xxx_reg_unlock(chip);
++ return err;
++}
++
++static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err, id;
++
++ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
++ return -EOPNOTSUPP;
++
++ id = dsa_lag_id(ds->dst, lag);
++
++ mv88e6xxx_reg_lock(chip);
++
++ err = mv88e6xxx_port_set_trunk(chip, port, true, id);
++ if (err)
++ goto err_unlock;
++
++ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
++ if (err)
++ goto err_clear_trunk;
++
++ mv88e6xxx_reg_unlock(chip);
++ return 0;
++
++err_clear_trunk:
++ mv88e6xxx_port_set_trunk(chip, port, false, 0);
++err_unlock:
++ mv88e6xxx_reg_unlock(chip);
++ return err;
++}
++
++static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
++ struct net_device *lag)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err_sync, err_trunk;
++
++ mv88e6xxx_reg_lock(chip);
++ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
++ err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
++ mv88e6xxx_reg_unlock(chip);
++ return err_sync ? : err_trunk;
++}
++
++static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
++ int port)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err;
++
++ mv88e6xxx_reg_lock(chip);
++ err = mv88e6xxx_lag_sync_masks(ds);
++ mv88e6xxx_reg_unlock(chip);
++ return err;
++}
++
++static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
++ int port, struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err;
++
++ if (!mv88e6xxx_lag_can_offload(ds, lag, info))
++ return -EOPNOTSUPP;
++
++ mv88e6xxx_reg_lock(chip);
++
++ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
++ if (err)
++ goto unlock;
++
++ err = mv88e6xxx_pvt_map(chip, sw_index, port);
++
++unlock:
++ mv88e6xxx_reg_unlock(chip);
++ return err;
++}
++
++static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
++ int port, struct net_device *lag)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err_sync, err_pvt;
++
++ mv88e6xxx_reg_lock(chip);
++ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
++ err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
++ mv88e6xxx_reg_unlock(chip);
++ return err_sync ? : err_pvt;
++}
++
+ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+ .get_tag_protocol = mv88e6xxx_get_tag_protocol,
+ .setup = mv88e6xxx_setup,
+@@ -5069,6 +5350,12 @@ static const struct dsa_switch_ops mv88e
+ .port_txtstamp = mv88e6xxx_port_txtstamp,
+ .port_rxtstamp = mv88e6xxx_port_rxtstamp,
+ .get_ts_info = mv88e6xxx_get_ts_info,
++ .port_lag_change = mv88e6xxx_port_lag_change,
++ .port_lag_join = mv88e6xxx_port_lag_join,
++ .port_lag_leave = mv88e6xxx_port_lag_leave,
++ .crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
++ .crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
++ .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
+ };
+
+ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
+@@ -5076,10 +5363,12 @@ static int mv88e6xxx_register_switch(str
+ struct device *dev = chip->dev;
+ struct dsa_switch *ds;
+
+- ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
++ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
++ ds->dev = dev;
++ ds->num_ports = mv88e6xxx_num_ports(chip);
+ ds->priv = chip;
+ ds->dev = dev;
+ ds->ops = &mv88e6xxx_switch_ops;
+@@ -5087,6 +5376,12 @@ static int mv88e6xxx_register_switch(str
+ ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
+ ds->assisted_learning_on_cpu_port = true;
+
++ /* Some chips support up to 32, but that requires enabling the
++ * 5-bit port mode, which we do not support. 640k^W16 ought to
++ * be enough for anyone.
++ */
++ ds->num_lag_ids = 16;
++
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/global2.c linux-5.4.137/drivers/net/dsa/mv88e6xxx/global2.c
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/global2.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6xxx/global2.c 2021-08-04 14:05:53.891713717 +0700
+@@ -126,8 +126,8 @@ int mv88e6xxx_g2_device_mapping_write(st
+
+ /* Offset 0x07: Trunk Mask Table register */
+
+-static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+- bool hash, u16 mask)
++int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
++ bool hash, u16 mask)
+ {
+ u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
+
+@@ -140,8 +140,8 @@ static int mv88e6xxx_g2_trunk_mask_write
+
+ /* Offset 0x08: Trunk Mapping Table register */
+
+-static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+- u16 map)
++int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
++ u16 map)
+ {
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ u16 val = (id << 11) | (map & port_mask);
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/global2.h linux-5.4.137/drivers/net/dsa/mv88e6xxx/global2.h
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/global2.h 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6xxx/global2.h 2021-08-04 14:05:53.891713717 +0700
+@@ -101,6 +101,7 @@
+ #define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
+ #define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
+ #define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
++#define MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK 0x1f
+
+ /* Offset 0x0C: Cross-chip Port VLAN Data Register */
+ #define MV88E6XXX_G2_PVT_DATA 0x0c
+@@ -336,6 +337,10 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv
+
+ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+
++int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
++ bool hash, u16 mask);
++int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
++ u16 map);
+ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
+
+ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/port.c linux-5.4.137/drivers/net/dsa/mv88e6xxx/port.c
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/port.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6xxx/port.c 2021-08-04 14:05:53.891713717 +0700
+@@ -994,6 +994,27 @@ int mv88e6xxx_port_set_message_port(stru
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+ }
+
++int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
++ bool trunk, u8 id)
++{
++ u16 val;
++ int err;
++
++ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
++ if (err)
++ return err;
++
++ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
++
++ if (trunk)
++ val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
++ (id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
++ else
++ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
++
++ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
++}
++
+ /* Offset 0x06: Port Based VLAN Map */
+
+ int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
+diff -urpN linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/port.h linux-5.4.137/drivers/net/dsa/mv88e6xxx/port.h
+--- linux-5.4.137.old/drivers/net/dsa/mv88e6xxx/port.h 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/mv88e6xxx/port.h 2021-08-04 14:05:53.891713717 +0700
+@@ -168,6 +168,9 @@
+ /* Offset 0x05: Port Control 1 */
+ #define MV88E6XXX_PORT_CTL1 0x05
+ #define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
++#define MV88E6XXX_PORT_CTL1_TRUNK_PORT 0x4000
++#define MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK 0x0f00
++#define MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT 8
+ #define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
+
+ /* Offset 0x06: Port Based VLAN Map */
+@@ -343,6 +346,8 @@ int mv88e6351_port_set_ether_type(struct
+ u16 etype);
+ int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port);
++int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
++ bool trunk, u8 id);
+ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ size_t size);
+ int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+diff -urpN linux-5.4.137.old/drivers/net/dsa/qca8k.c linux-5.4.137/drivers/net/dsa/qca8k.c
+--- linux-5.4.137.old/drivers/net/dsa/qca8k.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/qca8k.c 2021-08-04 14:05:53.891713717 +0700
+@@ -661,7 +661,7 @@ qca8k_setup(struct dsa_switch *ds)
+ return ret;
+
+ /* Initialize CPU port pad mode (xMII type, delays...) */
+- phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
++ phy_mode = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn);
+ if (phy_mode < 0) {
+ pr_err("Can't find phy-mode for master device\n");
+ return phy_mode;
+@@ -1077,10 +1077,13 @@ qca8k_sw_probe(struct mdio_device *mdiod
+ if (id != QCA8K_ID_QCA8337)
+ return -ENODEV;
+
+- priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
++	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
++				GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
++ priv->ds->dev = &mdiodev->dev;
++ priv->ds->num_ports = DSA_MAX_PORTS;
+ priv->ds->priv = priv;
+ priv->ops = qca8k_switch_ops;
+ priv->ds->ops = &priv->ops;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/realtek-smi-core.c linux-5.4.137/drivers/net/dsa/realtek-smi-core.c
+--- linux-5.4.137.old/drivers/net/dsa/realtek-smi-core.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/realtek-smi-core.c 2021-08-04 14:05:53.891713717 +0700
+@@ -444,9 +444,12 @@ static int realtek_smi_probe(struct plat
+ return ret;
+ }
+
+- smi->ds = dsa_switch_alloc(dev, smi->num_ports);
++ smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
+ if (!smi->ds)
+ return -ENOMEM;
++
++ smi->ds->dev = dev;
++ smi->ds->num_ports = smi->num_ports;
+ smi->ds->priv = smi;
+
+ smi->ds->ops = var->ds_ops;
+diff -urpN linux-5.4.137.old/drivers/net/dsa/sja1105/sja1105_main.c linux-5.4.137/drivers/net/dsa/sja1105/sja1105_main.c
+--- linux-5.4.137.old/drivers/net/dsa/sja1105/sja1105_main.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/sja1105/sja1105_main.c 2021-08-04 14:05:53.891713717 +0700
+@@ -1096,7 +1096,7 @@ int sja1105pqrs_fdb_add(struct dsa_switc
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
++ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+@@ -1159,7 +1159,7 @@ int sja1105pqrs_fdb_del(struct dsa_switc
+ l2_lookup.vlanid = vid;
+ l2_lookup.iotag = SJA1105_S_TAG;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
++ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+@@ -1205,7 +1205,7 @@ static int sja1105_fdb_add(struct dsa_sw
+ * for what gets printed in 'bridge fdb show'. In the case of zero,
+ * no VID gets printed at all.
+ */
+- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
++ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ vid = 0;
+
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
+@@ -1216,7 +1216,7 @@ static int sja1105_fdb_del(struct dsa_sw
+ {
+ struct sja1105_private *priv = ds->priv;
+
+- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
++ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ vid = 0;
+
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+@@ -1255,7 +1255,7 @@ static int sja1105_fdb_dump(struct dsa_s
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ /* We need to hide the dsa_8021q VLANs from the user. */
+- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
++ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ l2_lookup.vlanid = 0;
+ cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ }
+@@ -1748,7 +1748,7 @@ static int sja1105_port_enable(struct ds
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+- slave = ds->ports[port].slave;
++ slave = dsa_to_port(ds, port)->slave;
+
+ slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+@@ -1780,7 +1780,7 @@ static int sja1105_mgmt_xmit(struct dsa_
+ }
+
+ /* Transfer skb to the host port. */
+- dsa_enqueue_skb(skb, ds->ports[port].slave);
++ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+@@ -2198,10 +2198,12 @@ static int sja1105_probe(struct spi_devi
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+- ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
++ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
++ ds->dev = dev;
++ ds->num_ports = SJA1105_NUM_PORTS;
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+@@ -2215,8 +2217,8 @@ static int sja1105_probe(struct spi_devi
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ struct sja1105_port *sp = &priv->ports[i];
+
+- ds->ports[i].priv = sp;
+- sp->dp = &ds->ports[i];
++ dsa_to_port(ds, i)->priv = sp;
++ sp->dp = dsa_to_port(ds, i);
+ sp->data = tagger_data;
+ }
+ mutex_init(&priv->mgmt_lock);
+diff -urpN linux-5.4.137.old/drivers/net/dsa/vitesse-vsc73xx-core.c linux-5.4.137/drivers/net/dsa/vitesse-vsc73xx-core.c
+--- linux-5.4.137.old/drivers/net/dsa/vitesse-vsc73xx-core.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/drivers/net/dsa/vitesse-vsc73xx-core.c 2021-08-04 14:05:53.891713717 +0700
+@@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc)
+ * We allocate 8 ports and avoid access to the nonexistant
+ * ports.
+ */
+- vsc->ds = dsa_switch_alloc(dev, 8);
++ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
+ if (!vsc->ds)
+ return -ENOMEM;
++
++ vsc->ds->dev = dev;
++ vsc->ds->num_ports = 8;
+ vsc->ds->priv = vsc;
+
+ vsc->ds->ops = &vsc73xx_ds_ops;
+diff -urpN linux-5.4.137.old/include/net/dsa.h linux-5.4.137/include/net/dsa.h
+--- linux-5.4.137.old/include/net/dsa.h 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/include/net/dsa.h 2021-08-04 14:05:57.643717592 +0700
+@@ -124,17 +124,46 @@ struct dsa_switch_tree {
+ */
+ struct dsa_platform_data *pd;
+
+- /*
+- * The switch port to which the CPU is attached.
+- */
+- struct dsa_port *cpu_dp;
++ /* List of switch ports */
++ struct list_head ports;
+
+- /*
+- * Data for the individual switch chips.
++ /* List of DSA links composing the routing table */
++ struct list_head rtable;
++
++ /* Maps offloaded LAG netdevs to a zero-based linear ID for
++ * drivers that need it.
+ */
+- struct dsa_switch *ds[DSA_MAX_SWITCHES];
++ struct net_device **lags;
++ unsigned int lags_len;
+ };
+
++#define dsa_lags_foreach_id(_id, _dst) \
++ for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \
++ if ((_dst)->lags[(_id)])
++
++#define dsa_lag_foreach_port(_dp, _dst, _lag) \
++ list_for_each_entry((_dp), &(_dst)->ports, list) \
++ if ((_dp)->lag_dev == (_lag))
++
++static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
++ unsigned int id)
++{
++ return dst->lags[id];
++}
++
++static inline int dsa_lag_id(struct dsa_switch_tree *dst,
++ struct net_device *lag)
++{
++ unsigned int id;
++
++ dsa_lags_foreach_id(id, dst) {
++ if (dsa_lag_dev(dst, id) == lag)
++ return id;
++ }
++
++ return -ENODEV;
++}
++
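++/* Illustrative sketch: a driver can map an offloaded LAG netdev to its linear
++ * ID and walk the member ports like this (ds and lag assumed in scope):
++ *
++ *	struct dsa_port *dp;
++ *	int id = dsa_lag_id(ds->dst, lag);
++ *
++ *	if (id >= 0)
++ *		dsa_lag_foreach_port(dp, ds->dst, lag)
++ *			pr_debug("LAG %d member port %d\n", id, dp->index);
++ */
++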
+ /* TC matchall action types, only mirroring for now */
+ enum dsa_port_mall_action_type {
+ DSA_PORT_MALL_MIRROR,
+@@ -195,10 +224,14 @@ struct dsa_port {
+ struct devlink_port devlink_port;
+ struct phylink *pl;
+ struct phylink_config pl_config;
++ struct net_device *lag_dev;
++ bool lag_tx_enabled;
+
+ struct work_struct xmit_work;
+ struct sk_buff_head xmit_queue;
+
++ struct list_head list;
++
+ /*
+ * Give the switch driver somewhere to hang its per-port private data
+ * structures (accessible from the tagger).
+@@ -214,9 +247,24 @@ struct dsa_port {
+ * Original copy of the master netdev net_device_ops
+ */
+ const struct net_device_ops *orig_ndo_ops;
++
++ bool setup;
++};
++
++/* TODO: ideally DSA ports would have a single dp->link_dp member,
++ * and no dst->rtable nor this struct dsa_link would be needed,
++ * but this would require some more complex tree walking,
++ * so keep it stupid at the moment and list them all.
++ */
++struct dsa_link {
++ struct dsa_port *dp;
++ struct dsa_port *link_dp;
++ struct list_head list;
+ };
+
+ struct dsa_switch {
++ bool setup;
++
+ struct device *dev;
+
+ /*
+@@ -245,13 +293,6 @@ struct dsa_switch {
+ const struct dsa_switch_ops *ops;
+
+ /*
+- * An array of which element [a] indicates which port on this
+- * switch should be used to send packets to that are destined
+- * for switch a. Can be NULL if there is only one switch chip.
+- */
+- s8 rtable[DSA_MAX_SWITCHES];
+-
+- /*
+ * Slave mii_bus and devices for the individual ports.
+ */
+ u32 phys_mii_mask;
+@@ -289,14 +330,27 @@ struct dsa_switch {
+ */
+ bool vlan_filtering;
+
+- /* Dynamically allocated ports, keep last */
+ size_t num_ports;
+- struct dsa_port ports[];
++
++ /* Drivers that benefit from having an ID associated with each
++ * offloaded LAG should set this to the maximum number of
++ * supported IDs. DSA will then maintain a mapping of _at
++ * least_ these many IDs, accessible to drivers via
++ * dsa_lag_id().
++ */
++ unsigned int num_lag_ids;
+ };
+
+-static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
++static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
+ {
+- return &ds->ports[p];
++ struct dsa_switch_tree *dst = ds->dst;
++ struct dsa_port *dp;
++
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dp->ds == ds && dp->index == p)
++ return dp;
++
++ return NULL;
+ }
+
+ static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
+@@ -331,6 +385,19 @@ static inline u32 dsa_user_ports(struct
+ return mask;
+ }
+
++/* Return the local port used to reach an arbitrary switch device */
++static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
++{
++ struct dsa_switch_tree *dst = ds->dst;
++ struct dsa_link *dl;
++
++ list_for_each_entry(dl, &dst->rtable, list)
++ if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
++ return dl->dp->index;
++
++ return ds->num_ports;
++}
++
+ /* Return the local port used to reach an arbitrary switch port */
+ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+ int port)
+@@ -338,7 +405,7 @@ static inline unsigned int dsa_towards_p
+ if (device == ds->index)
+ return port;
+ else
+- return ds->rtable[device];
++ return dsa_routing_port(ds, device);
+ }
+
+ /* Return the local port used to reach the dedicated CPU port */
+@@ -539,6 +606,13 @@ struct dsa_switch_ops {
+ int port, struct net_device *br);
+ void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *br);
++ int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
++ int port);
++ int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
++ int port, struct net_device *lag,
++ struct netdev_lag_upper_info *info);
++ int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
++ int port, struct net_device *lag);
+
+ /*
+ * PTP functionality
+@@ -557,6 +631,16 @@ struct dsa_switch_ops {
+ */
+ netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
++
++ /*
++ * LAG integration
++ */
++ int (*port_lag_change)(struct dsa_switch *ds, int port);
++ int (*port_lag_join)(struct dsa_switch *ds, int port,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info);
++ int (*port_lag_leave)(struct dsa_switch *ds, int port,
++ struct net_device *lag);
+ };
+
+ struct dsa_switch_driver {
+@@ -584,7 +668,6 @@ static inline bool dsa_can_decode(const
+ return false;
+ }
+
+-struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
+ void dsa_unregister_switch(struct dsa_switch *ds);
+ int dsa_register_switch(struct dsa_switch *ds);
+ #ifdef CONFIG_PM_SLEEP
+@@ -628,6 +711,7 @@ int register_dsa_notifier(struct notifie
+ int unregister_dsa_notifier(struct notifier_block *nb);
+ int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+ struct dsa_notifier_info *info);
++bool dsa_slave_dev_check(const struct net_device *dev);
+ #else
+ static inline int register_dsa_notifier(struct notifier_block *nb)
+ {
+@@ -644,6 +728,11 @@ static inline int call_dsa_notifiers(uns
+ {
+ return NOTIFY_DONE;
+ }
++
++static inline bool dsa_slave_dev_check(const struct net_device *dev)
++{
++ return false;
++}
+ #endif
+
+ /* Broadcom tag specific helpers to insert and extract queue/port number */
+diff -urpN linux-5.4.137.old/net/dsa/Kconfig linux-5.4.137/net/dsa/Kconfig
+--- linux-5.4.137.old/net/dsa/Kconfig 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/Kconfig 2021-08-04 14:05:53.891713717 +0700
+@@ -56,14 +56,19 @@ config NET_DSA_TAG_GSWIP
+ Say Y or M if you want to enable support for tagging frames for the
+ Lantiq / Intel GSWIP switches.
+
++config NET_DSA_TAG_DSA_COMMON
++ tristate
++
+ config NET_DSA_TAG_DSA
+ tristate "Tag driver for Marvell switches using DSA headers"
++ select NET_DSA_TAG_DSA_COMMON
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Marvell switches which use DSA headers.
+
+ config NET_DSA_TAG_EDSA
+ tristate "Tag driver for Marvell switches using EtherType DSA headers"
++ select NET_DSA_TAG_DSA_COMMON
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Marvell switches which use EtherType DSA headers.
+diff -urpN linux-5.4.137.old/net/dsa/Makefile linux-5.4.137/net/dsa/Makefile
+--- linux-5.4.137.old/net/dsa/Makefile 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/Makefile 2021-08-04 14:05:53.891713717 +0700
+@@ -6,8 +6,7 @@ dsa_core-y += dsa.o dsa2.o master.o port
+ # tagging formats
+ obj-$(CONFIG_NET_DSA_TAG_8021Q) += tag_8021q.o
+ obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
+-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
++obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
+ obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
+ obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
+ obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+diff -urpN linux-5.4.137.old/net/dsa/dsa.c linux-5.4.137/net/dsa/dsa.c
+--- linux-5.4.137.old/net/dsa/dsa.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/dsa.c 2021-08-04 14:05:53.891713717 +0700
+@@ -224,11 +224,21 @@ static int dsa_switch_rcv(struct sk_buff
+ }
+
+ skb = nskb;
+- p = netdev_priv(skb->dev);
+ skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
++ if (unlikely(!dsa_slave_dev_check(skb->dev))) {
++ /* Packet is to be injected directly on an upper
++ * device, e.g. a team/bond, so skip all DSA-port
++ * specific actions.
++ */
++ netif_rx(skb);
++ return 0;
++ }
++
++ p = netdev_priv(skb->dev);
++
+ s = this_cpu_ptr(p->stats64);
+ u64_stats_update_begin(&s->syncp);
+ s->rx_packets++;
+@@ -246,7 +256,9 @@ static int dsa_switch_rcv(struct sk_buff
+ #ifdef CONFIG_PM_SLEEP
+ static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+ {
+- return dsa_is_user_port(ds, p) && ds->ports[p].slave;
++ const struct dsa_port *dp = dsa_to_port(ds, p);
++
++ return dp->type == DSA_PORT_TYPE_USER && dp->slave;
+ }
+
+ int dsa_switch_suspend(struct dsa_switch *ds)
+@@ -258,7 +270,7 @@ int dsa_switch_suspend(struct dsa_switch
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+- ret = dsa_slave_suspend(ds->ports[i].slave);
++ ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
+ if (ret)
+ return ret;
+ }
+@@ -285,7 +297,7 @@ int dsa_switch_resume(struct dsa_switch
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+- ret = dsa_slave_resume(ds->ports[i].slave);
++ ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
+ if (ret)
+ return ret;
+ }
+diff -urpN linux-5.4.137.old/net/dsa/dsa2.c linux-5.4.137/net/dsa/dsa2.c
+--- linux-5.4.137.old/net/dsa/dsa2.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/dsa2.c 2021-08-04 14:05:57.643717592 +0700
+@@ -25,6 +25,65 @@ static DEFINE_MUTEX(dsa2_mutex);
+ static const struct devlink_ops dsa_devlink_ops = {
+ };
+
++/**
++ * dsa_lag_map() - Map LAG netdev to a linear LAG ID
++ * @dst: Tree in which to record the mapping.
++ * @lag: Netdev that is to be mapped to an ID.
++ *
++ * dsa_lag_id/dsa_lag_dev can then be used to translate between the
++ * two spaces. The size of the mapping space is determined by the
++ * driver by setting ds->num_lag_ids. It is perfectly legal to leave
++ * it unset if it is not needed, in which case these functions become
++ * no-ops.
++ */
++void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
++{
++ unsigned int id;
++
++ if (dsa_lag_id(dst, lag) >= 0)
++ /* Already mapped */
++ return;
++
++ for (id = 0; id < dst->lags_len; id++) {
++ if (!dsa_lag_dev(dst, id)) {
++ dst->lags[id] = lag;
++ return;
++ }
++ }
++
++ /* No IDs left, which is OK. Some drivers do not need it. The
++ * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
++ * returns an error for this device when joining the LAG. The
++ * driver can then return -EOPNOTSUPP back to DSA, which will
++ * fall back to a software LAG.
++ */
++}
++
++/**
++ * dsa_lag_unmap() - Remove a LAG ID mapping
++ * @dst: Tree in which the mapping is recorded.
++ * @lag: Netdev that was mapped.
++ *
++ * As there may be multiple users of the mapping, it is only removed
++ * if there are no other references to it.
++ */
++void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
++{
++ struct dsa_port *dp;
++ unsigned int id;
++
++ dsa_lag_foreach_port(dp, dst, lag)
++ /* There are remaining users of this mapping */
++ return;
++
++ dsa_lags_foreach_id(id, dst) {
++ if (dsa_lag_dev(dst, id) == lag) {
++ dst->lags[id] = NULL;
++ break;
++ }
++ }
++}
++
+ static struct dsa_switch_tree *dsa_tree_find(int index)
+ {
+ struct dsa_switch_tree *dst;
+@@ -46,6 +105,10 @@ static struct dsa_switch_tree *dsa_tree_
+
+ dst->index = index;
+
++ INIT_LIST_HEAD(&dst->rtable);
++
++ INIT_LIST_HEAD(&dst->ports);
++
+ INIT_LIST_HEAD(&dst->list);
+ list_add_tail(&dst->list, &dsa_tree_list);
+
+@@ -112,24 +175,38 @@ static bool dsa_port_is_user(struct dsa_
+ static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
+ struct device_node *dn)
+ {
+- struct dsa_switch *ds;
+ struct dsa_port *dp;
+- int device, port;
+
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dp->dn == dn)
++ return dp;
+
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
++ return NULL;
++}
+
+- if (dp->dn == dn)
+- return dp;
+- }
+- }
++struct dsa_link *dsa_link_touch(struct dsa_port *dp, struct dsa_port *link_dp)
++{
++ struct dsa_switch *ds = dp->ds;
++ struct dsa_switch_tree *dst;
++ struct dsa_link *dl;
+
+- return NULL;
++ dst = ds->dst;
++
++ list_for_each_entry(dl, &dst->rtable, list)
++ if (dl->dp == dp && dl->link_dp == link_dp)
++ return dl;
++
++ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
++ if (!dl)
++ return NULL;
++
++ dl->dp = dp;
++ dl->link_dp = link_dp;
++
++ INIT_LIST_HEAD(&dl->list);
++ list_add_tail(&dl->list, &dst->rtable);
++
++ return dl;
+ }
+
+ static bool dsa_port_setup_routing_table(struct dsa_port *dp)
+@@ -139,6 +216,7 @@ static bool dsa_port_setup_routing_table
+ struct device_node *dn = dp->dn;
+ struct of_phandle_iterator it;
+ struct dsa_port *link_dp;
++ struct dsa_link *dl;
+ int err;
+
+ of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
+@@ -148,24 +226,22 @@ static bool dsa_port_setup_routing_table
+ return false;
+ }
+
+- ds->rtable[link_dp->ds->index] = dp->index;
++ dl = dsa_link_touch(dp, link_dp);
++ if (!dl) {
++ of_node_put(it.node);
++ return false;
++ }
+ }
+
+ return true;
+ }
+
+-static bool dsa_switch_setup_routing_table(struct dsa_switch *ds)
++static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
+ {
+ bool complete = true;
+ struct dsa_port *dp;
+- int i;
+-
+- for (i = 0; i < DSA_MAX_SWITCHES; i++)
+- ds->rtable[i] = DSA_RTABLE_NONE;
+-
+- for (i = 0; i < ds->num_ports; i++) {
+- dp = &ds->ports[i];
+
++ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_is_dsa(dp)) {
+ complete = dsa_port_setup_routing_table(dp);
+ if (!complete)
+@@ -176,81 +252,42 @@ static bool dsa_switch_setup_routing_tab
+ return complete;
+ }
+
+-static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
+-{
+- struct dsa_switch *ds;
+- bool complete = true;
+- int device;
+-
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
+-
+- complete = dsa_switch_setup_routing_table(ds);
+- if (!complete)
+- break;
+- }
+-
+- return complete;
+-}
+-
+ static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
+ {
+- struct dsa_switch *ds;
+ struct dsa_port *dp;
+- int device, port;
+
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
+-
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
+-
+- if (dsa_port_is_cpu(dp))
+- return dp;
+- }
+- }
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dsa_port_is_cpu(dp))
++ return dp;
+
+ return NULL;
+ }
+
+ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
+ {
+- struct dsa_switch *ds;
+- struct dsa_port *dp;
+- int device, port;
++ struct dsa_port *cpu_dp, *dp;
+
+- /* DSA currently only supports a single CPU port */
+- dst->cpu_dp = dsa_tree_find_first_cpu(dst);
+- if (!dst->cpu_dp) {
+- pr_warn("Tree has no master device\n");
++ cpu_dp = dsa_tree_find_first_cpu(dst);
++ if (!cpu_dp) {
++ pr_err("DSA: tree %d has no CPU port\n", dst->index);
+ return -EINVAL;
+ }
+
+ /* Assign the default CPU port to all ports of the fabric */
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
+-
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
+-
+- if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
+- dp->cpu_dp = dst->cpu_dp;
+- }
+- }
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
++ dp->cpu_dp = cpu_dp;
+
+ return 0;
+ }
+
+ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
+ {
+- /* DSA currently only supports a single CPU port */
+- dst->cpu_dp = NULL;
++ struct dsa_port *dp;
++
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
++ dp->cpu_dp = NULL;
+ }
+
+ static int dsa_port_setup(struct dsa_port *dp)
+@@ -266,6 +303,9 @@ static int dsa_port_setup(struct dsa_por
+ bool dsa_port_enabled = false;
+ int err = 0;
+
++ if (dp->setup)
++ return 0;
++
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ dsa_port_disable(dp);
+@@ -335,14 +375,21 @@ static int dsa_port_setup(struct dsa_por
+ dsa_port_link_unregister_of(dp);
+ if (err && devlink_port_registered)
+ devlink_port_unregister(dlp);
++ if (err)
++ return err;
+
+- return err;
++ dp->setup = true;
++
++ return 0;
+ }
+
+ static void dsa_port_teardown(struct dsa_port *dp)
+ {
+ struct devlink_port *dlp = &dp->devlink_port;
+
++ if (!dp->setup)
++ return;
++
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ break;
+@@ -365,11 +412,16 @@ static void dsa_port_teardown(struct dsa
+ }
+ break;
+ }
++
++ dp->setup = false;
+ }
+
+ static int dsa_switch_setup(struct dsa_switch *ds)
+ {
+- int err = 0;
++ int err;
++
++ if (ds->setup)
++ return 0;
+
+ /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
+ * driver and before ops->setup() has run, since the switch drivers and
+@@ -411,6 +463,8 @@ static int dsa_switch_setup(struct dsa_s
+ goto teardown;
+ }
+
++ ds->setup = true;
++
+ return 0;
+
+ teardown:
+@@ -429,6 +483,9 @@ free_devlink:
+
+ static void dsa_switch_teardown(struct dsa_switch *ds)
+ {
++ if (!ds->setup)
++ return;
++
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_unregister(ds->slave_mii_bus);
+
+@@ -443,89 +500,98 @@ static void dsa_switch_teardown(struct d
+ ds->devlink = NULL;
+ }
+
++ ds->setup = false;
+ }
+
+ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+ {
+- struct dsa_switch *ds;
+ struct dsa_port *dp;
+- int device, port, i;
+- int err = 0;
+-
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
++ int err;
+
+- err = dsa_switch_setup(ds);
++ list_for_each_entry(dp, &dst->ports, list) {
++ err = dsa_switch_setup(dp->ds);
+ if (err)
+- goto switch_teardown;
+-
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
+-
+- err = dsa_port_setup(dp);
+- if (err)
+- continue;
+- }
++ goto teardown;
+ }
+
+- return 0;
+-
+-switch_teardown:
+- for (i = 0; i < device; i++) {
+- ds = dst->ds[i];
+- if (!ds)
+- continue;
+-
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
++ list_for_each_entry(dp, &dst->ports, list) {
++ err = dsa_port_setup(dp);
++ if (err)
++ goto teardown;
++ }
+
+- dsa_port_teardown(dp);
+- }
++ return 0;
+
+- dsa_switch_teardown(ds);
+- }
++teardown:
++ list_for_each_entry(dp, &dst->ports, list)
++ dsa_port_teardown(dp);
++
++ list_for_each_entry(dp, &dst->ports, list)
++ dsa_switch_teardown(dp->ds);
+
+ return err;
+ }
+
+ static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+ {
+- struct dsa_switch *ds;
+ struct dsa_port *dp;
+- int device, port;
+
+- for (device = 0; device < DSA_MAX_SWITCHES; device++) {
+- ds = dst->ds[device];
+- if (!ds)
+- continue;
++ list_for_each_entry(dp, &dst->ports, list)
++ dsa_port_teardown(dp);
+
+- for (port = 0; port < ds->num_ports; port++) {
+- dp = &ds->ports[port];
++ list_for_each_entry(dp, &dst->ports, list)
++ dsa_switch_teardown(dp->ds);
++}
+
+- dsa_port_teardown(dp);
+- }
++static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
++{
++ struct dsa_port *dp;
++ int err;
+
+- dsa_switch_teardown(ds);
++ list_for_each_entry(dp, &dst->ports, list) {
++ if (dsa_port_is_cpu(dp)) {
++ err = dsa_master_setup(dp->master, dp);
++ if (err)
++ return err;
++ }
+ }
++
++ return 0;
+ }
+
+-static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
++static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
+ {
+- struct dsa_port *cpu_dp = dst->cpu_dp;
+- struct net_device *master = cpu_dp->master;
++ struct dsa_port *dp;
+
+- /* DSA currently supports a single pair of CPU port and master device */
+- return dsa_master_setup(master, cpu_dp);
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dsa_port_is_cpu(dp))
++ dsa_master_teardown(dp->master);
+ }
+
+-static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
++static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
+ {
+- struct dsa_port *cpu_dp = dst->cpu_dp;
+- struct net_device *master = cpu_dp->master;
++ unsigned int len = 0;
++ struct dsa_port *dp;
++
++ list_for_each_entry(dp, &dst->ports, list) {
++ if (dp->ds->num_lag_ids > len)
++ len = dp->ds->num_lag_ids;
++ }
+
+- return dsa_master_teardown(master);
++ if (!len)
++ return 0;
++
++ dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
++ if (!dst->lags)
++ return -ENOMEM;
++
++ dst->lags_len = len;
++ return 0;
++}
++
++static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
++{
++ kfree(dst->lags);
+ }
+
+ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+@@ -555,12 +621,18 @@ static int dsa_tree_setup(struct dsa_swi
+ if (err)
+ goto teardown_switches;
+
++ err = dsa_tree_setup_lags(dst);
++ if (err)
++ goto teardown_master;
++
+ dst->setup = true;
+
+ pr_info("DSA: tree %d setup\n", dst->index);
+
+ return 0;
+
++teardown_master:
++ dsa_tree_teardown_master(dst);
+ teardown_switches:
+ dsa_tree_teardown_switches(dst);
+ teardown_default_cpu:
+@@ -571,48 +643,49 @@ teardown_default_cpu:
+
+ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+ {
++ struct dsa_link *dl, *next;
++
+ if (!dst->setup)
+ return;
+
++ dsa_tree_teardown_lags(dst);
++
+ dsa_tree_teardown_master(dst);
+
+ dsa_tree_teardown_switches(dst);
+
+ dsa_tree_teardown_default_cpu(dst);
+
++ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
++ list_del(&dl->list);
++ kfree(dl);
++ }
++
+ pr_info("DSA: tree %d torn down\n", dst->index);
+
+ dst->setup = false;
+ }
+
+-static void dsa_tree_remove_switch(struct dsa_switch_tree *dst,
+- unsigned int index)
++static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
+ {
+- dsa_tree_teardown(dst);
++ struct dsa_switch_tree *dst = ds->dst;
++ struct dsa_port *dp;
+
+- dst->ds[index] = NULL;
+- dsa_tree_put(dst);
+-}
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dp->ds == ds && dp->index == index)
++ return dp;
+
+-static int dsa_tree_add_switch(struct dsa_switch_tree *dst,
+- struct dsa_switch *ds)
+-{
+- unsigned int index = ds->index;
+- int err;
++ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
++ if (!dp)
++ return NULL;
+
+- if (dst->ds[index])
+- return -EBUSY;
++ dp->ds = ds;
++ dp->index = index;
+
+- dsa_tree_get(dst);
+- dst->ds[index] = ds;
++ INIT_LIST_HEAD(&dp->list);
++ list_add_tail(&dp->list, &dst->ports);
+
+- err = dsa_tree_setup(dst);
+- if (err) {
+- dst->ds[index] = NULL;
+- dsa_tree_put(dst);
+- }
+-
+- return err;
++ return dp;
+ }
+
+ static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
+@@ -707,7 +780,7 @@ static int dsa_switch_parse_ports_of(str
+ goto out_put_node;
+ }
+
+- dp = &ds->ports[reg];
++ dp = dsa_to_port(ds, reg);
+
+ err = dsa_port_parse_of(dp, port);
+ if (err)
+@@ -731,8 +804,6 @@ static int dsa_switch_parse_member_of(st
+ return sz;
+
+ ds->index = m[1];
+- if (ds->index >= DSA_MAX_SWITCHES)
+- return -EINVAL;
+
+ ds->dst = dsa_tree_touch(m[0]);
+ if (!ds->dst)
+@@ -741,6 +812,20 @@ static int dsa_switch_parse_member_of(st
+ return 0;
+ }
+
++static int dsa_switch_touch_ports(struct dsa_switch *ds)
++{
++ struct dsa_port *dp;
++ int port;
++
++ for (port = 0; port < ds->num_ports; port++) {
++ dp = dsa_port_touch(ds, port);
++ if (!dp)
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
+ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
+ {
+ int err;
+@@ -749,6 +834,10 @@ static int dsa_switch_parse_of(struct ds
+ if (err)
+ return err;
+
++ err = dsa_switch_touch_ports(ds);
++ if (err)
++ return err;
++
+ return dsa_switch_parse_ports_of(ds, dn);
+ }
+
+@@ -786,7 +875,7 @@ static int dsa_switch_parse_ports(struct
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ name = cd->port_names[i];
+ dev = cd->netdev[i];
+- dp = &ds->ports[i];
++ dp = dsa_to_port(ds, i);
+
+ if (!name)
+ continue;
+@@ -806,6 +895,8 @@ static int dsa_switch_parse_ports(struct
+
+ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
+ {
++ int err;
++
+ ds->cd = cd;
+
+ /* We don't support interconnected switches nor multiple trees via
+@@ -816,22 +907,26 @@ static int dsa_switch_parse(struct dsa_s
+ if (!ds->dst)
+ return -ENOMEM;
+
+- return dsa_switch_parse_ports(ds, cd);
+-}
+-
+-static int dsa_switch_add(struct dsa_switch *ds)
+-{
+- struct dsa_switch_tree *dst = ds->dst;
++ err = dsa_switch_touch_ports(ds);
++ if (err)
++ return err;
+
+- return dsa_tree_add_switch(dst, ds);
++ return dsa_switch_parse_ports(ds, cd);
+ }
+
+ static int dsa_switch_probe(struct dsa_switch *ds)
+ {
++ struct dsa_switch_tree *dst;
+ struct dsa_chip_data *pdata = ds->dev->platform_data;
+ struct device_node *np = ds->dev->of_node;
+ int err;
+
++ if (!ds->dev)
++ return -ENODEV;
++
++ if (!ds->num_ports)
++ return -EINVAL;
++
+ if (np)
+ err = dsa_switch_parse_of(ds, np);
+ else if (pdata)
+@@ -842,29 +937,14 @@ static int dsa_switch_probe(struct dsa_s
+ if (err)
+ return err;
+
+- return dsa_switch_add(ds);
+-}
+-
+-struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
+-{
+- struct dsa_switch *ds;
+- int i;
+-
+- ds = devm_kzalloc(dev, struct_size(ds, ports, n), GFP_KERNEL);
+- if (!ds)
+- return NULL;
+-
+- ds->dev = dev;
+- ds->num_ports = n;
+-
+- for (i = 0; i < ds->num_ports; ++i) {
+- ds->ports[i].index = i;
+- ds->ports[i].ds = ds;
+- }
++ dst = ds->dst;
++ dsa_tree_get(dst);
++ err = dsa_tree_setup(dst);
++ if (err)
++ dsa_tree_put(dst);
+
+- return ds;
++ return err;
+ }
+-EXPORT_SYMBOL_GPL(dsa_switch_alloc);
+
+ int dsa_register_switch(struct dsa_switch *ds)
+ {
+@@ -882,9 +962,15 @@ EXPORT_SYMBOL_GPL(dsa_register_switch);
+ static void dsa_switch_remove(struct dsa_switch *ds)
+ {
+ struct dsa_switch_tree *dst = ds->dst;
+- unsigned int index = ds->index;
++ struct dsa_port *dp, *next;
++
++ list_for_each_entry_safe(dp, next, &dst->ports, list) {
++ list_del(&dp->list);
++ kfree(dp);
++ }
+
+- dsa_tree_remove_switch(dst, index);
++ dsa_tree_teardown(dst);
++ dsa_tree_put(dst);
+ }
+
+ void dsa_unregister_switch(struct dsa_switch *ds)
+diff -urpN linux-5.4.137.old/net/dsa/dsa_priv.h linux-5.4.137/net/dsa/dsa_priv.h
+--- linux-5.4.137.old/net/dsa/dsa_priv.h 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/dsa_priv.h 2021-08-04 14:05:53.891713717 +0700
+@@ -19,6 +19,9 @@ enum {
+ DSA_NOTIFIER_BRIDGE_LEAVE,
+ DSA_NOTIFIER_FDB_ADD,
+ DSA_NOTIFIER_FDB_DEL,
++ DSA_NOTIFIER_LAG_CHANGE,
++ DSA_NOTIFIER_LAG_JOIN,
++ DSA_NOTIFIER_LAG_LEAVE,
+ DSA_NOTIFIER_MDB_ADD,
+ DSA_NOTIFIER_MDB_DEL,
+ DSA_NOTIFIER_VLAN_ADD,
+@@ -54,6 +57,15 @@ struct dsa_notifier_mdb_info {
+ int port;
+ };
+
++/* DSA_NOTIFIER_LAG_* */
++struct dsa_notifier_lag_info {
++ struct net_device *lag;
++ int sw_index;
++ int port;
++
++ struct netdev_lag_upper_info *info;
++};
++
+ /* DSA_NOTIFIER_VLAN_* */
+ struct dsa_notifier_vlan_info {
+ const struct switchdev_obj_port_vlan *vlan;
+@@ -119,25 +131,14 @@ static inline struct net_device *dsa_mas
+ {
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+- struct dsa_switch *ds;
+- struct dsa_port *slave_port;
+-
+- if (device < 0 || device >= DSA_MAX_SWITCHES)
+- return NULL;
+-
+- ds = dst->ds[device];
+- if (!ds)
+- return NULL;
+-
+- if (port < 0 || port >= ds->num_ports)
+- return NULL;
++ struct dsa_port *dp;
+
+- slave_port = &ds->ports[port];
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dp->ds->index == device && dp->index == port &&
++ dp->type == DSA_PORT_TYPE_USER)
++ return dp->slave;
+
+- if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
+- return NULL;
+-
+- return slave_port->slave;
++ return NULL;
+ }
+
+ /* port.c */
+@@ -149,6 +150,11 @@ void dsa_port_disable_rt(struct dsa_port
+ void dsa_port_disable(struct dsa_port *dp);
+ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
+ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
++int dsa_port_lag_change(struct dsa_port *dp,
++ struct netdev_lag_lower_state_info *linfo);
++int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
++ struct netdev_lag_upper_info *uinfo);
++void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
+ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans);
+ bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
+@@ -197,6 +203,22 @@ void dsa_port_phylink_mac_link_up(struct
+ struct phy_device *phydev);
+ extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
+
++static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
++ struct net_device *dev)
++{
++ /* Switchdev offloading can be configured on: */
++
++ if (dev == dp->slave)
++ /* DSA ports directly connected to a bridge. */
++ return true;
++
++ if (dp->lag_dev == dev)
++ /* DSA ports connected to a bridge via a LAG */
++ return true;
++
++ return false;
++}
++
+ /* slave.c */
+ extern const struct dsa_device_ops notag_netdev_ops;
+ void dsa_slave_mii_bus_init(struct dsa_switch *ds);
+@@ -227,4 +249,9 @@ dsa_slave_to_master(const struct net_dev
+ /* switch.c */
+ int dsa_switch_register_notifier(struct dsa_switch *ds);
+ void dsa_switch_unregister_notifier(struct dsa_switch *ds);
++
++/* dsa2.c */
++void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
++void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
++
+ #endif
+diff -urpN linux-5.4.137.old/net/dsa/port.c linux-5.4.137/net/dsa/port.c
+--- linux-5.4.137.old/net/dsa/port.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/port.c 2021-08-04 14:05:53.891713717 +0700
+@@ -174,6 +174,85 @@ void dsa_port_bridge_leave(struct dsa_po
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+ }
+
++int dsa_port_lag_change(struct dsa_port *dp,
++ struct netdev_lag_lower_state_info *linfo)
++{
++ struct dsa_notifier_lag_info info = {
++ .sw_index = dp->ds->index,
++ .port = dp->index,
++ };
++ bool tx_enabled;
++
++ if (!dp->lag_dev)
++ return 0;
++
++ /* On statically configured aggregates (e.g. loadbalance
++ * without LACP) ports will always be tx_enabled, even if the
++ * link is down. Thus we require both link_up and tx_enabled
++ * in order to include it in the tx set.
++ */
++ tx_enabled = linfo->link_up && linfo->tx_enabled;
++
++ if (tx_enabled == dp->lag_tx_enabled)
++ return 0;
++
++ dp->lag_tx_enabled = tx_enabled;
++
++ return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
++}
++
++int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
++ struct netdev_lag_upper_info *uinfo)
++{
++ struct dsa_notifier_lag_info info = {
++ .sw_index = dp->ds->index,
++ .port = dp->index,
++ .lag = lag,
++ .info = uinfo,
++ };
++ int err;
++
++ dsa_lag_map(dp->ds->dst, lag);
++ dp->lag_dev = lag;
++
++ err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
++ if (err) {
++ dp->lag_dev = NULL;
++ dsa_lag_unmap(dp->ds->dst, lag);
++ }
++
++ return err;
++}
++
++void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
++{
++ struct dsa_notifier_lag_info info = {
++ .sw_index = dp->ds->index,
++ .port = dp->index,
++ .lag = lag,
++ };
++ int err;
++
++ if (!dp->lag_dev)
++ return;
++
++ /* Port might have been part of a LAG that in turn was
++ * attached to a bridge.
++ */
++ if (dp->bridge_dev)
++ dsa_port_bridge_leave(dp, dp->bridge_dev);
++
++ dp->lag_tx_enabled = false;
++ dp->lag_dev = NULL;
++
++ err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
++ if (err)
++ pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
++ err);
++
++ dsa_lag_unmap(dp->ds->dst, lag);
++}
++
+ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
+ bool vlan_filtering)
+ {
+diff -urpN linux-5.4.137.old/net/dsa/slave.c linux-5.4.137/net/dsa/slave.c
+--- linux-5.4.137.old/net/dsa/slave.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/slave.c 2021-08-04 14:05:53.891713717 +0700
+@@ -26,8 +26,6 @@
+
+ #include "dsa_priv.h"
+
+-static bool dsa_slave_dev_check(const struct net_device *dev);
+-
+ /* slave mii_bus handling ***************************************************/
+ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
+ {
+@@ -286,6 +284,9 @@ static int dsa_slave_port_attr_set(struc
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int ret;
+
++ if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
+@@ -323,7 +324,7 @@ static int dsa_slave_vlan_add(struct net
+ struct switchdev_obj_port_vlan vlan;
+ int err;
+
+- if (obj->orig_dev != dev)
++ if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+@@ -363,7 +364,7 @@ static int dsa_slave_port_obj_add(struct
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+- if (obj->orig_dev != dev)
++ if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
+ break;
+@@ -390,7 +391,7 @@ static int dsa_slave_vlan_del(struct net
+ {
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+- if (obj->orig_dev != dev)
++ if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+@@ -410,7 +411,7 @@ static int dsa_slave_port_obj_del(struct
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+- if (obj->orig_dev != dev)
++ if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+@@ -1527,10 +1528,11 @@ void dsa_slave_destroy(struct net_device
+ free_netdev(slave_dev);
+ }
+
+-static bool dsa_slave_dev_check(const struct net_device *dev)
++bool dsa_slave_dev_check(const struct net_device *dev)
+ {
+ return dev->netdev_ops == &dsa_slave_netdev_ops;
+ }
++EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
+
+ static int dsa_slave_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+@@ -1546,6 +1548,46 @@ static int dsa_slave_changeupper(struct
+ dsa_port_bridge_leave(dp, info->upper_dev);
+ err = NOTIFY_OK;
+ }
++ } else if (netif_is_lag_master(info->upper_dev)) {
++ if (info->linking) {
++ err = dsa_port_lag_join(dp, info->upper_dev,
++ info->upper_info);
++ if (err == -EOPNOTSUPP) {
++ NL_SET_ERR_MSG_MOD(info->info.extack,
++ "Offloading not supported");
++ err = 0;
++ }
++ err = notifier_from_errno(err);
++ } else {
++ dsa_port_lag_leave(dp, info->upper_dev);
++ err = NOTIFY_OK;
++ }
++ }
++
++ return err;
++}
++
++static int
++dsa_slave_lag_changeupper(struct net_device *dev,
++ struct netdev_notifier_changeupper_info *info)
++{
++ struct net_device *lower;
++ struct list_head *iter;
++ int err = NOTIFY_DONE;
++ struct dsa_port *dp;
++
++ netdev_for_each_lower_dev(dev, lower, iter) {
++ if (!dsa_slave_dev_check(lower))
++ continue;
++
++ dp = dsa_slave_to_port(lower);
++ if (!dp->lag_dev)
++ /* Software LAG */
++ continue;
++
++ err = dsa_slave_changeupper(lower, info);
++ if (notifier_to_errno(err))
++ break;
+ }
+
+ return err;
+@@ -1588,11 +1630,33 @@ static int dsa_slave_netdevice_event(str
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+- if (event == NETDEV_CHANGEUPPER) {
++	switch (event) {
++ case NETDEV_PRECHANGEUPPER:
+ if (!dsa_slave_dev_check(dev))
+ return dsa_slave_upper_vlan_check(dev, ptr);
+
+- return dsa_slave_changeupper(dev, ptr);
++ break;
++ case NETDEV_CHANGEUPPER:
++ if (dsa_slave_dev_check(dev))
++ return dsa_slave_changeupper(dev, ptr);
++
++ if (netif_is_lag_master(dev))
++ return dsa_slave_lag_changeupper(dev, ptr);
++
++ break;
++ case NETDEV_CHANGELOWERSTATE: {
++ struct netdev_notifier_changelowerstate_info *info = ptr;
++ struct dsa_port *dp;
++ int err;
++
++ if (!dsa_slave_dev_check(dev))
++ break;
++
++ dp = dsa_slave_to_port(dev);
++
++ err = dsa_port_lag_change(dp, info->lower_state_info);
++ return notifier_from_errno(err);
++ }
+ }
+
+ return NOTIFY_DONE;
+diff -urpN linux-5.4.137.old/net/dsa/switch.c linux-5.4.137/net/dsa/switch.c
+--- linux-5.4.137.old/net/dsa/switch.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/switch.c 2021-08-04 14:05:53.891713717 +0700
+@@ -20,7 +20,7 @@ static unsigned int dsa_switch_fastest_a
+ int i;
+
+ for (i = 0; i < ds->num_ports; ++i) {
+- struct dsa_port *dp = &ds->ports[i];
++ struct dsa_port *dp = dsa_to_port(ds, i);
+
+ if (dp->ageing_time && dp->ageing_time < ageing_time)
+ ageing_time = dp->ageing_time;
+@@ -98,7 +98,7 @@ static int dsa_switch_bridge_leave(struc
+ if (unset_vlan_filtering) {
+ struct switchdev_trans trans = {0};
+
+- err = dsa_port_vlan_filtering(&ds->ports[info->port],
++ err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
+ false, &trans);
+ if (err && err != EOPNOTSUPP)
+ return err;
+@@ -128,6 +128,47 @@ static int dsa_switch_fdb_del(struct dsa
+ return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+ }
+
++static int dsa_switch_lag_change(struct dsa_switch *ds,
++ struct dsa_notifier_lag_info *info)
++{
++ if (ds->index == info->sw_index && ds->ops->port_lag_change)
++ return ds->ops->port_lag_change(ds, info->port);
++
++ if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
++ return ds->ops->crosschip_lag_change(ds, info->sw_index,
++ info->port);
++
++ return 0;
++}
++
++static int dsa_switch_lag_join(struct dsa_switch *ds,
++ struct dsa_notifier_lag_info *info)
++{
++ if (ds->index == info->sw_index && ds->ops->port_lag_join)
++ return ds->ops->port_lag_join(ds, info->port, info->lag,
++ info->info);
++
++ if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
++ return ds->ops->crosschip_lag_join(ds, info->sw_index,
++ info->port, info->lag,
++ info->info);
++
++ return 0;
++}
++
++static int dsa_switch_lag_leave(struct dsa_switch *ds,
++ struct dsa_notifier_lag_info *info)
++{
++ if (ds->index == info->sw_index && ds->ops->port_lag_leave)
++ return ds->ops->port_lag_leave(ds, info->port, info->lag);
++
++ if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
++ return ds->ops->crosschip_lag_leave(ds, info->sw_index,
++ info->port, info->lag);
++
++ return 0;
++}
++
+ static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
+ struct dsa_notifier_mdb_info *info)
+ {
+@@ -316,6 +357,15 @@ static int dsa_switch_event(struct notif
+ case DSA_NOTIFIER_FDB_DEL:
+ err = dsa_switch_fdb_del(ds, info);
+ break;
++ case DSA_NOTIFIER_LAG_CHANGE:
++ err = dsa_switch_lag_change(ds, info);
++ break;
++ case DSA_NOTIFIER_LAG_JOIN:
++ err = dsa_switch_lag_join(ds, info);
++ break;
++ case DSA_NOTIFIER_LAG_LEAVE:
++ err = dsa_switch_lag_leave(ds, info);
++ break;
+ case DSA_NOTIFIER_MDB_ADD:
+ err = dsa_switch_mdb_add(ds, info);
+ break;
+diff -urpN linux-5.4.137.old/net/dsa/tag_8021q.c linux-5.4.137/net/dsa/tag_8021q.c
+--- linux-5.4.137.old/net/dsa/tag_8021q.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/tag_8021q.c 2021-08-04 14:05:57.643717592 +0700
+@@ -31,15 +31,14 @@
+ * Must be transmitted as zero and ignored on receive.
+ *
+ * SWITCH_ID - VID[8:6]:
+- * Index of switch within DSA tree. Must be between 0 and
+- * DSA_MAX_SWITCHES - 1.
++ * Index of switch within DSA tree. Must be between 0 and 7.
+ *
+ * RSV - VID[5:4]:
+ * To be used for further expansion of PORT or for other purposes.
+ * Must be transmitted as zero and ignored on receive.
+ *
+ * PORT - VID[3:0]:
+- * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
++ * Index of switch port. Must be between 0 and 15.
+ */
+
+ #define DSA_8021Q_DIR_SHIFT 10
+@@ -103,7 +102,7 @@ static int dsa_8021q_restore_pvid(struct
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+- slave = ds->ports[port].slave;
++ slave = dsa_to_port(ds, port)->slave;
+
+ err = br_vlan_get_pvid(slave, &pvid);
+ if (!pvid || err < 0)
+@@ -118,7 +117,7 @@ static int dsa_8021q_restore_pvid(struct
+ return err;
+ }
+
+- return dsa_port_vid_add(&ds->ports[port], pvid, vinfo.flags);
++ return dsa_port_vid_add(dsa_to_port(ds, port), pvid, vinfo.flags);
+ }
+
+ /* If @enabled is true, installs @vid with @flags into the switch port's HW
+@@ -130,7 +129,7 @@ static int dsa_8021q_restore_pvid(struct
+ static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags, bool enabled)
+ {
+- struct dsa_port *dp = &ds->ports[port];
++ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct bridge_vlan_info vinfo;
+ int err;
+
+diff -urpN linux-5.4.137.old/net/dsa/tag_dsa.c linux-5.4.137/net/dsa/tag_dsa.c
+--- linux-5.4.137.old/net/dsa/tag_dsa.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/tag_dsa.c 2021-08-04 14:05:53.891713717 +0700
+@@ -1,7 +1,48 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ /*
+- * net/dsa/tag_dsa.c - (Non-ethertype) DSA tagging
++ * Regular and Ethertype DSA tagging
+ * Copyright (c) 2008-2009 Marvell Semiconductor
++ *
++ * Regular DSA
++ * -----------
++
++ * For untagged (in 802.1Q terms) packets, the switch will splice in
++ * the tag between the SA and the ethertype of the original
++ * packet. Tagged frames will instead have their outermost .1Q tag
++ * converted to a DSA tag. It expects the same layout when receiving
++ * packets from the CPU.
++ *
++ * Example:
++ *
++ * .----.----.----.---------
++ * Pu: | DA | SA | ET | Payload ...
++ * '----'----'----'---------
++ * 6 6 2 N
++ * .----.----.--------.-----.----.---------
++ * Pt: | DA | SA | 0x8100 | TCI | ET | Payload ...
++ * '----'----'--------'-----'----'---------
++ * 6 6 2 2 2 N
++ * .----.----.-----.----.---------
++ * Pd: | DA | SA | DSA | ET | Payload ...
++ * '----'----'-----'----'---------
++ * 6 6 4 2 N
++ *
++ * No matter if a packet is received untagged (Pu) or tagged (Pt),
++ * they will both have the same layout (Pd) when they are sent to the
++ * CPU. This is done by ignoring 802.3, replacing the ethertype field
++ * with more metadata, among which is a bit to signal if the original
++ * packet was tagged or not.
++ *
++ * Ethertype DSA
++ * -------------
++ * Uses the exact same tag format as regular DSA, but also includes a
++ * proper ethertype field (which the mv88e6xxx driver sets to
++ * ETH_P_EDSA/0xdada) followed by two zero bytes:
++ *
++ * .----.----.--------.--------.-----.----.---------
++ * | DA | SA | 0xdada | 0x0000 | DSA | ET | Payload ...
++ * '----'----'--------'--------'-----'----'---------
++ * 6 6 2 2 4 2 N
+ */
+
+ #include <linux/etherdevice.h>
+@@ -12,16 +53,81 @@
+
+ #define DSA_HLEN 4
+
+-static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
++/**
++ * enum dsa_cmd - DSA Command
++ * @DSA_CMD_TO_CPU: Set on packets that were trapped or mirrored to
++ * the CPU port. This is needed to implement control protocols,
++ * e.g. STP and LLDP, that must not allow those control packets to
++ * be switched according to the normal rules.
++ * @DSA_CMD_FROM_CPU: Used by the CPU to send a packet to a specific
++ * port, ignoring all the barriers that the switch normally
++ * enforces (VLANs, STP port states etc.). No source address
++ * learning takes place. "sudo send packet"
++ * @DSA_CMD_TO_SNIFFER: Set on the copies of packets that matched some
++ * user configured ingress or egress monitor criteria. These are
++ * forwarded by the switch tree to the user configured ingress or
++ * egress monitor port, which can be set to the CPU port or a
++ * regular port. If the destination is a regular port, the tag
++ * will be removed before egressing the port. If the destination
++ * is the CPU port, the tag will not be removed.
++ * @DSA_CMD_FORWARD: This tag is used on all bulk traffic passing
++ * through the switch tree, including the flows that are directed
++ * towards the CPU. Its device/port tuple encodes the original
++ * source port on which the packet ingressed. It can also be used
++ * on transmit by the CPU to defer the forwarding decision to the
++ * hardware, based on the current config of PVT/VTU/ATU
++ * etc. Source address learning takes places if enabled on the
++ * receiving DSA/CPU port.
++ */
++enum dsa_cmd {
++ DSA_CMD_TO_CPU = 0,
++ DSA_CMD_FROM_CPU = 1,
++ DSA_CMD_TO_SNIFFER = 2,
++ DSA_CMD_FORWARD = 3
++};
++
++/**
++ * enum dsa_code - TO_CPU Code
++ *
++ * @DSA_CODE_MGMT_TRAP: DA was classified as a management
++ * address. Typical examples include STP BPDUs and LLDP.
++ * @DSA_CODE_FRAME2REG: Response to a "remote management" request.
++ * @DSA_CODE_IGMP_MLD_TRAP: IGMP/MLD signaling.
++ * @DSA_CODE_POLICY_TRAP: Frame matched some policy configuration on
++ * the device. Typical examples are matching on DA/SA/VID and DHCP
++ * snooping.
++ * @DSA_CODE_ARP_MIRROR: The name says it all really.
++ * @DSA_CODE_POLICY_MIRROR: Same as @DSA_CODE_POLICY_TRAP, but the
++ * particular policy was set to trigger a mirror instead of a
++ * trap.
++ * @DSA_CODE_RESERVED_6: Unused on all devices up to at least 6393X.
++ * @DSA_CODE_RESERVED_7: Unused on all devices up to at least 6393X.
++ *
++ * A 3-bit code is used to relay why a particular frame was sent to
++ * the CPU. We only use this to determine if the packet was mirrored
++ * or trapped, i.e. whether the packet has been forwarded by hardware
++ * or not.
++ *
++ * This is the superset of all possible codes. Any particular device
++ * may only implement a subset.
++ */
++enum dsa_code {
++ DSA_CODE_MGMT_TRAP = 0,
++ DSA_CODE_FRAME2REG = 1,
++ DSA_CODE_IGMP_MLD_TRAP = 2,
++ DSA_CODE_POLICY_TRAP = 3,
++ DSA_CODE_ARP_MIRROR = 4,
++ DSA_CODE_POLICY_MIRROR = 5,
++ DSA_CODE_RESERVED_6 = 6,
++ DSA_CODE_RESERVED_7 = 7
++};
++
++static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
++ u8 extra)
+ {
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *dsa_header;
+
+- /*
+- * Convert the outermost 802.1q tag to a DSA tag for tagged
+- * packets, or insert a DSA tag between the addresses and
+- * the ethertype field for untagged packets.
+- */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ if (skb_cow_head(skb, 0) < 0)
+ return NULL;
+@@ -29,8 +135,13 @@ static struct sk_buff *dsa_xmit(struct s
+ /*
+ * Construct tagged FROM_CPU DSA tag from 802.1q tag.
+ */
+- dsa_header = skb->data + 2 * ETH_ALEN;
+- dsa_header[0] = 0x60 | dp->ds->index;
++ if (extra) {
++ skb_push(skb, extra);
++ memmove(skb->data, skb->data + extra, 2 * ETH_ALEN);
++ }
++
++ dsa_header = skb->data + 2 * ETH_ALEN + extra;
++ dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | 0x20 | dp->ds->index;
+ dsa_header[1] = dp->index << 3;
+
+ /*
+@@ -43,15 +154,12 @@ static struct sk_buff *dsa_xmit(struct s
+ } else {
+ if (skb_cow_head(skb, DSA_HLEN) < 0)
+ return NULL;
+- skb_push(skb, DSA_HLEN);
++ skb_push(skb, DSA_HLEN + extra);
++ memmove(skb->data, skb->data + DSA_HLEN + extra, 2 * ETH_ALEN);
+
+- memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
+-
+- /*
+- * Construct untagged FROM_CPU DSA tag.
+- */
+- dsa_header = skb->data + 2 * ETH_ALEN;
+- dsa_header[0] = 0x40 | dp->ds->index;
++ /* Construct untagged FROM_CPU DSA tag. */
++ dsa_header = skb->data + 2 * ETH_ALEN + extra;
++ dsa_header[0] = (DSA_CMD_FROM_CPU << 6) | dp->ds->index;
+ dsa_header[1] = dp->index << 3;
+ dsa_header[2] = 0x00;
+ dsa_header[3] = 0x00;
+@@ -60,47 +168,91 @@ static struct sk_buff *dsa_xmit(struct s
+ return skb;
+ }
+
+-static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
+- struct packet_type *pt)
++static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
++ u8 extra)
+ {
++ int source_device, source_port;
++ bool trunk = false;
++ enum dsa_code code;
++ enum dsa_cmd cmd;
+ u8 *dsa_header;
+- int source_device;
+- int source_port;
+
+- if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
+- return NULL;
+-
+- /*
+- * The ethertype field is part of the DSA header.
+- */
++ /* The ethertype field is part of the DSA header. */
+ dsa_header = skb->data - 2;
+
+- /*
+- * Check that frame type is either TO_CPU or FORWARD.
+- */
+- if ((dsa_header[0] & 0xc0) != 0x00 && (dsa_header[0] & 0xc0) != 0xc0)
++ cmd = dsa_header[0] >> 6;
++ switch (cmd) {
++ case DSA_CMD_FORWARD:
++ skb->offload_fwd_mark = 1;
++
++ trunk = !!(dsa_header[1] & 7);
++ break;
++
++ case DSA_CMD_TO_CPU:
++ code = (dsa_header[1] & 0x6) | ((dsa_header[2] >> 4) & 1);
++
++ switch (code) {
++ case DSA_CODE_FRAME2REG:
++ /* Remote management is not implemented yet,
++ * drop.
++ */
++ return NULL;
++ case DSA_CODE_ARP_MIRROR:
++ case DSA_CODE_POLICY_MIRROR:
++ /* Mark mirrored packets to notify any upper
++ * device (like a bridge) that forwarding has
++ * already been done by hardware.
++ */
++ skb->offload_fwd_mark = 1;
++ break;
++ case DSA_CODE_MGMT_TRAP:
++ case DSA_CODE_IGMP_MLD_TRAP:
++ case DSA_CODE_POLICY_TRAP:
++ /* Traps have, by definition, not been
++ * forwarded by hardware, so don't mark them.
++ */
++ break;
++ default:
++ /* Reserved code, this could be anything. Drop
++ * seems like the safest option.
++ */
++ return NULL;
++ }
++
++ break;
++
++ default:
+ return NULL;
++ }
+
+- /*
+- * Determine source device and port.
+- */
+ source_device = dsa_header[0] & 0x1f;
+ source_port = (dsa_header[1] >> 3) & 0x1f;
+
+- skb->dev = dsa_master_find_slave(dev, source_device, source_port);
++ if (trunk) {
++ struct dsa_port *cpu_dp = dev->dsa_ptr;
++
++ /* The exact source port is not available in the tag,
++ * so we inject the frame directly on the upper
++ * team/bond.
++ */
++ skb->dev = dsa_lag_dev(cpu_dp->dst, source_port);
++ } else {
++ skb->dev = dsa_master_find_slave(dev, source_device,
++ source_port);
++ }
++
+ if (!skb->dev)
+ return NULL;
+
+- /*
+- * Convert the DSA header to an 802.1q header if the 'tagged'
+- * bit in the DSA header is set. If the 'tagged' bit is clear,
+- * delete the DSA header entirely.
++ /* If the 'tagged' bit is set; convert the DSA tag to a 802.1Q
++ * tag, and delete the ethertype (extra) if applicable. If the
++ * 'tagged' bit is cleared; delete the DSA tag, and ethertype
++ * if applicable.
+ */
+ if (dsa_header[0] & 0x20) {
+ u8 new_header[4];
+
+- /*
+- * Insert 802.1q ethertype and copy the VLAN-related
++ /* Insert 802.1Q ethertype and copy the VLAN-related
+ * fields, but clear the bit that will hold CFI (since
+ * DSA uses that bit location for another purpose).
+ */
+@@ -109,16 +261,13 @@ static struct sk_buff *dsa_rcv(struct sk
+ new_header[2] = dsa_header[2] & ~0x10;
+ new_header[3] = dsa_header[3];
+
+- /*
+- * Move CFI bit from its place in the DSA header to
+- * its 802.1q-designated place.
++ /* Move CFI bit from its place in the DSA header to
++ * its 802.1Q-designated place.
+ */
+ if (dsa_header[1] & 0x01)
+ new_header[2] |= 0x10;
+
+- /*
+- * Update packet checksum if skb is CHECKSUM_COMPLETE.
+- */
++ /* Update packet checksum if skb is CHECKSUM_COMPLETE. */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __wsum c = skb->csum;
+ c = csum_add(c, csum_partial(new_header + 2, 2, 0));
+@@ -126,19 +275,20 @@ static struct sk_buff *dsa_rcv(struct sk
+ skb->csum = c;
+ }
+
++
+ memcpy(dsa_header, new_header, DSA_HLEN);
++
++ if (extra)
++ memmove(skb->data - ETH_HLEN,
++ skb->data - ETH_HLEN - extra,
++ 2 * ETH_ALEN);
+ } else {
+- /*
+- * Remove DSA tag and update checksum.
+- */
+ skb_pull_rcsum(skb, DSA_HLEN);
+ memmove(skb->data - ETH_HLEN,
+- skb->data - ETH_HLEN - DSA_HLEN,
++ skb->data - ETH_HLEN - DSA_HLEN - extra,
+ 2 * ETH_ALEN);
+ }
+
+- skb->offload_fwd_mark = 1;
+-
+ return skb;
+ }
+
+@@ -150,16 +300,88 @@ static int dsa_tag_flow_dissect(const st
+ return 0;
+ }
+
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
++
++static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ return dsa_xmit_ll(skb, dev, 0);
++}
++
++static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
++ struct packet_type *pt)
++{
++ if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
++ return NULL;
++
++ return dsa_rcv_ll(skb, dev, 0);
++}
++
+ static const struct dsa_device_ops dsa_netdev_ops = {
+- .name = "dsa",
+- .proto = DSA_TAG_PROTO_DSA,
+- .xmit = dsa_xmit,
+- .rcv = dsa_rcv,
++ .name = "dsa",
++ .proto = DSA_TAG_PROTO_DSA,
++ .xmit = dsa_xmit,
++ .rcv = dsa_rcv,
+ .flow_dissect = dsa_tag_flow_dissect,
+ .overhead = DSA_HLEN,
+ };
+
+-MODULE_LICENSE("GPL");
++DSA_TAG_DRIVER(dsa_netdev_ops);
+ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
+
+-module_dsa_tag_driver(dsa_netdev_ops);
++#endif /* CONFIG_NET_DSA_TAG_DSA */
++
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
++
++#define EDSA_HLEN 8
++
++static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ u8 *edsa_header;
++
++ skb = dsa_xmit_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
++ if (!skb)
++ return NULL;
++
++ edsa_header = skb->data + 2 * ETH_ALEN;
++ edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
++ edsa_header[1] = ETH_P_EDSA & 0xff;
++ edsa_header[2] = 0x00;
++ edsa_header[3] = 0x00;
++ return skb;
++}
++
++static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
++ struct packet_type *pt)
++{
++ if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
++ return NULL;
++
++ skb_pull_rcsum(skb, EDSA_HLEN - DSA_HLEN);
++
++ return dsa_rcv_ll(skb, dev, EDSA_HLEN - DSA_HLEN);
++}
++
++static const struct dsa_device_ops edsa_netdev_ops = {
++ .name = "edsa",
++ .proto = DSA_TAG_PROTO_EDSA,
++ .xmit = edsa_xmit,
++ .rcv = edsa_rcv,
++ .overhead = EDSA_HLEN,
++};
++
++DSA_TAG_DRIVER(edsa_netdev_ops);
++MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
++#endif /* CONFIG_NET_DSA_TAG_EDSA */
++
++static struct dsa_tag_driver *dsa_tag_drivers[] = {
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_DSA)
++ &DSA_TAG_DRIVER_NAME(dsa_netdev_ops),
++#endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_EDSA)
++ &DSA_TAG_DRIVER_NAME(edsa_netdev_ops),
++#endif
++};
++
++module_dsa_tag_drivers(dsa_tag_drivers);
++
++MODULE_LICENSE("GPL");
+diff -urpN linux-5.4.137.old/net/dsa/tag_edsa.c linux-5.4.137/net/dsa/tag_edsa.c
+--- linux-5.4.137.old/net/dsa/tag_edsa.c 2021-08-04 14:05:38.059697353 +0700
++++ linux-5.4.137/net/dsa/tag_edsa.c 1970-01-01 07:00:00.000000000 +0700
+@@ -1,215 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * net/dsa/tag_edsa.c - Ethertype DSA tagging
+- * Copyright (c) 2008-2009 Marvell Semiconductor
+- */
+-
+-#include <linux/etherdevice.h>
+-#include <linux/list.h>
+-#include <linux/slab.h>
+-
+-#include "dsa_priv.h"
+-
+-#define DSA_HLEN 4
+-#define EDSA_HLEN 8
+-
+-#define FRAME_TYPE_TO_CPU 0x00
+-#define FRAME_TYPE_FORWARD 0x03
+-
+-#define TO_CPU_CODE_MGMT_TRAP 0x00
+-#define TO_CPU_CODE_FRAME2REG 0x01
+-#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
+-#define TO_CPU_CODE_POLICY_TRAP 0x03
+-#define TO_CPU_CODE_ARP_MIRROR 0x04
+-#define TO_CPU_CODE_POLICY_MIRROR 0x05
+-
+-static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+-{
+- struct dsa_port *dp = dsa_slave_to_port(dev);
+- u8 *edsa_header;
+-
+- /*
+- * Convert the outermost 802.1q tag to a DSA tag and prepend
+- * a DSA ethertype field is the packet is tagged, or insert
+- * a DSA ethertype plus DSA tag between the addresses and the
+- * current ethertype field if the packet is untagged.
+- */
+- if (skb->protocol == htons(ETH_P_8021Q)) {
+- if (skb_cow_head(skb, DSA_HLEN) < 0)
+- return NULL;
+- skb_push(skb, DSA_HLEN);
+-
+- memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
+-
+- /*
+- * Construct tagged FROM_CPU DSA tag from 802.1q tag.
+- */
+- edsa_header = skb->data + 2 * ETH_ALEN;
+- edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
+- edsa_header[1] = ETH_P_EDSA & 0xff;
+- edsa_header[2] = 0x00;
+- edsa_header[3] = 0x00;
+- edsa_header[4] = 0x60 | dp->ds->index;
+- edsa_header[5] = dp->index << 3;
+-
+- /*
+- * Move CFI field from byte 6 to byte 5.
+- */
+- if (edsa_header[6] & 0x10) {
+- edsa_header[5] |= 0x01;
+- edsa_header[6] &= ~0x10;
+- }
+- } else {
+- if (skb_cow_head(skb, EDSA_HLEN) < 0)
+- return NULL;
+- skb_push(skb, EDSA_HLEN);
+-
+- memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
+-
+- /*
+- * Construct untagged FROM_CPU DSA tag.
+- */
+- edsa_header = skb->data + 2 * ETH_ALEN;
+- edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
+- edsa_header[1] = ETH_P_EDSA & 0xff;
+- edsa_header[2] = 0x00;
+- edsa_header[3] = 0x00;
+- edsa_header[4] = 0x40 | dp->ds->index;
+- edsa_header[5] = dp->index << 3;
+- edsa_header[6] = 0x00;
+- edsa_header[7] = 0x00;
+- }
+-
+- return skb;
+-}
+-
+-static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
+- struct packet_type *pt)
+-{
+- u8 *edsa_header;
+- int frame_type;
+- int code;
+- int source_device;
+- int source_port;
+-
+- if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
+- return NULL;
+-
+- /*
+- * Skip the two null bytes after the ethertype.
+- */
+- edsa_header = skb->data + 2;
+-
+- /*
+- * Check that frame type is either TO_CPU or FORWARD.
+- */
+- frame_type = edsa_header[0] >> 6;
+-
+- switch (frame_type) {
+- case FRAME_TYPE_TO_CPU:
+- code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+-
+- /*
+- * Mark the frame to never egress on any port of the same switch
+- * unless it's a trapped IGMP/MLD packet, in which case the
+- * bridge might want to forward it.
+- */
+- if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+- skb->offload_fwd_mark = 1;
+-
+- break;
+-
+- case FRAME_TYPE_FORWARD:
+- skb->offload_fwd_mark = 1;
+- break;
+-
+- default:
+- return NULL;
+- }
+-
+- /*
+- * Determine source device and port.
+- */
+- source_device = edsa_header[0] & 0x1f;
+- source_port = (edsa_header[1] >> 3) & 0x1f;
+-
+- skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+- if (!skb->dev)
+- return NULL;
+-
+- /*
+- * If the 'tagged' bit is set, convert the DSA tag to a 802.1q
+- * tag and delete the ethertype part. If the 'tagged' bit is
+- * clear, delete the ethertype and the DSA tag parts.
+- */
+- if (edsa_header[0] & 0x20) {
+- u8 new_header[4];
+-
+- /*
+- * Insert 802.1q ethertype and copy the VLAN-related
+- * fields, but clear the bit that will hold CFI (since
+- * DSA uses that bit location for another purpose).
+- */
+- new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
+- new_header[1] = ETH_P_8021Q & 0xff;
+- new_header[2] = edsa_header[2] & ~0x10;
+- new_header[3] = edsa_header[3];
+-
+- /*
+- * Move CFI bit from its place in the DSA header to
+- * its 802.1q-designated place.
+- */
+- if (edsa_header[1] & 0x01)
+- new_header[2] |= 0x10;
+-
+- skb_pull_rcsum(skb, DSA_HLEN);
+-
+- /*
+- * Update packet checksum if skb is CHECKSUM_COMPLETE.
+- */
+- if (skb->ip_summed == CHECKSUM_COMPLETE) {
+- __wsum c = skb->csum;
+- c = csum_add(c, csum_partial(new_header + 2, 2, 0));
+- c = csum_sub(c, csum_partial(edsa_header + 2, 2, 0));
+- skb->csum = c;
+- }
+-
+- memcpy(edsa_header, new_header, DSA_HLEN);
+-
+- memmove(skb->data - ETH_HLEN,
+- skb->data - ETH_HLEN - DSA_HLEN,
+- 2 * ETH_ALEN);
+- } else {
+- /*
+- * Remove DSA tag and update checksum.
+- */
+- skb_pull_rcsum(skb, EDSA_HLEN);
+- memmove(skb->data - ETH_HLEN,
+- skb->data - ETH_HLEN - EDSA_HLEN,
+- 2 * ETH_ALEN);
+- }
+-
+- return skb;
+-}
+-
+-static int edsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+- int *offset)
+-{
+- *offset = 8;
+- *proto = ((__be16 *)skb->data)[3];
+- return 0;
+-}
+-
+-static const struct dsa_device_ops edsa_netdev_ops = {
+- .name = "edsa",
+- .proto = DSA_TAG_PROTO_EDSA,
+- .xmit = edsa_xmit,
+- .rcv = edsa_rcv,
+- .flow_dissect = edsa_tag_flow_dissect,
+- .overhead = EDSA_HLEN,
+-};
+-
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
+-
+-module_dsa_tag_driver(edsa_netdev_ops);
diff --git a/target/linux/realtek/patches-5.4/101-brflood-api.patch b/target/linux/realtek/patches-5.4/101-brflood-api.patch
new file mode 100644
index 0000000000..0a7f1986ea
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/101-brflood-api.patch
@@ -0,0 +1,817 @@
+Index: linux-5.4.111/drivers/net/dsa/b53/b53_common.c
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/b53/b53_common.c
++++ linux-5.4.111/drivers/net/dsa/b53/b53_common.c
+@@ -527,6 +527,39 @@ static void b53_port_set_learning(struct
+ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
+ }
+
++static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
++ bool unicast)
++{
++ u16 uc;
++
++ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
++ if (unicast)
++ uc |= BIT(port);
++ else
++ uc &= ~BIT(port);
++ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
++}
++
++static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
++ bool multicast)
++{
++ u16 mc;
++
++ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
++ if (multicast)
++ mc |= BIT(port);
++ else
++ mc &= ~BIT(port);
++ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
++
++ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
++ if (multicast)
++ mc |= BIT(port);
++ else
++ mc &= ~BIT(port);
++ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
++}
++
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ {
+ struct b53_device *dev = ds->priv;
+@@ -539,7 +572,8 @@ int b53_enable_port(struct dsa_switch *d
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+- b53_br_egress_floods(ds, port, true, true);
++ b53_port_set_ucast_flood(dev, port, true);
++ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+
+ if (dev->ops->irq_enable)
+@@ -658,7 +692,8 @@ static void b53_enable_cpu_port(struct b
+
+ b53_brcm_hdr_setup(dev->ds, port);
+
+- b53_br_egress_floods(dev->ds, port, true, true);
++ b53_port_set_ucast_flood(dev, port, true);
++ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+ }
+
+@@ -1808,37 +1843,37 @@ void b53_br_fast_age(struct dsa_switch *
+ }
+ EXPORT_SYMBOL(b53_br_fast_age);
+
+-int b53_br_egress_floods(struct dsa_switch *ds, int port,
+- bool unicast, bool multicast)
++static int b53_br_flags_pre(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack)
+ {
+- struct b53_device *dev = ds->priv;
+- u16 uc, mc;
+-
+- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+- if (unicast)
+- uc |= BIT(port);
+- else
+- uc &= ~BIT(port);
+- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
++ if (flags & ~(BR_FLOOD | BR_MCAST_FLOOD))
++ return -EINVAL;
+
+- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+- if (multicast)
+- mc |= BIT(port);
+- else
+- mc &= ~BIT(port);
+- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
++ return 0;
++}
+
+- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+- if (multicast)
+- mc |= BIT(port);
+- else
+- mc &= ~BIT(port);
+- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
++static int b53_br_flags(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack)
++{
++ if (flags & BR_FLOOD)
++ b53_port_set_ucast_flood(ds->priv, port,
++					 !!(flags & BR_FLOOD));
++ if (flags & BR_MCAST_FLOOD)
++ b53_port_set_mcast_flood(ds->priv, port,
++					 !!(flags & BR_MCAST_FLOOD));
+
+ return 0;
++}
+
++static int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
++ struct netlink_ext_ack *extack)
++{
++ b53_port_set_mcast_flood(ds->priv, port, mrouter);
++
++ return 0;
+ }
+-EXPORT_SYMBOL(b53_br_egress_floods);
+
+ static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
+ {
+@@ -2037,9 +2072,11 @@ static const struct dsa_switch_ops b53_s
+ .set_mac_eee = b53_set_mac_eee,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
++ .port_pre_bridge_flags = b53_br_flags_pre,
++ .port_bridge_flags = b53_br_flags,
++ .port_set_mrouter = b53_set_mrouter,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
+- .port_egress_floods = b53_br_egress_floods,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_prepare = b53_vlan_prepare,
+ .port_vlan_add = b53_vlan_add,
+Index: linux-5.4.111/drivers/net/dsa/b53/b53_priv.h
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/b53/b53_priv.h
++++ linux-5.4.111/drivers/net/dsa/b53/b53_priv.h
+@@ -319,8 +319,6 @@ int b53_br_join(struct dsa_switch *ds, i
+ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
+ void b53_br_fast_age(struct dsa_switch *ds, int port);
+-int b53_br_egress_floods(struct dsa_switch *ds, int port,
+- bool unicast, bool multicast);
+ void b53_port_event(struct dsa_switch *ds, int port);
+ void b53_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+Index: linux-5.4.111/drivers/net/dsa/mv88e6xxx/chip.c
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/mv88e6xxx/chip.c
++++ linux-5.4.111/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2274,12 +2274,20 @@ static int mv88e6xxx_setup_egress_floods
+ {
+ struct dsa_switch *ds = chip->ds;
+ bool flood;
++ int err;
+
+ /* Upstream ports flood frames with unknown unicast or multicast DA */
+ flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
+- if (chip->info->ops->port_set_egress_floods)
+- return chip->info->ops->port_set_egress_floods(chip, port,
+- flood, flood);
++ if (chip->info->ops->port_set_ucast_flood) {
++ err = chip->info->ops->port_set_ucast_flood(chip, port, flood);
++ if (err)
++ return err;
++ }
++ if (chip->info->ops->port_set_mcast_flood) {
++ err = chip->info->ops->port_set_mcast_flood(chip, port, flood);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+@@ -3019,7 +3027,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+@@ -3058,7 +3067,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
++ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_link_state = mv88e6185_port_link_state,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+@@ -3090,7 +3100,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+@@ -3128,7 +3139,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6352_port_link_state,
+@@ -3162,7 +3174,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
++ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+@@ -3207,7 +3220,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3253,7 +3267,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3329,7 +3344,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3372,7 +3388,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3416,7 +3433,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3459,7 +3477,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3503,7 +3522,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6185_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
++ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_pause = mv88e6185_port_set_pause,
+@@ -3545,7 +3565,6 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+@@ -3594,7 +3613,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+@@ -3642,7 +3662,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+@@ -3692,7 +3713,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3743,7 +3765,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6250_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+@@ -3784,7 +3807,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+@@ -3833,7 +3857,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3877,7 +3902,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3921,7 +3947,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -3970,7 +3997,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_set_speed = mv88e6185_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -4055,7 +4083,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -4110,7 +4139,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -4163,7 +4193,8 @@ static const struct mv88e6xxx_ops mv88e6
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+- .port_set_egress_floods = mv88e6352_port_set_egress_floods,
++ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
++ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+@@ -5016,17 +5047,72 @@ static void mv88e6xxx_port_mirror_del(st
+ mutex_unlock(&chip->reg_lock);
+ }
+
+-static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
+- bool unicast, bool multicast)
++static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ const struct mv88e6xxx_ops *ops;
++
++ if (flags & ~(BR_FLOOD | BR_MCAST_FLOOD))
++ return -EINVAL;
++
++ ops = chip->info->ops;
++
++ if ((flags & BR_FLOOD) && !ops->port_set_ucast_flood)
++ return -EINVAL;
++
++ if ((flags & BR_MCAST_FLOOD) && !ops->port_set_mcast_flood)
++ return -EINVAL;
++
++ return 0;
++}
++
++static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack)
+ {
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err = -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+- if (chip->info->ops->port_set_egress_floods)
+- err = chip->info->ops->port_set_egress_floods(chip, port,
+- unicast,
+- multicast);
++
++ if (flags & BR_FLOOD) {
++		bool unicast = !!(flags & BR_FLOOD);
++
++ err = chip->info->ops->port_set_ucast_flood(chip, port,
++ unicast);
++ if (err)
++ goto out;
++ }
++
++ if (flags & BR_MCAST_FLOOD) {
++		bool multicast = !!(flags & BR_MCAST_FLOOD);
++
++ err = chip->info->ops->port_set_mcast_flood(chip, port,
++ multicast);
++ if (err)
++ goto out;
++ }
++
++out:
++ mv88e6xxx_reg_unlock(chip);
++
++ return err;
++}
++
++static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port,
++ bool mrouter,
++ struct netlink_ext_ack *extack)
++{
++ struct mv88e6xxx_chip *chip = ds->priv;
++ int err;
++
++ if (!chip->info->ops->port_set_mcast_flood)
++ return -EOPNOTSUPP;
++
++ mv88e6xxx_reg_lock(chip);
++ err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+@@ -5322,7 +5408,9 @@ static const struct dsa_switch_ops mv88e
+ .set_ageing_time = mv88e6xxx_set_ageing_time,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
+- .port_egress_floods = mv88e6xxx_port_egress_floods,
++ .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
++ .port_bridge_flags = mv88e6xxx_port_bridge_flags,
++ .port_set_mrouter = mv88e6xxx_port_set_mrouter,
+ .port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
+Index: linux-5.4.111/drivers/net/dsa/mv88e6xxx/chip.h
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/mv88e6xxx/chip.h
++++ linux-5.4.111/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -429,8 +429,10 @@ struct mv88e6xxx_ops {
+
+ int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+- int (*port_set_egress_floods)(struct mv88e6xxx_chip *chip, int port,
+- bool unicast, bool multicast);
++ int (*port_set_ucast_flood)(struct mv88e6xxx_chip *chip, int port,
++ bool unicast);
++ int (*port_set_mcast_flood)(struct mv88e6xxx_chip *chip, int port,
++ bool multicast);
+ int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+ int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
+Index: linux-5.4.111/drivers/net/dsa/mv88e6xxx/port.c
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/mv88e6xxx/port.c
++++ linux-5.4.111/drivers/net/dsa/mv88e6xxx/port.c
+@@ -932,8 +932,8 @@ int mv88e6351_port_set_frame_mode(struct
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ }
+
+-static int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+- int port, bool unicast)
++int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
++ int port, bool unicast)
+ {
+ int err;
+ u16 reg;
+@@ -950,8 +950,8 @@ static int mv88e6185_port_set_forward_un
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ }
+
+-int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+- bool unicast, bool multicast)
++int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
++ bool unicast)
+ {
+ int err;
+ u16 reg;
+@@ -960,16 +960,28 @@ int mv88e6352_port_set_egress_floods(str
+ if (err)
+ return err;
+
+- reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK;
++ if (unicast)
++ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
++ else
++ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
++
++ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
++}
++
++int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
++ bool multicast)
++{
++ int err;
++ u16 reg;
++
++ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
++ if (err)
++ return err;
+
+- if (unicast && multicast)
+- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA;
+- else if (unicast)
+- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA;
+- else if (multicast)
+- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA;
++ if (multicast)
++ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
+ else
+- reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA;
++ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ }
+@@ -1156,8 +1168,8 @@ static const char * const mv88e6xxx_port
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
+ };
+
+-static int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+- int port, bool multicast)
++int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
++ int port, bool multicast)
+ {
+ int err;
+ u16 reg;
+@@ -1174,18 +1186,6 @@ static int mv88e6185_port_set_default_fo
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ }
+
+-int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+- bool unicast, bool multicast)
+-{
+- int err;
+-
+- err = mv88e6185_port_set_forward_unknown(chip, port, unicast);
+- if (err)
+- return err;
+-
+- return mv88e6185_port_set_default_forward(chip, port, multicast);
+-}
+-
+ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
+ {
+Index: linux-5.4.111/drivers/net/dsa/mv88e6xxx/port.h
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/mv88e6xxx/port.h
++++ linux-5.4.111/drivers/net/dsa/mv88e6xxx/port.h
+@@ -154,11 +154,8 @@
+ #define MV88E6185_PORT_CTL0_USE_IP 0x0020
+ #define MV88E6185_PORT_CTL0_USE_TAG 0x0010
+ #define MV88E6185_PORT_CTL0_FORWARD_UNKNOWN 0x0004
+-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MASK 0x000c
+-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_DA 0x0000
+-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_MC_DA 0x0004
+-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_NO_UNKNOWN_UC_DA 0x0008
+-#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_ALL_UNKNOWN_DA 0x000c
++#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC 0x0004
++#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC 0x0008
+ #define MV88E6XXX_PORT_CTL0_STATE_MASK 0x0003
+ #define MV88E6XXX_PORT_CTL0_STATE_DISABLED 0x0000
+ #define MV88E6XXX_PORT_CTL0_STATE_BLOCKING 0x0001
+@@ -335,10 +332,14 @@ int mv88e6085_port_set_frame_mode(struct
+ enum mv88e6xxx_frame_mode mode);
+ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+-int mv88e6185_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+- bool unicast, bool multicast);
+-int mv88e6352_port_set_egress_floods(struct mv88e6xxx_chip *chip, int port,
+- bool unicast, bool multicast);
++int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
++ int port, bool unicast);
++int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
++ int port, bool multicast);
++int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
++ bool unicast);
++int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
++ bool multicast);
+ int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action);
+Index: linux-5.4.111/include/net/dsa.h
+===================================================================
+--- linux-5.4.111.orig/include/net/dsa.h
++++ linux-5.4.111/include/net/dsa.h
+@@ -547,8 +547,14 @@ struct dsa_switch_ops {
+ void (*port_stp_state_set)(struct dsa_switch *ds, int port,
+ u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
+- int (*port_egress_floods)(struct dsa_switch *ds, int port,
+- bool unicast, bool multicast);
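++	/*
++	 * Bridge port flag offload, replacing port_egress_floods:
++	 * port_pre_bridge_flags validates which flags the switch can offload,
++	 * port_bridge_flags applies them, and port_set_mrouter controls
++	 * unknown-multicast flooding towards a multicast router port.
++	 */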
++ int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack);
++ int (*port_bridge_flags)(struct dsa_switch *ds, int port,
++ unsigned long flags,
++ struct netlink_ext_ack *extack);
++ int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter,
++ struct netlink_ext_ack *extack);
+
+ /*
+ * VLAN support
+Index: linux-5.4.111/net/dsa/dsa_priv.h
+===================================================================
+--- linux-5.4.111.orig/net/dsa/dsa_priv.h
++++ linux-5.4.111/net/dsa/dsa_priv.h
+@@ -171,11 +171,11 @@ int dsa_port_mdb_add(const struct dsa_po
+ int dsa_port_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+ int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+- struct switchdev_trans *trans);
++ struct switchdev_trans *trans, struct netlink_ext_ack *extack);
+ int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+- struct switchdev_trans *trans);
++ struct switchdev_trans *trans, struct netlink_ext_ack *extack);
+ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
+- struct switchdev_trans *trans);
++ struct switchdev_trans *trans, struct netlink_ext_ack *extack);
+ int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+Index: linux-5.4.111/net/dsa/port.c
+===================================================================
+--- linux-5.4.111.orig/net/dsa/port.c
++++ linux-5.4.111/net/dsa/port.c
+@@ -127,7 +127,7 @@ int dsa_port_bridge_join(struct dsa_port
+ int err;
+
+ /* Set the flooding mode before joining the port in the switch */
+- err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL);
++ err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL, NULL);
+ if (err)
+ return err;
+
+@@ -140,7 +140,7 @@ int dsa_port_bridge_join(struct dsa_port
+
+ /* The bridging is rolled back on error */
+ if (err) {
+- dsa_port_bridge_flags(dp, 0, NULL);
++ dsa_port_bridge_flags(dp, 0, NULL, NULL);
+ dp->bridge_dev = NULL;
+ }
+
+@@ -166,7 +166,7 @@ void dsa_port_bridge_leave(struct dsa_po
+ pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
+
+ /* Port is leaving the bridge, disable flooding */
+- dsa_port_bridge_flags(dp, 0, NULL);
++ dsa_port_bridge_flags(dp, 0, NULL, NULL);
+
+ /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
+ * so allow it to be in BR_STATE_FORWARDING to be kept functional
+@@ -350,44 +350,44 @@ int dsa_port_ageing_time(struct dsa_port
+ }
+
+ int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+- struct switchdev_trans *trans)
++ struct switchdev_trans *trans, struct netlink_ext_ack *extack)
+ {
+ struct dsa_switch *ds = dp->ds;
+
+- if (!ds->ops->port_egress_floods ||
+- (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
++ if (!ds->ops->port_pre_bridge_flags)
+ return -EINVAL;
+
+- return 0;
++ return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
+ }
+
+ int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+- struct switchdev_trans *trans)
++ struct switchdev_trans *trans, struct netlink_ext_ack *extack)
+ {
+ struct dsa_switch *ds = dp->ds;
+- int port = dp->index;
+- int err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+- if (ds->ops->port_egress_floods)
+- err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
+- flags & BR_MCAST_FLOOD);
++ if (!ds->ops->port_bridge_flags)
++ return -EINVAL;
++
++ return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
+
+- return err;
+ }
+
+ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
+- struct switchdev_trans *trans)
++ struct switchdev_trans *trans,
++ struct netlink_ext_ack *extack)
+ {
+ struct dsa_switch *ds = dp->ds;
+- int port = dp->index;
+
+ if (switchdev_trans_ph_prepare(trans))
+- return ds->ops->port_egress_floods ? 0 : -EOPNOTSUPP;
++ return ds->ops->port_set_mrouter ? 0 : -EOPNOTSUPP;
++
++ if (!ds->ops->port_set_mrouter)
++ return -EOPNOTSUPP;
+
+- return ds->ops->port_egress_floods(ds, port, true, mrouter);
++ return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
+ }
+
+ int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+Index: linux-5.4.111/net/dsa/slave.c
+===================================================================
+--- linux-5.4.111.orig/net/dsa/slave.c
++++ linux-5.4.111/net/dsa/slave.c
+@@ -293,13 +293,13 @@ static int dsa_slave_port_attr_set(struc
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
+- trans);
++ trans, NULL);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+- ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
++ ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans, NULL);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+- ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
++ ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans, NULL);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
diff --git a/target/linux/realtek/patches-5.4/500-gpio-Add-Realtek-Otto-GPIO-support.patch b/target/linux/realtek/patches-5.4/500-gpio-Add-Realtek-Otto-GPIO-support.patch
new file mode 100644
index 0000000000..8b43f07e8a
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/500-gpio-Add-Realtek-Otto-GPIO-support.patch
@@ -0,0 +1,405 @@
+From patchwork Tue Mar 30 17:48:43 2021
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [v6,2/2] gpio: Add Realtek Otto GPIO support
+X-Patchwork-Submitter: Sander Vanheule <sander@svanheule.net>
+X-Patchwork-Id: 411993
+Message-Id: <2d00064530e88fbef4fff2e759a66bdda261cca7.1617126277.git.sander@svanheule.net>
+To: devicetree@vger.kernel.org, linux-gpio@vger.kernel.org
+Cc: andy.shevchenko@gmail.com, bert@biot.com,
+ bgolaszewski@baylibre.com, linus.walleij@linaro.org,
+ linux-kernel@vger.kernel.org, maz@kernel.org, robh+dt@kernel.org,
+ Sander Vanheule <sander@svanheule.net>
+Date: Tue, 30 Mar 2021 19:48:43 +0200
+From: Sander Vanheule <sander@svanheule.net>
+List-Id: <linux-gpio.vger.kernel.org>
+
+Realtek MIPS SoCs (platform name Otto) have GPIO controllers with up to
+64 GPIOs, divided over two banks. Each bank has a set of registers for
+32 GPIOs, with support for edge-triggered interrupts.
+
+Each GPIO bank consists of four 8-bit GPIO ports (ABCD and EFGH). Most
+registers pack one bit per GPIO, except for the IMR register, which
+packs two bits per GPIO (AB-CD).
+
+Although the byte order is currently assumed to have port A..D at offset
+0x0..0x3, this has been observed to be reversed on other, Lexra-based,
+SoCs (e.g. RTL8196E/97D/97F).
+
+Interrupt support is disabled for the fallback devicetree-compatible
+'realtek,otto-gpio'. This allows for quick support of GPIO banks in
+which the byte order would be unknown. In this case, the port ordering
+in the IMR registers may not match the reversed order in the other
+registers (DCBA, and BA-DC or DC-BA).
+
+Signed-off-by: Sander Vanheule <sander@svanheule.net>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+---
+ drivers/gpio/Kconfig | 13 ++
+ drivers/gpio/Makefile | 1 +
+ drivers/gpio/gpio-realtek-otto.c | 325 +++++++++++++++++++++++++++++++
+ 3 files changed, 339 insertions(+)
+ create mode 100644 drivers/gpio/gpio-realtek-otto.c
+
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -435,6 +435,19 @@ config GPIO_RCAR
+ help
+ Say yes here to support GPIO on Renesas R-Car SoCs.
+
++config GPIO_REALTEK_OTTO
++ tristate "Realtek Otto GPIO support"
++ depends on RTL838X
++ default RTL838X
++ select GPIO_GENERIC
++ select GPIOLIB_IRQCHIP
++ help
++ The GPIO controller on the Otto MIPS platform supports up to two
++ banks of 32 GPIOs, with edge triggered interrupts. The 32 GPIOs
++ are grouped in four 8-bit wide ports.
++
++ When built as a module, the module will be called realtek_otto_gpio.
++
+ config GPIO_REG
+ bool
+ help
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -116,6 +116,7 @@ obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gp
+ obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
+ obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+ obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
++obj-$(CONFIG_GPIO_REALTEK_OTTO) += gpio-realtek-otto.o
+ obj-$(CONFIG_GPIO_REG) += gpio-reg.o
+ obj-$(CONFIG_GPIO_RTL8231) += gpio-rtl8231.o
+ #obj-$(CONFIG_GPIO_RTL838X) += gpio-rtl838x.o
+--- /dev/null
++++ b/drivers/gpio/gpio-realtek-otto.c
+@@ -0,0 +1,325 @@
++// SPDX-License-Identifier: GPL-2.0-only
++
++#include <linux/gpio/driver.h>
++#include <linux/irq.h>
++//#include <linux/minmax.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
++
++/*
++ * Total register block size is 0x1C for one bank of four ports (A, B, C, D).
++ * An optional second bank, with ports E, F, G, and H, may be present, starting
++ * at register offset 0x1C.
++ */
++
++/*
++ * Pin select: (0) "normal", (1) "dedicate peripheral"
++ * Not used on RTL8380/RTL8390, peripheral selection is managed by control bits
++ * in the peripheral registers.
++ */
++#define REALTEK_GPIO_REG_CNR 0x00
++/* Clear bit (0) for input, set bit (1) for output */
++#define REALTEK_GPIO_REG_DIR 0x08
++#define REALTEK_GPIO_REG_DATA 0x0C
++/* Read bit for IRQ status, write 1 to clear IRQ */
++#define REALTEK_GPIO_REG_ISR 0x10
++/* Two bits per GPIO in IMR registers */
++#define REALTEK_GPIO_REG_IMR 0x14
++#define REALTEK_GPIO_REG_IMR_AB 0x14
++#define REALTEK_GPIO_REG_IMR_CD 0x18
++#define REALTEK_GPIO_IMR_LINE_MASK GENMASK(1, 0)
++#define REALTEK_GPIO_IRQ_EDGE_FALLING 1
++#define REALTEK_GPIO_IRQ_EDGE_RISING 2
++#define REALTEK_GPIO_IRQ_EDGE_BOTH 3
++
++#define REALTEK_GPIO_MAX 32
++#define REALTEK_GPIO_PORTS_PER_BANK 4
++
++/**
++ * realtek_gpio_ctrl - Realtek Otto GPIO driver data
++ *
++ * @gc: Associated gpio_chip instance
++ * @base: Base address of the register block for a GPIO bank
++ * @lock: Lock for accessing the IRQ registers and values
++ * @intr_mask: Mask for interrupts lines
++ * @intr_type: Interrupt type selection
++ *
++ * Because the interrupt mask register (IMR) combines the function of IRQ type
++ * selection and masking, two extra values are stored. @intr_mask is used to
++ * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
++ * the selected interrupt types. The logical AND of these values is written to
++ * IMR on changes.
++ */
++struct realtek_gpio_ctrl {
++ struct gpio_chip gc;
++ void __iomem *base;
++ raw_spinlock_t lock;
++ u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
++ u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
++};
++
++/* Expand with more flags as devices with other quirks are added */
++enum realtek_gpio_flags {
++ /*
++ * Allow disabling interrupts, for cases where the port order is
++ * unknown. This may result in a port mismatch between ISR and IMR.
++ * An interrupt would appear to come from a different line than the
++ * line the IRQ handler was assigned to, causing uncaught interrupts.
++ */
++ GPIO_INTERRUPTS_DISABLED = BIT(0),
++};
++
++static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
++{
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
++
++ return container_of(gc, struct realtek_gpio_ctrl, gc);
++}
++
++/*
++ * Normal port order register access
++ *
++ * Port information is stored with the first port at offset 0, followed by the
++ * second, etc. Most registers store one bit per GPIO and use a u8 value per
++ * port. The two interrupt mask registers store two bits per GPIO, so use u16
++ * values.
++ */
++static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
++ unsigned int port, u16 irq_type, u16 irq_mask)
++{
++ iowrite16(irq_type & irq_mask, ctrl->base + REALTEK_GPIO_REG_IMR + 2 * port);
++}
++
++static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
++ unsigned int port, u8 mask)
++{
++ iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + port);
++}
++
++static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
++{
++ return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + port);
++}
++
++/* Set the rising and falling edge mask bits for a GPIO port pin */
++static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
++{
++ return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
++}
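++/*
++ * Example: GPIO line 11 maps to port = 11 / 8 = 1 (port B) and port_pin = 3,
++ * so an IRQ_TYPE_EDGE_BOTH request stores the value 3 in bits 7:6 of
++ * intr_type[1]. realtek_gpio_write_imr() writes intr_type & intr_mask, so the
++ * edge selection only reaches the hardware while the line is unmasked.
++ */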
++
++static void realtek_gpio_irq_ack(struct irq_data *data)
++{
++ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
++ irq_hw_number_t line = irqd_to_hwirq(data);
++ unsigned int port = line / 8;
++ unsigned int port_pin = line % 8;
++
++ realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
++}
++
++static void realtek_gpio_irq_unmask(struct irq_data *data)
++{
++ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
++ unsigned int line = irqd_to_hwirq(data);
++ unsigned int port = line / 8;
++ unsigned int port_pin = line % 8;
++ unsigned long flags;
++ u16 m;
++
++ raw_spin_lock_irqsave(&ctrl->lock, flags);
++ m = ctrl->intr_mask[port];
++ m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
++ ctrl->intr_mask[port] = m;
++ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
++ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
++}
++
++static void realtek_gpio_irq_mask(struct irq_data *data)
++{
++ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
++ unsigned int line = irqd_to_hwirq(data);
++ unsigned int port = line / 8;
++ unsigned int port_pin = line % 8;
++ unsigned long flags;
++ u16 m;
++
++ raw_spin_lock_irqsave(&ctrl->lock, flags);
++ m = ctrl->intr_mask[port];
++ m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
++ ctrl->intr_mask[port] = m;
++ realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
++ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
++}
++
++static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
++{
++ struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
++ unsigned int line = irqd_to_hwirq(data);
++ unsigned int port = line / 8;
++ unsigned int port_pin = line % 8;
++ unsigned long flags;
++ u16 type, t;
++
++ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
++ case IRQ_TYPE_EDGE_FALLING:
++ type = REALTEK_GPIO_IRQ_EDGE_FALLING;
++ break;
++ case IRQ_TYPE_EDGE_RISING:
++ type = REALTEK_GPIO_IRQ_EDGE_RISING;
++ break;
++ case IRQ_TYPE_EDGE_BOTH:
++ type = REALTEK_GPIO_IRQ_EDGE_BOTH;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ irq_set_handler_locked(data, handle_edge_irq);
++
++ raw_spin_lock_irqsave(&ctrl->lock, flags);
++ t = ctrl->intr_type[port];
++ t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
++ t |= realtek_gpio_imr_bits(port_pin, type);
++ ctrl->intr_type[port] = t;
++ realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
++ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
++
++ return 0;
++}
++
++static void realtek_gpio_irq_handler(struct irq_desc *desc)
++{
++ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
++ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
++ struct irq_chip *irq_chip = irq_desc_get_chip(desc);
++ unsigned int lines_done;
++ unsigned int port_pin_count;
++ unsigned int irq;
++ unsigned long status;
++ int offset;
++
++ chained_irq_enter(irq_chip, desc);
++
++ for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
++ status = realtek_gpio_read_isr(ctrl, lines_done / 8);
++ port_pin_count = min(gc->ngpio - lines_done, 8U);
++ for_each_set_bit(offset, &status, port_pin_count) {
++			irq = irq_find_mapping(gc->irq.domain, lines_done + offset);
++ generic_handle_irq(irq);
++ }
++ }
++
++ chained_irq_exit(irq_chip, desc);
++}
++
++static int realtek_gpio_irq_init(struct gpio_chip *gc)
++{
++ struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
++ unsigned int port;
++
++ for (port = 0; (port * 8) < gc->ngpio; port++) {
++ realtek_gpio_write_imr(ctrl, port, 0, 0);
++ realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
++ }
++
++ return 0;
++}
++
++static struct irq_chip realtek_gpio_irq_chip = {
++ .name = "realtek-otto-gpio",
++ .irq_ack = realtek_gpio_irq_ack,
++ .irq_mask = realtek_gpio_irq_mask,
++ .irq_unmask = realtek_gpio_irq_unmask,
++ .irq_set_type = realtek_gpio_irq_set_type,
++};
++
++static const struct of_device_id realtek_gpio_of_match[] = {
++ {
++ .compatible = "realtek,otto-gpio",
++ .data = (void *)GPIO_INTERRUPTS_DISABLED,
++ },
++ {
++ .compatible = "realtek,rtl8380-gpio",
++ },
++ {
++ .compatible = "realtek,rtl8390-gpio",
++ },
++ {}
++};
++MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
++
++static int realtek_gpio_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ unsigned int dev_flags;
++ struct gpio_irq_chip *girq;
++ struct realtek_gpio_ctrl *ctrl;
++ u32 ngpios;
++ int err, irq;
++
++ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
++ if (!ctrl)
++ return -ENOMEM;
++
++ dev_flags = (unsigned int) device_get_match_data(dev);
++
++ ngpios = REALTEK_GPIO_MAX;
++ device_property_read_u32(dev, "ngpios", &ngpios);
++
++ if (ngpios > REALTEK_GPIO_MAX) {
++ dev_err(&pdev->dev, "invalid ngpios (max. %d)\n",
++ REALTEK_GPIO_MAX);
++ return -EINVAL;
++ }
++
++ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(ctrl->base))
++ return PTR_ERR(ctrl->base);
++
++ raw_spin_lock_init(&ctrl->lock);
++
++ err = bgpio_init(&ctrl->gc, dev, 4,
++ ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
++ ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
++ BGPIOF_BIG_ENDIAN_BYTE_ORDER);
++ if (err) {
++ dev_err(dev, "unable to init generic GPIO");
++ return err;
++ }
++
++ ctrl->gc.ngpio = ngpios;
++ ctrl->gc.owner = THIS_MODULE;
++
++ irq = platform_get_irq_optional(pdev, 0);
++ if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
++ girq = &ctrl->gc.irq;
++ girq->chip = &realtek_gpio_irq_chip;
++ girq->default_type = IRQ_TYPE_NONE;
++ girq->handler = handle_bad_irq;
++ girq->parent_handler = realtek_gpio_irq_handler;
++ girq->num_parents = 1;
++ girq->parents = devm_kcalloc(dev, girq->num_parents,
++ sizeof(*girq->parents), GFP_KERNEL);
++ if (!girq->parents)
++ return -ENOMEM;
++ girq->parents[0] = irq;
++ girq->init_hw = realtek_gpio_irq_init;
++ }
++
++ return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
++}
++
++static struct platform_driver realtek_gpio_driver = {
++ .driver = {
++ .name = "realtek-otto-gpio",
++ .of_match_table = realtek_gpio_of_match,
++ },
++ .probe = realtek_gpio_probe,
++};
++module_platform_driver(realtek_gpio_driver);
++
++MODULE_DESCRIPTION("Realtek Otto GPIO support");
++MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
++MODULE_LICENSE("GPL v2");
diff --git a/target/linux/realtek/patches-5.4/703-include-linux-add-phy-ops-for-rtl838x.patch b/target/linux/realtek/patches-5.4/703-include-linux-add-phy-ops-for-rtl838x.patch
index 03accd1e07..3682eb30a3 100644
--- a/target/linux/realtek/patches-5.4/703-include-linux-add-phy-ops-for-rtl838x.patch
+++ b/target/linux/realtek/patches-5.4/703-include-linux-add-phy-ops-for-rtl838x.patch
@@ -1,6 +1,6 @@
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
-@@ -644,6 +644,10 @@ struct phy_driver {
+@@ -645,6 +645,10 @@ struct phy_driver {
struct ethtool_tunable *tuna,
const void *data);
int (*set_loopback)(struct phy_device *dev, bool enable);
diff --git a/target/linux/realtek/patches-5.4/704-include-linux-add-phy-hsgmii-mode.patch b/target/linux/realtek/patches-5.4/704-include-linux-add-phy-hsgmii-mode.patch
new file mode 100644
index 0000000000..9a61a70c44
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/704-include-linux-add-phy-hsgmii-mode.patch
@@ -0,0 +1,19 @@
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -93,6 +93,7 @@
+ PHY_INTERFACE_MODE_SMII,
+ PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_MOCA,
++ PHY_INTERFACE_MODE_HSGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+@@ -163,6 +164,8 @@
+ return "xgmii";
+ case PHY_INTERFACE_MODE_MOCA:
+ return "moca";
++ case PHY_INTERFACE_MODE_HSGMII:
++ return "hsgmii";
+ case PHY_INTERFACE_MODE_QSGMII:
+ return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
diff --git a/target/linux/realtek/patches-5.4/706-sysled.patch b/target/linux/realtek/patches-5.4/706-sysled.patch
new file mode 100644
index 0000000000..184aa70b44
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/706-sysled.patch
@@ -0,0 +1,288 @@
+From c1a89fdf22862379bb4150fc76504e2d3384cd67 Mon Sep 17 00:00:00 2001
+From: Bert Vermeulen <bert@biot.com>
+Date: Mon, 1 Mar 2021 12:41:35 +0100
+Subject: [PATCH] mfd: Add Realtek RTL838x/RTL839x sys-led driver
+
+---
+ drivers/mfd/Kconfig | 11 ++
+ drivers/mfd/Makefile | 1 +
+ drivers/mfd/realtek-eio.c | 243 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 255 insertions(+)
+ create mode 100644 drivers/mfd/realtek-eio.c
+
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -923,6 +923,16 @@ config MFD_RETU
+ Retu and Tahvo are a multi-function devices found on Nokia
+ Internet Tablets (770, N800 and N810).
+
++config MFD_REALTEK_EIO
++ tristate "Realtek external LED and GPIO driver"
++ select MFD_CORE
++ select MFD_SYSCON
++ select GENERIC_PINCONF
++ default y
++ help
++ Say yes here if you want external LED/GPIO support for Realtek
++ switch SoCs.
++
+ config MFD_PCF50633
+ tristate "NXP PCF50633"
+ depends on I2C
+--- /dev/null
++++ b/drivers/mfd/realtek-eio.c
+@@ -0,0 +1,246 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/leds.h>
++#include <linux/mfd/core.h>
++#include <linux/mfd/syscon.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++
++#define REALTEK_EIO_GLOBAL_CTRL 0x0
++
++/*
++ * Management of external RTL8231 GPIO expanders.
++ * One RTL8231's GPIO registers can be shadowed to the internal GPIO_DIR
++ * and GPIO_DAT registers.
++ */
++#define RTL8380_EIO_GPIO_INDIRECT_ACCESS 0x9C
++#define RTL8380_EIO_GPIO_CTRL 0xE0
++#define RTL8380_EIO_GPIO_DIR(pin) (0xE4 + 4*((pin)/32))
++#define RTL8380_EIO_GPIO_DAT(pin) (0xEC + 4*((pin)/32))
++
++struct realtek_eio_ctrl;
++
++struct realtek_eio_data {
++ unsigned int sys_led_pos;
++ const struct mfd_cell *mfd_devices;
++ unsigned int mfd_device_count;
++};
++
++struct realtek_eio_ctrl {
++ struct device *dev;
++ struct regmap *map;
++ const struct realtek_eio_data *data;
++ struct led_classdev sys_led;
++ bool active_low;
++};
++
++
++#define OF_MFD_CELL(_name, _compat) \
++ { \
++ .name = (_name), \
++ .of_compatible = (_compat), \
++ }
++
++/*
++ * Realtek hardware system LED
++ *
++ * The switch SoC supports one hardware managed direct LED output
++ * to manage a system LED, with two supported blinking rates.
++ */
++enum {
++ REALTEK_SYS_LED_OFF = 0,
++ REALTEK_SYS_LED_BLINK_64MS,
++ REALTEK_SYS_LED_BLINK_1024MS,
++ REALTEK_SYS_LED_ON
++};
++
++static void realtek_sys_led_set(const struct realtek_eio_ctrl *ctrl,
++ unsigned int mode)
++{
++ regmap_update_bits(ctrl->map, REALTEK_EIO_GLOBAL_CTRL,
++ (0x3 << ctrl->data->sys_led_pos),
++ ((mode & 0x3) << ctrl->data->sys_led_pos));
++}
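++/*
++ * Example: on RTL8380 sys_led_pos is 16, so REALTEK_SYS_LED_ON (0x3) updates
++ * bits 17:16 of the global control register; RTL8390 uses bits 16:15.
++ */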
++
++static void realtek_sys_led_brightness_set(struct led_classdev *led_cdev,
++ enum led_brightness brightness)
++{
++ struct realtek_eio_ctrl *ctrl =
++ container_of(led_cdev, struct realtek_eio_ctrl, sys_led);
++
++ if ((!ctrl->active_low && brightness == LED_OFF) ||
++ (ctrl->active_low && brightness != LED_OFF))
++ realtek_sys_led_set(ctrl, REALTEK_SYS_LED_OFF);
++ else
++ realtek_sys_led_set(ctrl, REALTEK_SYS_LED_ON);
++}
++
++static enum led_brightness realtek_sys_led_brightness_get(
++ struct led_classdev *led_cdev)
++{
++ struct realtek_eio_ctrl *ctrl =
++ container_of(led_cdev, struct realtek_eio_ctrl, sys_led);
++ u32 val;
++
++ regmap_read(ctrl->map, REALTEK_EIO_GLOBAL_CTRL, &val);
++ val = (val >> ctrl->data->sys_led_pos) & 0x3;
++
++ if ((!ctrl->active_low && val == REALTEK_SYS_LED_OFF) ||
++ (ctrl->active_low && val == REALTEK_SYS_LED_ON))
++ return LED_OFF;
++ else
++ return LED_ON;
++}
++
++static int realtek_sys_led_blink_set(struct led_classdev *led_cdev,
++ unsigned long *delay_on, unsigned long *delay_off)
++{
++ struct realtek_eio_ctrl *ctrl =
++ container_of(led_cdev, struct realtek_eio_ctrl, sys_led);
++ u32 blink_interval = *delay_on + *delay_off;
++
++	/* Split the total period at twice the geometric mean of 64 and 1024 (2 * 256 ms) */
++ if (blink_interval == 0 || blink_interval > 2*256) {
++ *delay_on = 1024;
++ *delay_off = 1024;
++ realtek_sys_led_set(ctrl, REALTEK_SYS_LED_BLINK_1024MS);
++ }
++ else {
++ *delay_on = 64;
++ *delay_off = 64;
++ realtek_sys_led_set(ctrl, REALTEK_SYS_LED_BLINK_64MS);
++ }
++
++ return 0;
++}
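++/*
++ * With the kernel "timer" LED trigger, a request of delay_on = delay_off =
++ * 500 ms gives a 1000 ms total interval, above the 512 ms split, so the
++ * 1024 ms hardware blink rate is reported back; shorter non-zero requests
++ * map to the 64 ms rate.
++ */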
++
++static int realtek_sys_led_probe(struct realtek_eio_ctrl *ctrl,
++ struct device *parent, struct device_node *np)
++{
++ struct led_classdev *sys_led = &ctrl->sys_led;
++ struct led_init_data init_data = {};
++
++ init_data.fwnode = of_fwnode_handle(np);
++
++ ctrl->active_low = of_property_read_bool(np, "active-low");
++
++ sys_led->max_brightness = 1;
++ sys_led->brightness_set = realtek_sys_led_brightness_set;
++ sys_led->brightness_get = realtek_sys_led_brightness_get;
++ sys_led->blink_set = realtek_sys_led_blink_set;
++
++ return devm_led_classdev_register_ext(parent, sys_led, &init_data);
++}
++
++static const struct mfd_cell rtl8380_mfd_devices[] = {
++ OF_MFD_CELL("realtek-eio-port-leds", "realtek,rtl8380-eio-port-led"),
++ OF_MFD_CELL("realtek-eio-mdio", "realtek,rtl8380-eio-mdio"),
++ OF_MFD_CELL("realtek-eio-pinctrl", "realtek,rtl8380-eio-pinctrl"),
++};
++
++static const struct realtek_eio_data rtl8380_eio_data = {
++ .sys_led_pos = 16,
++ .mfd_devices = rtl8380_mfd_devices,
++ .mfd_device_count = ARRAY_SIZE(rtl8380_mfd_devices)
++};
++
++static const struct mfd_cell rtl8390_mfd_devices[] = {
++ OF_MFD_CELL("realtek-eio-port-leds", "realtek,rtl8390-eio-port-led"),
++};
++
++static struct realtek_eio_data rtl8390_eio_data = {
++ .sys_led_pos = 15,
++ .mfd_devices = rtl8390_mfd_devices,
++ .mfd_device_count = ARRAY_SIZE(rtl8390_mfd_devices)
++};
++
++static const struct of_device_id of_realtek_eio_match[] = {
++ {
++ .compatible = "realtek,rtl8380-eio",
++ .data = &rtl8380_eio_data,
++ },
++ {
++ .compatible = "realtek,rtl8390-eio",
++ .data = &rtl8390_eio_data,
++ },
++	{}
++};
++
++MODULE_DEVICE_TABLE(of, of_realtek_eio_match);
++
++static int realtek_eio_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct device_node *np = dev->of_node;
++ struct device_node *np_sys_led;
++ const struct of_device_id *match;
++ struct realtek_eio_ctrl *ctrl;
++	int err;
++	unsigned int val, r;
++
++ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
++ if (!ctrl)
++ return -ENOMEM;
++
++ match = of_match_device(of_realtek_eio_match, dev);
++ if (match)
++ ctrl->data = (struct realtek_eio_data *) match->data;
++ else {
++ dev_err(dev, "no device match\n");
++ return -EINVAL;
++ }
++
++ ctrl->dev = dev;
++
++ if (!np) {
++ dev_err(dev, "no DT node found\n");
++ return -EINVAL;
++ }
++
++ ctrl->map = device_node_to_regmap(np);
++ if (!ctrl->map) {
++ dev_err(dev, "failed to get regmap\n");
++ return -EINVAL;
++ }
++
++ /* Parse optional sys-led child */
++ np_sys_led = of_get_child_by_name(np, "sys-led");
++ if (IS_ERR(np_sys_led))
++ return PTR_ERR(np_sys_led);
++
++ if (np_sys_led) {
++ err = realtek_sys_led_probe(ctrl, dev, np_sys_led);
++ if (err)
++ return err;
++ }
++
++ /* Find sub-devices */
++ if (ctrl->data->mfd_devices)
++ mfd_add_devices(dev, 0, ctrl->data->mfd_devices,
++ ctrl->data->mfd_device_count, NULL, 0, NULL);
++
++ /* Dump register values */
++ for (r = 0; r <= regmap_get_max_register(ctrl->map); r += 4) {
++ regmap_read(ctrl->map, r, &val);
++ dev_info(dev, "%02x %08x\n", r, val);
++ }
++
++ return 0;
++}
++
++static struct platform_driver realtek_eio_driver = {
++ .probe = realtek_eio_probe,
++ .driver = {
++ .name = "realtek-ext-io",
++ .of_match_table = of_realtek_eio_match
++ }
++};
++
++module_platform_driver(realtek_eio_driver);
++
++MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
++MODULE_DESCRIPTION("Realtek switch SoC external LED/GPIO driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -255,4 +255,4 @@ obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o
+ obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
+ obj-$(CONFIG_MFD_STMFX) += stmfx.o
+-
++obj-$(CONFIG_MFD_REALTEK_EIO) += realtek-eio.o
diff --git a/target/linux/realtek/patches-5.4/707-dsa-trailer-hack.patch b/target/linux/realtek/patches-5.4/707-dsa-trailer-hack.patch
new file mode 100644
index 0000000000..7f4b1aaa13
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/707-dsa-trailer-hack.patch
@@ -0,0 +1,44 @@
+diff -urpN linux-5.4.139.old/include/net/dsa.h linux-5.4.139/include/net/dsa.h
+--- linux-5.4.139.old/include/net/dsa.h 2021-08-12 11:07:00.456095739 +0700
++++ linux-5.4.139/include/net/dsa.h 2021-08-12 11:11:24.556448045 +0700
+@@ -43,6 +43,7 @@ struct phylink_link_state;
+ #define DSA_TAG_PROTO_SJA1105_VALUE 13
+ #define DSA_TAG_PROTO_KSZ8795_VALUE 14
+ #define DSA_TAG_PROTO_RTL4_A_VALUE 17
++#define DSA_TAG_PROTO_RTL83XX_VALUE 18
+
+ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
+@@ -61,6 +62,7 @@ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
+ DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
+ DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
++ DSA_TAG_PROTO_RTL83XX = DSA_TAG_PROTO_RTL83XX_VALUE,
+ };
+
+ struct packet_type;
+diff -urpN linux-5.4.139.old/net/dsa/Kconfig linux-5.4.139/net/dsa/Kconfig
+--- linux-5.4.139.old/net/dsa/Kconfig 2021-08-12 11:08:44.196228650 +0700
++++ linux-5.4.139/net/dsa/Kconfig 2021-08-12 11:09:01.260252005 +0700
+@@ -120,4 +120,10 @@ config NET_DSA_TAG_TRAILER
+ Say Y or M if you want to enable support for tagging frames at
+ with a trailed. e.g. Marvell 88E6060.
+
++config NET_DSA_TAG_RTL83XX
++ tristate "Tag driver for RTL83XX switches"
++ help
++ Say Y or M if you want to enable support for tagging frames for
++ the RTL83XX chipsets.
++
+ endif
+diff -urpN linux-5.4.139.old/net/dsa/Makefile linux-5.4.139/net/dsa/Makefile
+--- linux-5.4.139.old/net/dsa/Makefile 2021-08-12 11:08:44.196228650 +0700
++++ linux-5.4.139/net/dsa/Makefile 2021-08-12 11:09:01.260252005 +0700
+@@ -15,3 +15,4 @@ obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk
+ obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+ obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
+ obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
++obj-$(CONFIG_NET_DSA_TAG_RTL83XX) += tag_rtl83xx.o
+\ No newline at end of file
+diff -urpN linux-5.4.139.old/net/dsa/tag_rtl83xx.c linux-5.4.139/net/dsa/tag_rtl83xx.c
+diff -urpN linux-5.4.139.old/net/dsa/tag_trailer.c linux-5.4.139/net/dsa/tag_trailer.c
diff --git a/target/linux/realtek/patches-5.4/707-reboot.patch b/target/linux/realtek/patches-5.4/707-reboot.patch
new file mode 100644
index 0000000000..bdea960c45
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/707-reboot.patch
@@ -0,0 +1,7 @@
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -172,3 +172,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtens
+ obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
+ obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
+ obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
++obj-y += edgecore_reboot.o
diff --git a/target/linux/realtek/patches-5.4/708-dsa-backports.patch b/target/linux/realtek/patches-5.4/708-dsa-backports.patch
new file mode 100644
index 0000000000..103d6ba113
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/708-dsa-backports.patch
@@ -0,0 +1,239 @@
+diff -urpN linux-5.4.139.old/include/net/dsa.h linux-5.4.139/include/net/dsa.h
+--- linux-5.4.139.old/include/net/dsa.h 2021-08-12 12:42:24.015154362 +0700
++++ linux-5.4.139/include/net/dsa.h 2021-08-12 12:44:13.935282068 +0700
+@@ -432,6 +432,18 @@ static inline bool dsa_port_is_vlan_filt
+ return dp->vlan_filtering;
+ }
+
++static inline
++struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
++{
++ if (!dp->bridge_dev)
++ return NULL;
++
++ if (dp->lag_dev)
++ return dp->lag_dev;
++
++ return dp->slave;
++}
++
+ typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
+ bool is_static, void *data);
+ struct dsa_switch_ops {
+diff -urpN linux-5.4.139.old/net/dsa/dsa_priv.h linux-5.4.139/net/dsa/dsa_priv.h
+--- linux-5.4.139.old/net/dsa/dsa_priv.h 2021-08-12 12:42:24.015154362 +0700
++++ linux-5.4.139/net/dsa/dsa_priv.h 2021-08-12 12:42:40.619173663 +0700
+@@ -203,18 +203,30 @@ void dsa_port_phylink_mac_link_up(struct
+ struct phy_device *phydev);
+ extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
+
+-static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
+- struct net_device *dev)
++static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
++ struct net_device *dev)
+ {
+- /* Switchdev offloading can be configured on: */
++ return dsa_port_to_bridge_port(dp) == dev;
++}
+
+- if (dev == dp->slave)
+- /* DSA ports directly connected to a bridge. */
+- return true;
++static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
++ struct net_device *bridge_dev)
++{
++ /* DSA ports connected to a bridge, and event was emitted
++ * for the bridge.
++ */
++ return dp->bridge_dev == bridge_dev;
++}
++
++/* Returns true if any port of this tree offloads the given net_device */
++static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
++ struct net_device *dev)
++{
++ struct dsa_port *dp;
+
+- if (dp->lag_dev == dev)
+- /* DSA ports connected to a bridge via a LAG */
+- return true;
++ list_for_each_entry(dp, &dst->ports, list)
++ if (dsa_port_offloads_bridge_port(dp, dev))
++ return true;
+
+ return false;
+ }
+diff -urpN linux-5.4.139.old/net/dsa/port.c linux-5.4.139/net/dsa/port.c
+--- linux-5.4.139.old/net/dsa/port.c 2021-08-12 12:41:26.839087913 +0700
++++ linux-5.4.139/net/dsa/port.c 2021-08-12 12:46:43.067455364 +0700
+@@ -210,17 +210,31 @@ int dsa_port_lag_join(struct dsa_port *d
+ .lag = lag,
+ .info = uinfo,
+ };
++ struct net_device *bridge_dev;
+ int err;
+
+ dsa_lag_map(dp->ds->dst, lag);
+ dp->lag_dev = lag;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
+- if (err) {
+- dp->lag_dev = NULL;
+- dsa_lag_unmap(dp->ds->dst, lag);
+- }
++ if (err)
++ goto err_lag_join;
+
++ bridge_dev = netdev_master_upper_dev_get(lag);
++ if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
++ return 0;
++
++ err = dsa_port_bridge_join(dp, bridge_dev);
++ if (err)
++ goto err_bridge_join;
++
++ return 0;
++
++err_bridge_join:
++ dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
++err_lag_join:
++ dp->lag_dev = NULL;
++ dsa_lag_unmap(dp->ds->dst, lag);
+ return err;
+ }
+
+diff -urpN linux-5.4.139.old/net/dsa/port.c.orig linux-5.4.139/net/dsa/port.c.orig
+diff -urpN linux-5.4.139.old/net/dsa/slave.c linux-5.4.139/net/dsa/slave.c
+--- linux-5.4.139.old/net/dsa/slave.c 2021-08-12 12:42:24.015154362 +0700
++++ linux-5.4.139/net/dsa/slave.c 2021-08-12 12:42:40.619173663 +0700
+@@ -284,28 +284,43 @@ static int dsa_slave_port_attr_set(struc
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int ret;
+
+- if (!dsa_port_offloads_netdev(dp, attr->orig_dev))
+- return -EOPNOTSUPP;
+-
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
++ if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
++ if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
++ if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
++ if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
+ trans, NULL);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans, NULL);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
++ if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
++ return -EOPNOTSUPP;
++
+ ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans, NULL);
+ break;
+ default:
+@@ -324,9 +339,6 @@ static int dsa_slave_vlan_add(struct net
+ struct switchdev_obj_port_vlan vlan;
+ int err;
+
+- if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+- return -EOPNOTSUPP;
+-
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+@@ -364,11 +376,13 @@ static int dsa_slave_port_obj_add(struct
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+- if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
++ if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
++ if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
++ return -EOPNOTSUPP;
+ /* DSA can directly translate this to a normal MDB add,
+ * but on the CPU port.
+ */
+@@ -376,6 +390,9 @@ static int dsa_slave_port_obj_add(struct
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
++ return -EOPNOTSUPP;
++
+ err = dsa_slave_vlan_add(dev, obj, trans);
+ break;
+ default:
+@@ -391,9 +408,6 @@ static int dsa_slave_vlan_del(struct net
+ {
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+- if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
+- return -EOPNOTSUPP;
+-
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+@@ -411,17 +425,22 @@ static int dsa_slave_port_obj_del(struct
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+- if (!dsa_port_offloads_netdev(dp, obj->orig_dev))
++ if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
++ if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
++ return -EOPNOTSUPP;
+ /* DSA can directly translate this to a normal MDB add,
+ * but on the CPU port.
+ */
+ err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
++ return -EOPNOTSUPP;
++
+ err = dsa_slave_vlan_del(dev, obj);
+ break;
+ default:
+@@ -1801,6 +1820,14 @@ static int dsa_slave_switchdev_event(str
+ if (!fdb_info->added_by_user &&
+ !dp->ds->assisted_learning_on_cpu_port)
+ return NOTIFY_DONE;
++
++ /* When the bridge learns an address on an offloaded
++ * LAG we don't want to send traffic to the CPU, the
++ * other ports bridged with the LAG should be able to
++ * autonomously forward towards it.
++ */
++ if (dsa_tree_offloads_bridge_port(dp->ds->dst, dev))
++ return NOTIFY_DONE;
+ }
+
+ if (!dp->ds->ops->port_fdb_add || !dp->ds->ops->port_fdb_del)
diff --git a/target/linux/realtek/patches-5.4/710-adt7470.patch b/target/linux/realtek/patches-5.4/710-adt7470.patch
new file mode 100644
index 0000000000..64798b7ddf
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/710-adt7470.patch
@@ -0,0 +1,20 @@
+--- a/drivers/hwmon/adt7470.c
++++ b/drivers/hwmon/adt7470.c
+@@ -1271,10 +1271,17 @@ static const struct i2c_device_id adt747
+ };
+ MODULE_DEVICE_TABLE(i2c, adt7470_id);
+
++static const struct of_device_id __maybe_unused adt7470_of_match[] = {
++	{ .compatible = "adi,adt7470" },
++	{ /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, adt7470_of_match);
++
+ static struct i2c_driver adt7470_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adt7470",
++	.of_match_table = of_match_ptr(adt7470_of_match),
+ },
+ .probe = adt7470_probe,
+ .remove = adt7470_remove,
diff --git a/target/linux/realtek/patches-5.4/711-ec4100.patch b/target/linux/realtek/patches-5.4/711-ec4100.patch
new file mode 100644
index 0000000000..950912ce8d
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/711-ec4100.patch
@@ -0,0 +1,150 @@
+Index: linux-5.4.111/drivers/net/dsa/rtl83xx/common.c
+===================================================================
+--- linux-5.4.111.orig/drivers/net/dsa/rtl83xx/common.c
++++ linux-5.4.111/drivers/net/dsa/rtl83xx/common.c
+@@ -1609,6 +1609,17 @@ static int __init rtl83xx_sw_probe(struc
+ rtl930x_dbgfs_init(priv);
+ }
+
++ if (of_machine_is_compatible("edgecore,ecs4100-12ph")) {
++ sw_w32(0x000000FF, 0x110);
++ sw_w32(0x00000000, 0x114);
++ sw_w32(0x00000000, 0x118);
++ sw_w32(0x000f0000, 0x11c);
++ sw_w32(0x00000000, 0x120);
++ sw_w32(0x000f0000, 0x124);
++ sw_w32(0x3DEA, 0xec);
++ sw_w32(0x707568, 0xe4);
++ }
++
+ return 0;
+
+ err_register_fib_nb:
+Index: linux-5.4.111/drivers/net/phy/rtl83xx-phy.c
+===================================================================
+--- linux-5.4.111.orig/drivers/net/phy/rtl83xx-phy.c
++++ linux-5.4.111/drivers/net/phy/rtl83xx-phy.c
+@@ -1439,6 +1439,14 @@ static int rtl8380_configure_rtl8214fc(s
+ write_phy(mac + i, 0xfff, 0x1e, 0x0000);
+ }
+
++ if (of_machine_is_compatible("edgecore,ecs4100-12ph")) {
++ printk("setting edgecore specific SFP modes\n");
++ rtl8380_rtl8214fc_media_set(mac + 0, 0);
++ rtl8380_rtl8214fc_media_set(mac + 1, 0);
++ rtl8380_rtl8214fc_media_set(mac + 2, 1);
++ rtl8380_rtl8214fc_media_set(mac + 3, 1);
++ }
++
+ return 0;
+ }
+
+Index: linux-5.4.111/arch/mips/rtl838x/setup.c
+===================================================================
+--- linux-5.4.111.orig/arch/mips/rtl838x/setup.c
++++ linux-5.4.111/arch/mips/rtl838x/setup.c
+@@ -46,21 +46,6 @@ static void rtl838x_restart(char *comman
+ sw_w32(1, RTL838X_RST_GLB_CTRL_1);
+ }
+
+-static void rtl839x_restart(char *command)
+-{
+- /* SoC reset vector (in flash memory): on RTL839x platform preferred way to reset */
+- void (*f)(void) = (void *) 0xbfc00000;
+-
+- pr_info("System restart.\n");
+- /* Reset SoC */
+- sw_w32(0xFFFFFFFF, RTL839X_RST_GLB_CTRL);
+- /* and call reset vector */
+- f();
+- /* If this fails, halt the CPU */
+- while
+- (1);
+-}
+-
+ static void rtl930x_restart(char *command)
+ {
+ pr_info("System restart.\n");
+@@ -109,8 +94,6 @@ static void __init rtl838x_setup(void)
+ static void __init rtl839x_setup(void)
+ {
+ pr_info("Registering _machine_restart\n");
+- _machine_restart = rtl839x_restart;
+- _machine_halt = rtl838x_halt;
+
+ /* Setup System LED. Bit 14 of RTL839X_LED_GLB_CTRL then allows to toggle it */
+ sw_w32_mask(0, 3 << 15, RTL839X_LED_GLB_CTRL);
+@@ -141,7 +124,6 @@ void __init plat_mem_setup(void)
+ void *dtb;
+
+ set_io_port_base(KSEG1);
+- _machine_restart = rtl838x_restart;
+
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
+Index: linux-5.4.111/drivers/gpio/edgecore_reboot.c
+===================================================================
+--- /dev/null
++++ linux-5.4.111/drivers/gpio/edgecore_reboot.c
+@@ -0,0 +1,61 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2021 John Crispin <john@phrozen.org> */
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/notifier.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/reboot.h>
++#include <linux/module.h>
++#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
++
++static struct notifier_block edgecore_reboot_handler;
++static struct gpio_desc *gpiod;
++static int edgecore_reboot_handle(struct notifier_block *this,
++ unsigned long mode, void *cmd)
++{
++ gpiod_direction_output(gpiod, 0);
++ mdelay(1000);
++
++ pr_emerg("Unable to restart system\n");
++ return NOTIFY_DONE;
++}
++
++static int __init edgecore_reboot_probe(struct platform_device *pdev)
++{
++ int err;
++	enum gpiod_flags flags = GPIOD_IN;
++
++ gpiod = devm_gpiod_get_index(&pdev->dev, NULL, 0, flags);
++ if (!IS_ERR(gpiod))
++ gpiod_set_consumer_name(gpiod, "reboot");
++ else
++ return -EPROBE_DEFER;
++
++ edgecore_reboot_handler.notifier_call = edgecore_reboot_handle;
++ edgecore_reboot_handler.priority = 255;
++ err = register_restart_handler(&edgecore_reboot_handler);
++ if (err)
++		dev_err(&pdev->dev, "can't register restart notifier (err=%d)\n", err);
++
++
++ return 0;
++}
++
++static const struct of_device_id edgecore_reboot_of_ids[] = {
++ { .compatible = "edgecore,reboot"},
++ { /* sentinel */ }
++};
++
++
++static struct platform_driver edgecore_reboot_driver = {
++ .probe = edgecore_reboot_probe,
++ .driver = {
++ .name = "edgecore_reboot",
++ .of_match_table = edgecore_reboot_of_ids,
++ },
++};
++
++module_platform_driver(edgecore_reboot_driver);
diff --git a/target/linux/realtek/patches-5.4/712-fixes.patch b/target/linux/realtek/patches-5.4/712-fixes.patch
new file mode 100644
index 0000000000..68494b0ecc
--- /dev/null
+++ b/target/linux/realtek/patches-5.4/712-fixes.patch
@@ -0,0 +1,23 @@
+--- a/drivers/gpio/gpio-rtl838x.c
++++ b/drivers/gpio/gpio-rtl838x.c
+@@ -348,6 +348,9 @@ static int rtl838x_gpio_probe(struct pla
+ case 0x8391:
+ pr_debug("Found RTL8391 GPIO\n");
+ break;
++ case 0x8392:
++ pr_debug("Found RTL8392 GPIO\n");
++ break;
+ case 0x8393:
+ pr_debug("Found RTL8393 GPIO\n");
+ break;
+--- a/drivers/net/phy/rtl83xx-phy.c
++++ b/drivers/net/phy/rtl83xx-phy.c
+@@ -1399,7 +1399,7 @@ static int rtl8380_configure_rtl8214fc(s
+ for (i = 0; i < 4; i++) {
+ for (l = 0; l < 100; l++) {
+ read_phy(mac + i, 0xb80, 0x10, &val);
+- if (val & 0x40)
++ if (val & 0x80)
+ break;
+ }
+ if (l >= 100) {
--
2.25.1