open-converged-wireless: Import 21.02 based uCentral tree

Signed-off-by: John Crispin <john@phrozen.org>
This commit is contained in:
John Crispin
2021-02-18 14:16:15 +01:00
commit 528a778e38
2024 changed files with 1084952 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
openwrt/

8
Makefile Normal file
View File

@@ -0,0 +1,8 @@
.PHONY: all purge
all:
./dock-run.sh ./build.sh $(TARGET)
purge:
cd openwrt && rm -rf * && rm -rf .*
@echo Done

View File

@@ -0,0 +1,48 @@
From 9a1f2adebe2245175462a6b2758d78b292072505 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 22 Oct 2020 10:29:34 +0200
Subject: [PATCH 01/22] build: build kernel image before building
modules/packages
This is needed for linux 5.10, where modules.builtin is generated from
vmlinux.o
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/kernel-defaults.mk | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/kernel-defaults.mk b/include/kernel-defaults.mk
index e5a0ba367b..b069c1e671 100644
--- a/include/kernel-defaults.mk
+++ b/include/kernel-defaults.mk
@@ -113,7 +113,7 @@ endef
define Kernel/CompileModules/Default
rm -f $(LINUX_DIR)/vmlinux $(LINUX_DIR)/System.map
- +$(KERNEL_MAKE) modules
+ +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all) modules
endef
OBJCOPY_STRIP = -R .reginfo -R .notes -R .note -R .comment -R .mdebug -R .note.gnu.build-id
@@ -137,7 +137,7 @@ endef
define Kernel/CompileImage/Default
rm -f $(TARGET_DIR)/init
- +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all) modules
+ +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all)
$(call Kernel/CopyImage)
endef
@@ -147,7 +147,7 @@ define Kernel/CompileImage/Initramfs
$(CP) $(GENERIC_PLATFORM_DIR)/other-files/init $(TARGET_DIR)/init
$(if $(SOURCE_DATE_EPOCH),touch -hcd "@$(SOURCE_DATE_EPOCH)" $(TARGET_DIR)/init)
rm -rf $(KERNEL_BUILD_DIR)/linux-$(LINUX_VERSION)/usr/initramfs_data.cpio*
- +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all) modules
+ +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all)
$(call Kernel/CopyImage,-initramfs)
endef
else
--
2.25.1

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,41 @@
From cc135eeb1b77fc084e4154d25e13e42b7a2d9150 Mon Sep 17 00:00:00 2001
From: David Bauer <mail@david-bauer.net>
Date: Tue, 16 Feb 2021 22:51:18 +0100
Subject: [PATCH 03/22] generic: ar8216: fix kernel 5.10 compile error
Signed-off-by: David Bauer <mail@david-bauer.net>
---
target/linux/generic/files/drivers/net/phy/ar8216.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/target/linux/generic/files/drivers/net/phy/ar8216.c b/target/linux/generic/files/drivers/net/phy/ar8216.c
index acfa0ebecd..d48a415d50 100644
--- a/target/linux/generic/files/drivers/net/phy/ar8216.c
+++ b/target/linux/generic/files/drivers/net/phy/ar8216.c
@@ -891,7 +891,11 @@ ar8216_phy_write(struct ar8xxx_priv *priv, int addr, int regnum, u16 val)
static int
ar8229_hw_init(struct ar8xxx_priv *priv)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+ phy_interface_t phy_if_mode;
+#else
int phy_if_mode;
+#endif
if (priv->initialized)
return 0;
@@ -899,7 +903,11 @@ ar8229_hw_init(struct ar8xxx_priv *priv)
ar8xxx_write(priv, AR8216_REG_CTRL, AR8216_CTRL_RESET);
ar8xxx_reg_wait(priv, AR8216_REG_CTRL, AR8216_CTRL_RESET, 0, 1000);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+ of_get_phy_mode(priv->pdev->of_node, &phy_if_mode);
+#else
phy_if_mode = of_get_phy_mode(priv->pdev->of_node);
+#endif
if (phy_if_mode == PHY_INTERFACE_MODE_GMII) {
ar8xxx_write(priv, AR8229_REG_OPER_MODE0,
--
2.25.1

View File

@@ -0,0 +1,45 @@
From e1053344ae44ac022e1e8262169d7194442d78a6 Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Date: Tue, 16 Feb 2021 22:56:06 +0100
Subject: [PATCH 04/22] generic: ar8216: update version switch for
of_get_phy_mode fix
Kernel has changed the of_get_phy_mode API in commit 0c65b2b90d13
("net: of_get_phy_mode: Change API to solve int/unit warnings").
This is already included in kernel 5.5, so fix the version switch
(though this will not actually matter for the versions we support).
Similar driver adjustments to account for the API change will
probably be necessary to various other local drivers.
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
---
target/linux/generic/files/drivers/net/phy/ar8216.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/target/linux/generic/files/drivers/net/phy/ar8216.c b/target/linux/generic/files/drivers/net/phy/ar8216.c
index d48a415d50..ef0fc54949 100644
--- a/target/linux/generic/files/drivers/net/phy/ar8216.c
+++ b/target/linux/generic/files/drivers/net/phy/ar8216.c
@@ -891,7 +891,7 @@ ar8216_phy_write(struct ar8xxx_priv *priv, int addr, int regnum, u16 val)
static int
ar8229_hw_init(struct ar8xxx_priv *priv)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)
phy_interface_t phy_if_mode;
#else
int phy_if_mode;
@@ -903,7 +903,7 @@ ar8229_hw_init(struct ar8xxx_priv *priv)
ar8xxx_write(priv, AR8216_REG_CTRL, AR8216_CTRL_RESET);
ar8xxx_reg_wait(priv, AR8216_REG_CTRL, AR8216_CTRL_RESET, 0, 1000);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)
of_get_phy_mode(priv->pdev->of_node, &phy_if_mode);
#else
phy_if_mode = of_get_phy_mode(priv->pdev->of_node);
--
2.25.1

View File

@@ -0,0 +1,36 @@
From c3a5b35f6971a4c6daed3674badb6718058f5ebe Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Date: Tue, 16 Feb 2021 23:16:00 +0100
Subject: [PATCH 05/22] kernel: 5.10: fix busy wait loop in mediatek PPE code
Reapply changes added to 5.4 but not copied to 5.10:
3da4acaa7bba ("kernel: fix busy wait loop in mediatek PPE code")
The intention is for the loop to timeout if the body does not succeed.
The current logic calls time_is_before_jiffies(timeout) which is false
until after the timeout, so the loop body never executes.
time_is_after_jiffies(timeout) will return true until timeout is less
than jiffies, which is the intended behavior here.
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
---
...5-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch b/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch
index fa4803211a..09282175b0 100644
--- a/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch
+++ b/target/linux/generic/pending-5.10/770-15-net-ethernet-mtk_eth_soc-add-support-for-initializin.patch
@@ -185,7 +185,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+{
+ unsigned long timeout = jiffies + HZ;
+
-+ while (time_is_before_jiffies(timeout)) {
++ while (time_is_after_jiffies(timeout)) {
+ if (!(ppe_r32(ppe, MTK_PPE_GLO_CFG) & MTK_PPE_GLO_CFG_BUSY))
+ return 0;
+
--
2.25.1

View File

@@ -0,0 +1,43 @@
From c7c270cedef77aa077f1f0dcc31964b43d573597 Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Date: Tue, 16 Feb 2021 23:25:00 +0100
Subject: [PATCH 06/22] kernel: hack-5.10: make UDP tunneling user-selectable
This applies another patch from 5.4 to 5.10 as well:
de09355f74c3 ("kernel/hack-5.4: make UDP tunneling user-selectable")
UDP tunneling support isn't user-selectable, but it's required by WireGuard
which is, for the time being, an out-of-tree module. We currently work around
this issue by selecting an unrelated module which depends on UDP tunnelling
(VXLAN). This is inconvenient, as it implies this unrelated module needs to be
built-in when doing a monolithic build.
Fix this inconvenience by making UDP tunneling user-selectable in the kernel
configuration.
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
---
.../generic/hack-5.10/249-udp-tunnel-selection.patch | 11 +++++++++++
1 file changed, 11 insertions(+)
create mode 100644 target/linux/generic/hack-5.10/249-udp-tunnel-selection.patch
diff --git a/target/linux/generic/hack-5.10/249-udp-tunnel-selection.patch b/target/linux/generic/hack-5.10/249-udp-tunnel-selection.patch
new file mode 100644
index 0000000000..2c74298dfe
--- /dev/null
+++ b/target/linux/generic/hack-5.10/249-udp-tunnel-selection.patch
@@ -0,0 +1,11 @@
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -315,7 +315,7 @@ config NET_IPVTI
+ on top.
+
+ config NET_UDP_TUNNEL
+- tristate
++ tristate "IP: UDP tunneling support"
+ select NET_IP_TUNNEL
+ default n
+
--
2.25.1

View File

@@ -0,0 +1,347 @@
From 56c65b4ffa04a202b777e8119aaa8069264d7789 Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Date: Tue, 16 Feb 2021 23:31:40 +0100
Subject: [PATCH 07/22] kernel: 5.10: add missing partitions doc syntax commit
This patch has been added to 5.4, but not been copied to 5.10:
7495acb55573 ("kernel: backport mtd commit converting partitions doc syntax")
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
---
...convert-fixed-partitions-to-the-json.patch | 324 ++++++++++++++++++
1 file changed, 324 insertions(+)
create mode 100644 target/linux/generic/backport-5.10/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch
diff --git a/target/linux/generic/backport-5.10/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch b/target/linux/generic/backport-5.10/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch
new file mode 100644
index 0000000000..8aded43526
--- /dev/null
+++ b/target/linux/generic/backport-5.10/401-v5.11-dt-bindings-mtd-convert-fixed-partitions-to-the-json.patch
@@ -0,0 +1,324 @@
+From 04e9ab75267489224364fa510a88ada83e11c325 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
+Date: Thu, 10 Dec 2020 18:23:52 +0100
+Subject: [PATCH] dt-bindings: mtd: convert "fixed-partitions" to the
+ json-schema
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This standardizes its documentation, allows validating with Makefile
+checks and helps writing DTS files.
+
+Noticeable changes:
+1. Dropped "Partitions can be represented by sub-nodes of a flash
+ device." as we also support subpartitions (don't have to be part of
+ flash device node)
+2. Dropped "to Linux" as bindings are meant to be os agnostic.
+
+Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
+Link: https://lore.kernel.org/r/20201210172352.31632-1-zajec5@gmail.com
+Signed-off-by: Rob Herring <robh@kernel.org>
+---
+ .../devicetree/bindings/mtd/partition.txt | 131 +--------------
+ .../mtd/partitions/fixed-partitions.yaml | 152 ++++++++++++++++++
+ 2 files changed, 154 insertions(+), 129 deletions(-)
+ create mode 100644 Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
+
+--- a/Documentation/devicetree/bindings/mtd/partition.txt
++++ b/Documentation/devicetree/bindings/mtd/partition.txt
+@@ -24,137 +24,10 @@ another partitioning method.
+ Available bindings are listed in the "partitions" subdirectory.
+
+
+-Fixed Partitions
+-================
+-
+-Partitions can be represented by sub-nodes of a flash device. This can be used
+-on platforms which have strong conventions about which portions of a flash are
+-used for what purposes, but which don't use an on-flash partition table such
+-as RedBoot.
+-
+-The partition table should be a subnode of the flash node and should be named
+-'partitions'. This node should have the following property:
+-- compatible : (required) must be "fixed-partitions"
+-Partitions are then defined in subnodes of the partitions node.
++Deprecated: partitions defined in flash node
++============================================
+
+ For backwards compatibility partitions as direct subnodes of the flash device are
+ supported. This use is discouraged.
+ NOTE: also for backwards compatibility, direct subnodes that have a compatible
+ string are not considered partitions, as they may be used for other bindings.
+-
+-#address-cells & #size-cells must both be present in the partitions subnode of the
+-flash device. There are two valid values for both:
+-<1>: for partitions that require a single 32-bit cell to represent their
+- size/address (aka the value is below 4 GiB)
+-<2>: for partitions that require two 32-bit cells to represent their
+- size/address (aka the value is 4 GiB or greater).
+-
+-Required properties:
+-- reg : The partition's offset and size within the flash
+-
+-Optional properties:
+-- label : The label / name for this partition. If omitted, the label is taken
+- from the node name (excluding the unit address).
+-- read-only : This parameter, if present, is a hint to Linux that this
+- partition should only be mounted read-only. This is usually used for flash
+- partitions containing early-boot firmware images or data which should not be
+- clobbered.
+-- lock : Do not unlock the partition at initialization time (not supported on
+- all devices)
+-- slc-mode: This parameter, if present, allows one to emulate SLC mode on a
+- partition attached to an MLC NAND thus making this partition immune to
+- paired-pages corruptions
+-
+-Examples:
+-
+-
+-flash@0 {
+- partitions {
+- compatible = "fixed-partitions";
+- #address-cells = <1>;
+- #size-cells = <1>;
+-
+- partition@0 {
+- label = "u-boot";
+- reg = <0x0000000 0x100000>;
+- read-only;
+- };
+-
+- uimage@100000 {
+- reg = <0x0100000 0x200000>;
+- };
+- };
+-};
+-
+-flash@1 {
+- partitions {
+- compatible = "fixed-partitions";
+- #address-cells = <1>;
+- #size-cells = <2>;
+-
+- /* a 4 GiB partition */
+- partition@0 {
+- label = "filesystem";
+- reg = <0x00000000 0x1 0x00000000>;
+- };
+- };
+-};
+-
+-flash@2 {
+- partitions {
+- compatible = "fixed-partitions";
+- #address-cells = <2>;
+- #size-cells = <2>;
+-
+- /* an 8 GiB partition */
+- partition@0 {
+- label = "filesystem #1";
+- reg = <0x0 0x00000000 0x2 0x00000000>;
+- };
+-
+- /* a 4 GiB partition */
+- partition@200000000 {
+- label = "filesystem #2";
+- reg = <0x2 0x00000000 0x1 0x00000000>;
+- };
+- };
+-};
+-
+-flash@3 {
+- partitions {
+- compatible = "fixed-partitions";
+- #address-cells = <1>;
+- #size-cells = <1>;
+-
+- partition@0 {
+- label = "bootloader";
+- reg = <0x000000 0x100000>;
+- read-only;
+- };
+-
+- firmware@100000 {
+- label = "firmware";
+- reg = <0x100000 0xe00000>;
+- compatible = "brcm,trx";
+- };
+-
+- calibration@f00000 {
+- label = "calibration";
+- reg = <0xf00000 0x100000>;
+- compatible = "fixed-partitions";
+- ranges = <0 0xf00000 0x100000>;
+- #address-cells = <1>;
+- #size-cells = <1>;
+-
+- partition@0 {
+- label = "wifi0";
+- reg = <0x000000 0x080000>;
+- };
+-
+- partition@80000 {
+- label = "wifi1";
+- reg = <0x080000 0x080000>;
+- };
+- };
+- };
+-};
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
+@@ -0,0 +1,152 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/mtd/partitions/fixed-partitions.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Fixed partitions
++
++description: |
++ This binding can be used on platforms which have strong conventions about
++ which portions of a flash are used for what purposes, but which don't use an
++ on-flash partition table such as RedBoot.
++
++ The partition table should be a node named "partitions". Partitions are then
++ defined as subnodes.
++
++maintainers:
++ - Rafał Miłecki <rafal@milecki.pl>
++
++properties:
++ compatible:
++ const: fixed-partitions
++
++ "#address-cells": true
++
++ "#size-cells": true
++
++patternProperties:
++ "@[0-9a-f]+$":
++ description: node describing a single flash partition
++ type: object
++
++ properties:
++ reg:
++ description: partition's offset and size within the flash
++ maxItems: 1
++
++ label:
++ description: The label / name for this partition. If omitted, the label
++ is taken from the node name (excluding the unit address).
++
++ read-only:
++ description: This parameter, if present, is a hint that this partition
++ should only be mounted read-only. This is usually used for flash
++ partitions containing early-boot firmware images or data which should
++ not be clobbered.
++ type: boolean
++
++ lock:
++ description: Do not unlock the partition at initialization time (not
++ supported on all devices)
++ type: boolean
++
++ slc-mode:
++ description: This parameter, if present, allows one to emulate SLC mode
++ on a partition attached to an MLC NAND thus making this partition
++ immune to paired-pages corruptions
++ type: boolean
++
++ required:
++ - reg
++
++required:
++ - "#address-cells"
++ - "#size-cells"
++
++additionalProperties: true
++
++examples:
++ - |
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ partition@0 {
++ label = "u-boot";
++ reg = <0x0000000 0x100000>;
++ read-only;
++ };
++
++ uimage@100000 {
++ reg = <0x0100000 0x200000>;
++ };
++ };
++ - |
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <2>;
++
++ /* a 4 GiB partition */
++ partition@0 {
++ label = "filesystem";
++ reg = <0x00000000 0x1 0x00000000>;
++ };
++ };
++ - |
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ /* an 8 GiB partition */
++ partition@0 {
++ label = "filesystem #1";
++ reg = <0x0 0x00000000 0x2 0x00000000>;
++ };
++
++ /* a 4 GiB partition */
++ partition@200000000 {
++ label = "filesystem #2";
++ reg = <0x2 0x00000000 0x1 0x00000000>;
++ };
++ };
++ - |
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ partition@0 {
++ label = "bootloader";
++ reg = <0x000000 0x100000>;
++ read-only;
++ };
++
++ firmware@100000 {
++ compatible = "brcm,trx";
++ label = "firmware";
++ reg = <0x100000 0xe00000>;
++ };
++
++ calibration@f00000 {
++ compatible = "fixed-partitions";
++ label = "calibration";
++ reg = <0xf00000 0x100000>;
++ ranges = <0 0xf00000 0x100000>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ partition@0 {
++ label = "wifi0";
++ reg = <0x000000 0x080000>;
++ };
++
++ partition@80000 {
++ label = "wifi1";
++ reg = <0x080000 0x080000>;
++ };
++ };
++ };
--
2.25.1

View File

@@ -0,0 +1,226 @@
From cebbb0ffa366e356268afe15fb243682313e02fc Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler <freifunk@adrianschmutzler.de>
Date: Tue, 16 Feb 2021 23:39:32 +0100
Subject: [PATCH 08/22] kernel: 5.10: refresh patches
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>
---
.../generic/hack-5.10/531-debloat_lzma.patch | 33 ++++++++++---------
...t-command-line-parameters-from-users.patch | 10 +++---
...ble-add-offload-support-for-xmit-pat.patch | 15 +++++----
...dd-support-for-threaded-NAPI-polling.patch | 15 +++++----
.../pending-5.10/834-ledtrig-libata.patch | 4 +--
5 files changed, 41 insertions(+), 36 deletions(-)
diff --git a/target/linux/generic/hack-5.10/531-debloat_lzma.patch b/target/linux/generic/hack-5.10/531-debloat_lzma.patch
index 0854872ffa..2f70eee3e9 100644
--- a/target/linux/generic/hack-5.10/531-debloat_lzma.patch
+++ b/target/linux/generic/hack-5.10/531-debloat_lzma.patch
@@ -710,26 +710,26 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
{
UInt32 dicSize;
Byte d;
-@@ -935,33 +883,11 @@ static SRes LzmaDec_AllocateProbs2(CLzma
+@@ -935,7 +883,7 @@ static SRes LzmaDec_AllocateProbs2(CLzma
return SZ_OK;
}
-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
--{
-- CLzmaProps propNew;
-- RINOK(LzmaProps_Decode(&propNew, props, propsSize));
-- RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
-- p->prop = propNew;
-- return SZ_OK;
--}
--
--SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+static SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
{
CLzmaProps propNew;
-- SizeT dicBufSize;
RINOK(LzmaProps_Decode(&propNew, props, propsSize));
- RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+@@ -943,28 +891,6 @@ SRes LzmaDec_AllocateProbs(CLzmaDec *p,
+ p->prop = propNew;
+ return SZ_OK;
+ }
+-
+-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+-{
+- CLzmaProps propNew;
+- SizeT dicBufSize;
+- RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+- RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
- dicBufSize = propNew.dicSize;
- if (p->dic == 0 || dicBufSize != p->dicBufSize)
- {
@@ -742,9 +742,12 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
- }
- }
- p->dicBufSize = dicBufSize;
- p->prop = propNew;
- return SZ_OK;
- }
+- p->prop = propNew;
+- return SZ_OK;
+-}
+
+ SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
--- a/lib/lzma/LzmaEnc.c
+++ b/lib/lzma/LzmaEnc.c
@@ -53,7 +53,7 @@ void LzmaEncProps_Init(CLzmaEncProps *p)
diff --git a/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch b/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
index 5a0e44b76b..2808c95322 100644
--- a/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
+++ b/target/linux/generic/pending-5.10/330-MIPS-kexec-Accept-command-line-parameters-from-users.patch
@@ -267,15 +267,15 @@ Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+ EXPORT(kexec_argv_buf)
+ .skip KEXEC_COMMAND_LINE_SIZE
+ .size kexec_argv_buf, KEXEC_COMMAND_LINE_SIZE
++
++kexec_argv:
++ EXPORT(kexec_argv)
++ .skip KEXEC_ARGV_SIZE
++ .size kexec_argv, KEXEC_ARGV_SIZE
-relocate_new_kernel_size:
- EXPORT(relocate_new_kernel_size)
- PTR relocate_new_kernel_end - relocate_new_kernel
- .size relocate_new_kernel_size, PTRSIZE
-+kexec_argv:
-+ EXPORT(kexec_argv)
-+ .skip KEXEC_ARGV_SIZE
-+ .size kexec_argv, KEXEC_ARGV_SIZE
-+
+kexec_relocate_new_kernel_end:
+ EXPORT(kexec_relocate_new_kernel_end)
diff --git a/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch b/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
index 5e3c7e031a..508dc90e14 100644
--- a/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
+++ b/target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
@@ -85,12 +85,14 @@ tag to the driver.
- n = dst_neigh_lookup(dst_cache, daddr);
- if (!n)
- return -ENOENT;
-+ this_tuple = &flow->tuplehash[dir].tuple;
-
+-
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(ha, n->ha);
- read_unlock_bh(&n->lock);
++ this_tuple = &flow->tuplehash[dir].tuple;
+
+- if (!(nud_state & NUD_VALID)) {
+ switch (this_tuple->xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
+ ether_addr_copy(ha, this_tuple->out.h_dest);
@@ -102,8 +104,7 @@ tag to the driver.
+ n = dst_neigh_lookup(dst_cache, daddr);
+ if (!n)
+ return -ENOENT;
-
-- if (!(nud_state & NUD_VALID)) {
++
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(ha, n->ha);
@@ -143,8 +144,7 @@ tag to the driver.
+ struct flow_action_entry *entry;
+ struct net_device *dev;
+ int ifindex;
-
-- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
++
+ this_tuple = &flow->tuplehash[dir].tuple;
+ switch (this_tuple->xmit_type) {
+ case FLOW_OFFLOAD_XMIT_DIRECT:
@@ -158,7 +158,8 @@ tag to the driver.
+ default:
+ return;
+ }
-+
+
+- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev)
+ return;
diff --git a/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch b/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch
index 79b7832f2a..2979934926 100644
--- a/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch
+++ b/target/linux/generic/pending-5.10/690-net-add-support-for-threaded-NAPI-polling.patch
@@ -214,7 +214,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
napi_hash_del(napi);
list_del_rcu(&napi->dev_list);
napi_free_frags(napi);
-@@ -6788,52 +6881,18 @@ EXPORT_SYMBOL(__netif_napi_del);
+@@ -6788,53 +6881,19 @@ EXPORT_SYMBOL(__netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
@@ -228,7 +228,8 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
have = netpoll_poll_lock(n);
- weight = n->weight;
--
++ work = __napi_poll(n, &do_repoll);
+
- /* This NAPI_STATE_SCHED test is for avoiding a race
- * with netpoll's poll_napi(). Only the entity which
- * obtains the lock and sees NAPI_STATE_SCHED set will
@@ -246,8 +247,8 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
- n->poll, work, weight);
-
- if (likely(work < weight))
-- goto out_unlock;
-+ work = __napi_poll(n, &do_repoll);
++ if (!do_repoll)
+ goto out_unlock;
- /* Drivers must not modify the NAPI state if they
- * consume the entire weight. In such cases this code
@@ -256,8 +257,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
- */
- if (unlikely(napi_disable_pending(n))) {
- napi_complete(n);
-+ if (!do_repoll)
- goto out_unlock;
+- goto out_unlock;
- }
-
- if (n->gro_bitmask) {
@@ -268,9 +268,10 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
- }
-
- gro_normal_list(n);
-
+-
/* Some drivers may have called napi_schedule
* prior to exhausting their budget.
+ */
@@ -11288,6 +11347,10 @@ static int __init net_dev_init(void)
sd->backlog.weight = weight_p;
}
diff --git a/target/linux/generic/pending-5.10/834-ledtrig-libata.patch b/target/linux/generic/pending-5.10/834-ledtrig-libata.patch
index 623e48085d..a52e712d8c 100644
--- a/target/linux/generic/pending-5.10/834-ledtrig-libata.patch
+++ b/target/linux/generic/pending-5.10/834-ledtrig-libata.patch
@@ -106,11 +106,11 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+ for (i = 0; i < host->n_ports; i++) {
+ if (unlikely(!host->ports[i]->ledtrig))
+ continue;
-+
+
+ snprintf(host->ports[i]->ledtrig_name,
+ sizeof(host->ports[i]->ledtrig_name), "ata%u",
+ host->ports[i]->print_id);
-
++
+ host->ports[i]->ledtrig->name = host->ports[i]->ledtrig_name;
+
+ if (led_trigger_register(host->ports[i]->ledtrig)) {
--
2.25.1

View File

@@ -0,0 +1,46 @@
From 6923e03f687b00c0394d34194364b56d04a79d8f Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 17 Feb 2021 13:49:14 +0100
Subject: [PATCH 09/22] build: fix build with CONFIG_STRIP_KERNEL_EXPORTS
Only use symtab.h on the final kernel link
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/kernel-defaults.mk | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/kernel-defaults.mk b/include/kernel-defaults.mk
index b069c1e671..93eed54ae1 100644
--- a/include/kernel-defaults.mk
+++ b/include/kernel-defaults.mk
@@ -3,7 +3,7 @@
# Copyright (C) 2006-2020 OpenWrt.org
ifdef CONFIG_STRIP_KERNEL_EXPORTS
- KERNEL_MAKEOPTS += \
+ KERNEL_MAKEOPTS_IMAGE += \
EXTRA_LDSFLAGS="-I$(KERNEL_BUILD_DIR) -include symtab.h"
endif
@@ -137,7 +137,7 @@ endef
define Kernel/CompileImage/Default
rm -f $(TARGET_DIR)/init
- +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all)
+ +$(KERNEL_MAKE) $(KERNEL_MAKEOPTS_IMAGE) $(if $(KERNELNAME),$(KERNELNAME),all)
$(call Kernel/CopyImage)
endef
@@ -147,7 +147,7 @@ define Kernel/CompileImage/Initramfs
$(CP) $(GENERIC_PLATFORM_DIR)/other-files/init $(TARGET_DIR)/init
$(if $(SOURCE_DATE_EPOCH),touch -hcd "@$(SOURCE_DATE_EPOCH)" $(TARGET_DIR)/init)
rm -rf $(KERNEL_BUILD_DIR)/linux-$(LINUX_VERSION)/usr/initramfs_data.cpio*
- +$(KERNEL_MAKE) $(if $(KERNELNAME),$(KERNELNAME),all)
+ +$(KERNEL_MAKE) $(KERNEL_MAKEOPTS_IMAGE) $(if $(KERNELNAME),$(KERNELNAME),all)
$(call Kernel/CopyImage,-initramfs)
endef
else
--
2.25.1

View File

@@ -0,0 +1,60 @@
From c0484713ff059736b169496d396f722af5db31c0 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sun, 14 Feb 2021 20:17:31 +0000
Subject: [PATCH 10/22] kernel: update kernel 5.10 to 5.10.16
Compile and runtime-tested on mediatek/mt7622
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
include/kernel-version.mk | 4 ++--
.../491-ubi-auto-create-ubiblock-device-for-rootfs.patch | 2 +-
.../pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/kernel-version.mk b/include/kernel-version.mk
index 035692f3e3..26f24bae42 100644
--- a/include/kernel-version.mk
+++ b/include/kernel-version.mk
@@ -7,10 +7,10 @@ ifdef CONFIG_TESTING_KERNEL
endif
LINUX_VERSION-5.4 = .105
-LINUX_VERSION-5.10 = .14
+LINUX_VERSION-5.10 = .16
LINUX_KERNEL_HASH-5.4.105 = 244e4cd16184285df55ec5a9501daba011aa8b85c5527ee05eab4592e70fb8b6
-LINUX_KERNEL_HASH-5.10.14 = fa27b79f198b5be969e497ed5461860df48e0591c85e60699fc8be26837a1d2a
+LINUX_KERNEL_HASH-5.10.16 = 536fe3ea273bfcc72b3571d3b3a7ff0a5bcdc16068efd22e42c4f9d03c200a37
remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1))))
sanitize_uri=$(call qstrip,$(subst @,_,$(subst :,_,$(subst .,_,$(subst -,_,$(subst /,_,$(1)))))))
diff --git a/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
index 61fcbac92e..e5ee2c8656 100644
--- a/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
+++ b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
@@ -53,7 +53,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
-@@ -684,6 +722,10 @@ int __init ubiblock_init(void)
+@@ -684,6 +725,10 @@ int __init ubiblock_init(void)
*/
ubiblock_create_from_param();
diff --git a/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch b/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch
index e6a3d15b79..42b91fe4c3 100644
--- a/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch
+++ b/target/linux/generic/pending-5.10/760-net-dsa-mv88e6xxx-fix-vlan-setup.patch
@@ -17,7 +17,7 @@ Signed-off-by: DENG Qingfang <dqfext@gmail.com>
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
-@@ -2853,6 +2853,7 @@ static int mv88e6xxx_setup(struct dsa_sw
+@@ -2857,6 +2857,7 @@ static int mv88e6xxx_setup(struct dsa_sw
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
--
2.25.1

View File

@@ -0,0 +1,910 @@
From 4ff7b77727cc6c4ed02fa89ced3aaa3d9437bf22 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 9 Apr 2020 09:53:24 +0200
Subject: [PATCH 11/22] mediatek: implement bad-block management table support
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../mediatek/mt7622-elecom-wrc-2533gent.dts | 2 +
target/linux/mediatek/mt7622/config-5.4 | 1 +
target/linux/mediatek/mt7623/config-5.4 | 1 +
target/linux/mediatek/mt7629/config-5.4 | 1 +
.../patches-5.4/0310-mtk-bmt-support.patch | 837 ++++++++++++++++++
5 files changed, 842 insertions(+)
create mode 100644 target/linux/mediatek/patches-5.4/0310-mtk-bmt-support.patch
diff --git a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-elecom-wrc-2533gent.dts b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-elecom-wrc-2533gent.dts
index 2ac1c6a671..3971ce0389 100644
--- a/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-elecom-wrc-2533gent.dts
+++ b/target/linux/mediatek/files-5.4/arch/arm64/boot/dts/mediatek/mt7622-elecom-wrc-2533gent.dts
@@ -514,6 +514,8 @@
spi-max-frequency = <104000000>;
reg = <0>;
+ mediatek,bmt-v2;
+
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
diff --git a/target/linux/mediatek/mt7622/config-5.4 b/target/linux/mediatek/mt7622/config-5.4
index 1b0b1e36a6..cdb73bd7bc 100644
--- a/target/linux/mediatek/mt7622/config-5.4
+++ b/target/linux/mediatek/mt7622/config-5.4
@@ -396,6 +396,7 @@ CONFIG_MT753X_GSW=y
CONFIG_MTD_NAND_CORE=y
CONFIG_MTD_NAND_ECC_SW_HAMMING=y
CONFIG_MTD_NAND_MTK=y
+CONFIG_MTD_NAND_MTK_BMT=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_SPI_NAND=y
CONFIG_MTD_SPI_NOR=y
diff --git a/target/linux/mediatek/mt7623/config-5.4 b/target/linux/mediatek/mt7623/config-5.4
index dbd3055d3b..839eb71008 100644
--- a/target/linux/mediatek/mt7623/config-5.4
+++ b/target/linux/mediatek/mt7623/config-5.4
@@ -332,6 +332,7 @@ CONFIG_MODULES_USE_ELF_REL=y
# CONFIG_MT753X_GSW is not set
CONFIG_MTD_BLOCK2MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_NAND_MTK_BMT is not set
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_SPLIT_FIRMWARE=y
CONFIG_MTD_SPLIT_UIMAGE_FW=y
diff --git a/target/linux/mediatek/mt7629/config-5.4 b/target/linux/mediatek/mt7629/config-5.4
index 7fe01d1748..6ecde5ced6 100644
--- a/target/linux/mediatek/mt7629/config-5.4
+++ b/target/linux/mediatek/mt7629/config-5.4
@@ -240,6 +240,7 @@ CONFIG_MT753X_GSW=y
CONFIG_MTD_NAND_CORE=y
CONFIG_MTD_NAND_ECC_SW_HAMMING=y
CONFIG_MTD_NAND_MTK=y
+# CONFIG_MTD_NAND_MTK_BMT is not set
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_SPI_NAND=y
CONFIG_MTD_SPI_NOR=y
diff --git a/target/linux/mediatek/patches-5.4/0310-mtk-bmt-support.patch b/target/linux/mediatek/patches-5.4/0310-mtk-bmt-support.patch
new file mode 100644
index 0000000000..2a23f8c3dc
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/0310-mtk-bmt-support.patch
@@ -0,0 +1,837 @@
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -5,3 +5,7 @@ config MTD_NAND_CORE
+ source "drivers/mtd/nand/onenand/Kconfig"
+ source "drivers/mtd/nand/raw/Kconfig"
+ source "drivers/mtd/nand/spi/Kconfig"
++
++config MTD_NAND_MTK_BMT
++ bool "Support MediaTek NAND Bad-block Management Table"
++ default n
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -2,6 +2,7 @@
+
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
++obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o
+
+ obj-y += onenand/
+ obj-y += raw/
+--- /dev/null
++++ b/drivers/mtd/nand/mtk_bmt.c
+@@ -0,0 +1,766 @@
++/*
++ * Copyright (c) 2017 MediaTek Inc.
++ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
++ * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/slab.h>
++#include <linux/gfp.h>
++#include <linux/kernel.h>
++#include <linux/of.h>
++#include <linux/mtd/nand.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/mtk_bmt.h>
++#include <linux/module.h>
++#include <linux/debugfs.h>
++
++#define MAIN_SIGNATURE_OFFSET 0
++#define OOB_SIGNATURE_OFFSET 1
++#define BBPOOL_RATIO 2
++
++#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
++
++/* Maximum 8k blocks */
++#define BB_TABLE_MAX 0x2000U
++#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
++#define BMT_TBL_DEF_VAL 0x0
++
++/*
++ * Burner Bad Block Table
++ * --------- Only support SLC Nand Chips!!!!!!!!!!! ----------
++ */
++
++struct bbbt {
++ char signature[3];
++ /* This version is used to distinguish the legacy and new algorithm */
++#define BBMT_VERSION 2
++ unsigned char version;
++ /* Below 2 tables will be written in SLC */
++ u16 bb_tbl[BB_TABLE_MAX];
++ struct bbmt {
++ u16 block;
++#define NO_MAPPED 0
++#define NORMAL_MAPPED 1
++#define BMT_MAPPED 2
++ u16 mapped;
++ } bmt_tbl[BMT_TABLE_MAX];
++};
++
++static struct bmt_desc {
++ struct mtd_info *mtd;
++
++ int (*_read_oob) (struct mtd_info *mtd, loff_t from,
++ struct mtd_oob_ops *ops);
++ int (*_write_oob) (struct mtd_info *mtd, loff_t to,
++ struct mtd_oob_ops *ops);
++ const struct nand_ops *nand_ops;
++
++ struct bbbt *bbt;
++
++ struct dentry *debugfs_dir;
++
++ u32 pg_size;
++ u32 blk_size;
++ u16 pg_shift;
++ u16 blk_shift;
++ /* bbt logical address */
++ u16 pool_lba;
++ /* bbt physical address */
++ u16 pool_pba;
++ /* Maximum count of bad blocks that the vendor guaranteed */
++ u16 bb_max;
++ /* Total blocks of the Nand Chip */
++ u16 total_blks;
++ /* The block(n) BMT is located at (bmt_tbl[n]) */
++ u16 bmt_blk_idx;
++ /* How many pages needs to store 'struct bbbt' */
++ u32 bmt_pgs;
++
++ /* to compensate for driver level remapping */
++ u8 oob_offset;
++} bmtd = {0};
++
++static unsigned char *nand_bbt_buf;
++static unsigned char *nand_data_buf;
++
++/* -------- Unit conversions -------- */
++static inline u32 blk_pg(u16 block)
++{
++ return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
++}
++
++/* -------- Nand operations wrapper -------- */
++static inline int
++bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
++ unsigned char *fdm, int fdm_len)
++{
++ struct mtd_oob_ops ops = {
++ .mode = MTD_OPS_PLACE_OOB,
++ .ooboffs = bmtd.oob_offset,
++ .oobbuf = fdm,
++ .ooblen = fdm_len,
++ .datbuf = dat,
++ .len = dat_len,
++ };
++
++ return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
++}
++
++static inline int bbt_nand_erase(u16 block)
++{
++ struct nand_device *nand = mtd_to_nanddev(bmtd.mtd);
++ loff_t addr = (loff_t)block << bmtd.blk_shift;
++ struct nand_pos pos;
++
++ nanddev_offs_to_pos(nand, addr, &pos);
++ return bmtd.nand_ops->erase(nand, &pos);
++}
++
++/* -------- Bad Blocks Management -------- */
++static int
++read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
++{
++ u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
++
++ return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
++}
++
++static int write_bmt(u16 block, unsigned char *dat)
++{
++ struct mtd_oob_ops ops = {
++ .mode = MTD_OPS_PLACE_OOB,
++ .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
++ .oobbuf = "bmt",
++ .ooblen = 3,
++ .datbuf = dat,
++ .len = bmtd.bmt_pgs << bmtd.pg_shift,
++ };
++ loff_t addr = (loff_t)block << bmtd.blk_shift;
++
++ return bmtd._write_oob(bmtd.mtd, addr, &ops);
++}
++
++static u16 find_valid_block(u16 block)
++{
++ u8 fdm[4];
++ int ret;
++ int loop = 0;
++
++retry:
++ if (block >= bmtd.total_blks)
++ return 0;
++
++ ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
++ fdm, sizeof(fdm));
++ /* Read the 1st byte of FDM to judge whether it's a bad
++ * or not
++ */
++ if (ret || fdm[0] != 0xff) {
++ pr_info("nand: found bad block 0x%x\n", block);
++ if (loop >= bmtd.bb_max) {
++ pr_info("nand: FATAL ERR: too many bad blocks!!\n");
++ return 0;
++ }
++
++ loop++;
++ block++;
++ goto retry;
++ }
++
++ return block;
++}
++
++/* Find out all bad blocks, and fill in the mapping table */
++static int scan_bad_blocks(struct bbbt *bbt)
++{
++ int i;
++ u16 block = 0;
++
++ /* First time download, the block0 MUST NOT be a bad block,
++ * this is guaranteed by vendor
++ */
++ bbt->bb_tbl[0] = 0;
++
++ /*
++ * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
++ * G - Good block; B - Bad block
++ * ---------------------------
++ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
++ * ---------------------------
++ * What bb_tbl[i] looks like:
++ * physical block(i):
++ * 0 1 2 3 4 5 6 7 8 9 a b c
++ * mapped block(bb_tbl[i]):
++ * 0 1 3 6 7 8 9 b ......
++ * ATTENTION:
++ * If new bad block ocurred(n), search bmt_tbl to find
++ * a available block(x), and fill in the bb_tbl[n] = x;
++ */
++ for (i = 1; i < bmtd.pool_lba; i++) {
++ bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
++ BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
++ if (bbt->bb_tbl[i] == 0)
++ return -1;
++ }
++
++ /* Physical Block start Address of BMT pool */
++ bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
++ if (bmtd.pool_pba >= bmtd.total_blks - 2) {
++ pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
++ return -1;
++ }
++
++ BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
++ i = 0;
++ block = bmtd.pool_pba;
++ /*
++ * The bmt table is used for runtime bad block mapping
++ * G - Good block; B - Bad block
++ * ---------------------------
++ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
++ * ---------------------------
++ * block: 0 1 2 3 4 5 6 7 8 9 a b c
++ * What bmt_tbl[i] looks like in initial state:
++ * i:
++ * 0 1 2 3 4 5 6 7
++ * bmt_tbl[i].block:
++ * 0 1 3 6 7 8 9 b
++ * bmt_tbl[i].mapped:
++ * N N N N N N N B
++ * N - Not mapped(Available)
++ * M - Mapped
++ * B - BMT
++ * ATTENTION:
++ * BMT always in the last valid block in pool
++ */
++ while ((block = find_valid_block(block)) != 0) {
++ bbt->bmt_tbl[i].block = block;
++ bbt->bmt_tbl[i].mapped = NO_MAPPED;
++ BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
++ block++;
++ i++;
++ }
++
++ /* i - How many available blocks in pool, which is the length of bmt_tbl[]
++ * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
++ */
++ bmtd.bmt_blk_idx = i - 1;
++ bbt->bmt_tbl[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
++
++ if (i < 1) {
++ pr_info("nand: FATAL ERR: no space to store BMT!!\n");
++ return -1;
++ }
++
++ pr_info("[BBT] %d available blocks in BMT pool\n", i);
++
++ return 0;
++}
++
++static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
++{
++ struct bbbt *bbt = (struct bbbt *)buf;
++ u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
++
++
++ if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
++ memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
++ if (bbt->version == BBMT_VERSION)
++ return true;
++ }
++ BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x",
++ sig[0], sig[1], sig[2],
++ fdm[1], fdm[2], fdm[3]);
++ return false;
++}
++
++static u16 get_bmt_index(struct bbmt *bmt)
++{
++ int i = 0;
++
++ while (bmt[i].block != BMT_TBL_DEF_VAL) {
++ if (bmt[i].mapped == BMT_MAPPED)
++ return i;
++ i++;
++ }
++ return 0;
++}
++
++static struct bbbt *scan_bmt(u16 block)
++{
++ u8 fdm[4];
++
++ if (block < bmtd.pool_lba)
++ return NULL;
++
++ if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
++ return scan_bmt(block - 1);
++
++ if (is_valid_bmt(nand_bbt_buf, fdm)) {
++ bmtd.bmt_blk_idx = get_bmt_index(((struct bbbt *)nand_bbt_buf)->bmt_tbl);
++ if (bmtd.bmt_blk_idx == 0) {
++ pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
++ return NULL;
++ }
++ pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
++ return (struct bbbt *)nand_bbt_buf;
++ } else
++ return scan_bmt(block - 1);
++}
++
++/* Write the Burner Bad Block Table to Nand Flash
++ * n - write BMT to bmt_tbl[n]
++ */
++static u16 upload_bmt(struct bbbt *bbt, int n)
++{
++ u16 block;
++
++retry:
++ if (n < 0 || bbt->bmt_tbl[n].mapped == NORMAL_MAPPED) {
++ pr_info("nand: FATAL ERR: no space to store BMT!\n");
++ return (u16)-1;
++ }
++
++ block = bbt->bmt_tbl[n].block;
++ BBT_LOG("n = 0x%x, block = 0x%x", n, block);
++ if (bbt_nand_erase(block)) {
++ bbt->bmt_tbl[n].block = 0;
++ /* erase failed, try the previous block: bmt_tbl[n - 1].block */
++ n--;
++ goto retry;
++ }
++
++ /* The signature offset is fixed set to 0,
++ * oob signature offset is fixed set to 1
++ */
++ memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
++ bbt->version = BBMT_VERSION;
++
++ if (write_bmt(block, (unsigned char *)bbt)) {
++ bbt->bmt_tbl[n].block = 0;
++
++ /* write failed, try the previous block in bmt_tbl[n - 1] */
++ n--;
++ goto retry;
++ }
++
++ /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
++ return n;
++}
++
++static u16 find_valid_block_in_pool(struct bbbt *bbt)
++{
++ int i;
++
++ if (bmtd.bmt_blk_idx == 0)
++ goto error;
++
++ for (i = 0; i < bmtd.bmt_blk_idx; i++) {
++ if (bbt->bmt_tbl[i].block != 0 && bbt->bmt_tbl[i].mapped == NO_MAPPED) {
++ bbt->bmt_tbl[i].mapped = NORMAL_MAPPED;
++ return bbt->bmt_tbl[i].block;
++ }
++ }
++
++error:
++ pr_info("nand: FATAL ERR: BMT pool is run out!\n");
++ return 0;
++}
++
++/* We met a bad block, mark it as bad and map it to a valid block in pool,
++ * if it's a write failure, we need to write the data to mapped block
++ */
++static bool update_bmt(u16 block)
++{
++ u16 mapped_blk;
++ struct bbbt *bbt;
++
++ bbt = bmtd.bbt;
++ mapped_blk = find_valid_block_in_pool(bbt);
++ if (mapped_blk == 0)
++ return false;
++
++ /* Map new bad block to available block in pool */
++ bbt->bb_tbl[block] = mapped_blk;
++ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
++
++ return true;
++}
++
++u16 get_mapping_block_index(int block)
++{
++ int mapping_block;
++
++ if (block < bmtd.pool_lba)
++ mapping_block = bmtd.bbt->bb_tbl[block];
++ else
++ mapping_block = block;
++ BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
++
++ return mapping_block;
++}
++
++static int
++mtk_bmt_read(struct mtd_info *mtd, loff_t from,
++ struct mtd_oob_ops *ops)
++{
++ struct mtd_oob_ops cur_ops = *ops;
++ int retry_count = 0;
++ loff_t cur_from;
++ int ret;
++
++ ops->retlen = 0;
++ ops->oobretlen = 0;
++
++ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
++ u32 offset = from & (bmtd.blk_size - 1);
++ u32 block = from >> bmtd.blk_shift;
++ u32 cur_block;
++
++ cur_block = get_mapping_block_index(block);
++ cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
++
++ cur_ops.oobretlen = 0;
++ cur_ops.retlen = 0;
++ cur_ops.len = min_t(u32, mtd->erasesize - offset,
++ ops->len - ops->retlen);
++ ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
++ if (ret < 0) {
++ update_bmt(block);
++ if (retry_count++ < 10)
++ continue;
++
++ return ret;
++ }
++
++ ops->retlen += cur_ops.retlen;
++ ops->oobretlen += cur_ops.oobretlen;
++
++ cur_ops.datbuf += cur_ops.retlen;
++ cur_ops.oobbuf += cur_ops.oobretlen;
++ cur_ops.ooblen -= cur_ops.oobretlen;
++
++ if (!cur_ops.len)
++ cur_ops.len = mtd->erasesize - offset;
++
++ from += cur_ops.len;
++ retry_count = 0;
++ }
++
++ return 0;
++}
++
++static int
++mtk_bmt_write(struct mtd_info *mtd, loff_t to,
++ struct mtd_oob_ops *ops)
++{
++ struct mtd_oob_ops cur_ops = *ops;
++ int retry_count = 0;
++ loff_t cur_to;
++ int ret;
++
++ ops->retlen = 0;
++ ops->oobretlen = 0;
++
++ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
++ u32 offset = to & (bmtd.blk_size - 1);
++ u32 block = to >> bmtd.blk_shift;
++ u32 cur_block;
++
++ cur_block = get_mapping_block_index(block);
++ cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
++
++ cur_ops.oobretlen = 0;
++ cur_ops.retlen = 0;
++ cur_ops.len = min_t(u32, bmtd.blk_size - offset,
++ ops->len - ops->retlen);
++ ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
++ if (ret < 0) {
++ update_bmt(block);
++ if (retry_count++ < 10)
++ continue;
++
++ return ret;
++ }
++
++ ops->retlen += cur_ops.retlen;
++ ops->oobretlen += cur_ops.oobretlen;
++
++ cur_ops.datbuf += cur_ops.retlen;
++ cur_ops.oobbuf += cur_ops.oobretlen;
++ cur_ops.ooblen -= cur_ops.oobretlen;
++
++ if (!cur_ops.len)
++ cur_ops.len = mtd->erasesize - offset;
++
++ to += cur_ops.len;
++ retry_count = 0;
++ }
++
++ return 0;
++}
++
++
++
++static int
++mtk_bmt_erase(struct nand_device *nand, const struct nand_pos *pos)
++{
++ struct nand_pos new_pos = *pos;
++ int retry_count = 0;
++ int ret;
++
++retry:
++ new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
++
++ ret = bmtd.nand_ops->erase(nand, &new_pos);
++ if (ret) {
++ update_bmt(pos->eraseblock);
++ if (retry_count++ < 10)
++ goto retry;
++ }
++
++ return ret;
++}
++
++static bool
++mtk_bmt_isbad(struct nand_device *nand, const struct nand_pos *pos)
++{
++ struct nand_pos new_pos = *pos;
++ int retry_count = 0;
++ bool ret;
++
++retry:
++ new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
++
++ ret = bmtd.nand_ops->isbad(nand, &new_pos);
++ if (ret) {
++ update_bmt(pos->eraseblock);
++ if (retry_count++ < 10)
++ goto retry;
++ }
++
++ return ret;
++}
++
++static int
++mtk_bmt_markbad(struct nand_device *nand, const struct nand_pos *pos)
++{
++ struct nand_pos new_pos = *pos;
++
++ new_pos.eraseblock = get_mapping_block_index(new_pos.eraseblock);
++ update_bmt(pos->eraseblock);
++
++ return bmtd.nand_ops->markbad(nand, &new_pos);
++}
++
++static void
++mtk_bmt_replace_ops(struct mtd_info *mtd)
++{
++ static const struct nand_ops mtk_bmt_nand_ops = {
++ .erase = mtk_bmt_erase,
++ .isbad = mtk_bmt_isbad,
++ .markbad = mtk_bmt_markbad,
++ };
++ struct nand_device *nand = mtd_to_nanddev(mtd);
++
++ bmtd.nand_ops = nand->ops;
++ bmtd._read_oob = mtd->_read_oob;
++ bmtd._write_oob = mtd->_write_oob;
++
++ mtd->_read_oob = mtk_bmt_read;
++ mtd->_write_oob = mtk_bmt_write;
++ nand->ops = &mtk_bmt_nand_ops;
++}
++
++static int mtk_bmt_debug_mark_good(void *data, u64 val)
++{
++ u32 block = val >> bmtd.blk_shift;
++
++ bmtd.bbt->bb_tbl[block] = block;
++ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
++
++ return 0;
++}
++
++static int mtk_bmt_debug_mark_bad(void *data, u64 val)
++{
++ u32 block = val >> bmtd.blk_shift;
++
++ update_bmt(block);
++
++ return 0;
++}
++
++DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
++DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
++
++static void
++mtk_bmt_add_debugfs(void)
++{
++ struct dentry *dir;
++
++ dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
++ if (!dir)
++ return;
++
++ debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
++ debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
++}
++
++void mtk_bmt_detach(struct mtd_info *mtd)
++{
++ struct nand_device *nand = mtd_to_nanddev(mtd);
++
++ if (bmtd.mtd != mtd)
++ return;
++
++ if (bmtd.debugfs_dir)
++ debugfs_remove_recursive(bmtd.debugfs_dir);
++ bmtd.debugfs_dir = NULL;
++
++ kfree(nand_bbt_buf);
++ kfree(nand_data_buf);
++
++ mtd->_read_oob = bmtd._read_oob;
++ mtd->_write_oob = bmtd._write_oob;
++ mtd->size = bmtd.total_blks << bmtd.blk_shift;
++ nand->ops = bmtd.nand_ops;
++
++ memset(&bmtd, 0, sizeof(bmtd));
++}
++
++/* total_blocks - The total count of blocks that the Nand Chip has */
++int mtk_bmt_attach(struct mtd_info *mtd)
++{
++ struct device_node *np;
++ struct bbbt *bbt;
++ u32 bufsz;
++ u32 block;
++ u16 total_blocks, pmt_block;
++ int ret = 0;
++ u32 bmt_pool_size;
++
++ if (bmtd.mtd)
++ return -ENOSPC;
++
++ np = mtd_get_of_node(mtd);
++ if (!np)
++ return 0;
++
++ if (!of_property_read_bool(np, "mediatek,bmt-v2"))
++ return 0;
++
++ if (of_property_read_u32(np, "mediatek,bmt-pool-size",
++ &bmt_pool_size) != 0)
++ bmt_pool_size = 80;
++
++ if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
++ &bmtd.oob_offset) != 0)
++ bmtd.oob_offset = 8;
++
++ bmtd.mtd = mtd;
++ mtk_bmt_replace_ops(mtd);
++
++ bmtd.blk_size = mtd->erasesize;
++ bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
++ bmtd.pg_size = mtd->writesize;
++ bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
++ total_blocks = mtd->size >> bmtd.blk_shift;
++ pmt_block = total_blocks - bmt_pool_size - 2;
++
++ mtd->size = pmt_block << bmtd.blk_shift;
++
++ /*
++ * ---------------------------------------
++ * | PMT(2blks) | BMT POOL(totalblks * 2%) |
++ * ---------------------------------------
++ * ^ ^
++ * | |
++ * pmt_block pmt_block + 2blocks(pool_lba)
++ *
++ * ATTETION!!!!!!
++ * The blocks ahead of the boundary block are stored in bb_tbl
++ * and blocks behind are stored in bmt_tbl
++ */
++
++ bmtd.pool_lba = (u16)(pmt_block + 2);
++ bmtd.total_blks = total_blocks;
++ bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
++
++ /* 3 buffers we need */
++ bufsz = round_up(sizeof(struct bbbt), bmtd.pg_size);
++ bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
++
++ nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
++ nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
++
++ if (!nand_bbt_buf || !nand_data_buf) {
++ pr_info("nand: FATAL ERR: allocate buffer failed!\n");
++ ret = -1;
++ goto error;
++ }
++
++ memset(nand_bbt_buf, 0xff, bufsz);
++ memset(nand_data_buf, 0xff, bmtd.pg_size);
++
++ BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
++ nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
++ BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
++ bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
++
++ /* Scanning start from the first page of the last block
++ * of whole flash
++ */
++ bbt = scan_bmt(bmtd.total_blks - 1);
++ if (!bbt) {
++ /* BMT not found */
++ if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
++ pr_info("nand: FATAL: Too many blocks, can not support!\n");
++ ret = -1;
++ goto error;
++ }
++
++ bbt = (struct bbbt *)nand_bbt_buf;
++ memset(bbt->bmt_tbl, BMT_TBL_DEF_VAL, sizeof(bbt->bmt_tbl));
++
++ if (scan_bad_blocks(bbt)) {
++ ret = -1;
++ goto error;
++ }
++
++ /* BMT always in the last valid block in pool */
++ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
++ block = bbt->bmt_tbl[bmtd.bmt_blk_idx].block;
++ pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
++
++ if (bmtd.bmt_blk_idx == 0)
++ pr_info("nand: Warning: no available block in BMT pool!\n");
++ else if (bmtd.bmt_blk_idx == (u16)-1) {
++ ret = -1;
++ goto error;
++ }
++ }
++ mtk_bmt_add_debugfs();
++
++ bmtd.bbt = bbt;
++ return 0;
++
++error:
++ mtk_bmt_detach(mtd);
++ return ret;
++}
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
++MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
++
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -18,6 +18,7 @@
+ #include <linux/slab.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi-mem.h>
++#include <linux/mtd/mtk_bmt.h>
+
+ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+ {
+@@ -1099,6 +1100,8 @@ static int spinand_probe(struct spi_mem
+ if (ret)
+ return ret;
+
++ mtk_bmt_attach(mtd);
++
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_spinand_cleanup;
+@@ -1124,6 +1127,7 @@ static int spinand_remove(struct spi_mem
+ if (ret)
+ return ret;
+
++ mtk_bmt_detach(mtd);
+ spinand_cleanup(spinand);
+
+ return 0;
+--- /dev/null
++++ b/include/linux/mtd/mtk_bmt.h
+@@ -0,0 +1,18 @@
++#ifndef __MTK_BMT_H
++#define __MTK_BMT_H
++
++#ifdef CONFIG_MTD_NAND_MTK_BMT
++int mtk_bmt_attach(struct mtd_info *mtd);
++void mtk_bmt_detach(struct mtd_info *mtd);
++#else
++static inline int mtk_bmt_attach(struct mtd_info *mtd)
++{
++ return 0;
++}
++
++static inline void mtk_bmt_detach(struct mtd_info *mtd)
++{
++}
++#endif
++
++#endif
--
2.25.1

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,215 @@
From 1572b27651f11ac0e8f14a543815963147eb26ae Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 3 Feb 2021 19:34:29 +0100
Subject: [PATCH 13/22] mediatek: add support for configuring BMT table size
via device tree
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../patches-5.10/330-mtk-bmt-support.patch | 59 ++++++++++++-------
1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/target/linux/mediatek/patches-5.10/330-mtk-bmt-support.patch b/target/linux/mediatek/patches-5.10/330-mtk-bmt-support.patch
index 5c20952611..504c602c50 100644
--- a/target/linux/mediatek/patches-5.10/330-mtk-bmt-support.patch
+++ b/target/linux/mediatek/patches-5.10/330-mtk-bmt-support.patch
@@ -23,7 +23,7 @@
obj-y += raw/
--- /dev/null
+++ b/drivers/mtd/nand/mtk_bmt.c
-@@ -0,0 +1,766 @@
+@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
@@ -56,7 +56,7 @@
+#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* Maximum 8k blocks */
-+#define BB_TABLE_MAX 0x2000U
++#define BB_TABLE_MAX bmtd.table_size
+#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
+#define BMT_TBL_DEF_VAL 0x0
+
@@ -71,14 +71,15 @@
+#define BBMT_VERSION 2
+ unsigned char version;
+ /* Below 2 tables will be written in SLC */
-+ u16 bb_tbl[BB_TABLE_MAX];
-+ struct bbmt {
-+ u16 block;
++ u16 bb_tbl[];
++};
++
++struct bbmt {
++ u16 block;
+#define NO_MAPPED 0
+#define NORMAL_MAPPED 1
+#define BMT_MAPPED 2
-+ u16 mapped;
-+ } bmt_tbl[BMT_TABLE_MAX];
++ u16 mapped;
+};
+
+static struct bmt_desc {
@@ -94,6 +95,7 @@
+
+ struct dentry *debugfs_dir;
+
++ u32 table_size;
+ u32 pg_size;
+ u32 blk_size;
+ u16 pg_shift;
@@ -152,6 +154,11 @@
+}
+
+/* -------- Bad Blocks Management -------- */
++static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
++{
++ return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
++}
++
+static int
+read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
+{
@@ -269,8 +276,8 @@
+ * BMT always in the last valid block in pool
+ */
+ while ((block = find_valid_block(block)) != 0) {
-+ bbt->bmt_tbl[i].block = block;
-+ bbt->bmt_tbl[i].mapped = NO_MAPPED;
++ bmt_tbl(bbt)[i].block = block;
++ bmt_tbl(bbt)[i].mapped = NO_MAPPED;
+ BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
+ block++;
+ i++;
@@ -280,7 +287,7 @@
+ * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
+ */
+ bmtd.bmt_blk_idx = i - 1;
-+ bbt->bmt_tbl[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
++ bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
+
+ if (i < 1) {
+ pr_info("nand: FATAL ERR: no space to store BMT!!\n");
@@ -332,7 +339,7 @@
+ return scan_bmt(block - 1);
+
+ if (is_valid_bmt(nand_bbt_buf, fdm)) {
-+ bmtd.bmt_blk_idx = get_bmt_index(((struct bbbt *)nand_bbt_buf)->bmt_tbl);
++ bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
+ if (bmtd.bmt_blk_idx == 0) {
+ pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
+ return NULL;
@@ -351,15 +358,15 @@
+ u16 block;
+
+retry:
-+ if (n < 0 || bbt->bmt_tbl[n].mapped == NORMAL_MAPPED) {
++ if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
+ pr_info("nand: FATAL ERR: no space to store BMT!\n");
+ return (u16)-1;
+ }
+
-+ block = bbt->bmt_tbl[n].block;
++ block = bmt_tbl(bbt)[n].block;
+ BBT_LOG("n = 0x%x, block = 0x%x", n, block);
+ if (bbt_nand_erase(block)) {
-+ bbt->bmt_tbl[n].block = 0;
++ bmt_tbl(bbt)[n].block = 0;
+ /* erase failed, try the previous block: bmt_tbl[n - 1].block */
+ n--;
+ goto retry;
@@ -372,7 +379,7 @@
+ bbt->version = BBMT_VERSION;
+
+ if (write_bmt(block, (unsigned char *)bbt)) {
-+ bbt->bmt_tbl[n].block = 0;
++ bmt_tbl(bbt)[n].block = 0;
+
+ /* write failed, try the previous block in bmt_tbl[n - 1] */
+ n--;
@@ -391,9 +398,9 @@
+ goto error;
+
+ for (i = 0; i < bmtd.bmt_blk_idx; i++) {
-+ if (bbt->bmt_tbl[i].block != 0 && bbt->bmt_tbl[i].mapped == NO_MAPPED) {
-+ bbt->bmt_tbl[i].mapped = NORMAL_MAPPED;
-+ return bbt->bmt_tbl[i].block;
++ if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
++ bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
++ return bmt_tbl(bbt)[i].block;
+ }
+ }
+
@@ -471,6 +478,7 @@
+ ops->retlen += cur_ops.retlen;
+ ops->oobretlen += cur_ops.oobretlen;
+
++ cur_ops.ooboffs = 0;
+ cur_ops.datbuf += cur_ops.retlen;
+ cur_ops.oobbuf += cur_ops.oobretlen;
+ cur_ops.ooblen -= cur_ops.oobretlen;
@@ -521,6 +529,7 @@
+ ops->retlen += cur_ops.retlen;
+ ops->oobretlen += cur_ops.oobretlen;
+
++ cur_ops.ooboffs = 0;
+ cur_ops.datbuf += cur_ops.retlen;
+ cur_ops.oobbuf += cur_ops.oobretlen;
+ cur_ops.ooblen -= cur_ops.oobretlen;
@@ -673,7 +682,7 @@
+ u32 block;
+ u16 total_blocks, pmt_block;
+ int ret = 0;
-+ u32 bmt_pool_size;
++ u32 bmt_pool_size, bmt_table_size;
+
+ if (bmtd.mtd)
+ return -ENOSPC;
@@ -693,9 +702,14 @@
+ &bmtd.oob_offset) != 0)
+ bmtd.oob_offset = 8;
+
++ if (of_property_read_u32(np, "mediatek,bmt-table-size",
++ &bmt_table_size) != 0)
++ bmt_table_size = 0x2000U;
++
+ bmtd.mtd = mtd;
+ mtk_bmt_replace_ops(mtd);
+
++ bmtd.table_size = bmt_table_size;
+ bmtd.blk_size = mtd->erasesize;
+ bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
+ bmtd.pg_size = mtd->writesize;
@@ -723,7 +737,8 @@
+ bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
+
+ /* 3 buffers we need */
-+ bufsz = round_up(sizeof(struct bbbt), bmtd.pg_size);
++ bufsz = round_up(sizeof(struct bbbt) +
++ bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
+ bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
+
+ nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
@@ -756,7 +771,7 @@
+ }
+
+ bbt = (struct bbbt *)nand_bbt_buf;
-+ memset(bbt->bmt_tbl, BMT_TBL_DEF_VAL, sizeof(bbt->bmt_tbl));
++ memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));
+
+ if (scan_bad_blocks(bbt)) {
+ ret = -1;
@@ -765,7 +780,7 @@
+
+ /* BMT always in the last valid block in pool */
+ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
-+ block = bbt->bmt_tbl[bmtd.bmt_blk_idx].block;
++ block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
+ pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
+
+ if (bmtd.bmt_blk_idx == 0)
--
2.25.1

View File

@@ -0,0 +1,43 @@
From 29da2c500a856b214e9a0eca3ccdc98a6470da48 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 3 Feb 2021 20:37:03 +0100
Subject: [PATCH 14/22] kernel: add support for enabling fit firmware partition
parser via cmdline
This is useful for dual-boot setups where the loader sets variables depending
on the flash boot partition.
For example the Linksys E8450 sets mtdparts=master for the first partition
and mtdparts=slave for the second one.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
index 67ee33d085..5cc1658dbd 100644
--- a/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
+++ b/target/linux/generic/files/drivers/mtd/mtdsplit/mtdsplit_fit.c
@@ -49,6 +49,8 @@ mtdsplit_fit_parse(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ struct device_node *np = mtd_get_of_node(mtd);
+ const char *cmdline_match = NULL;
struct fdt_header hdr;
size_t hdr_len, retlen;
size_t offset;
@@ -57,6 +59,10 @@ mtdsplit_fit_parse(struct mtd_info *mtd,
struct mtd_partition *parts;
int ret;
+ of_property_read_string(np, "openwrt,cmdline-match", &cmdline_match);
+ if (cmdline_match && !strstr(saved_command_line, cmdline_match))
+ return -ENODEV;
+
hdr_len = sizeof(struct fdt_header);
/* Parse the MTD device & search for the FIT image location */
--
2.25.1

View File

@@ -0,0 +1,757 @@
From f0d0621227f82d56676485cb31918c17fb3ed564 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Tue, 2 Feb 2021 16:29:58 +0100
Subject: [PATCH 15/22] mediatek: add linksys-e8450 support
Signed-off-by: John Crispin <john@phrozen.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
.../dts/mediatek/mt7622-linksys-e8450.dts | 488 ++++++++++++++++++
target/linux/mediatek/image/mt7622.mk | 10 +
.../mt7622/base-files/etc/board.d/01_leds | 18 +
.../mt7622/base-files/etc/board.d/02_network | 11 +-
.../mt7622/base-files/etc/init.d/bootcount | 11 +
.../mt7622/base-files/lib/upgrade/platform.sh | 8 +
...Add-support-for-the-Fidelix-FM35X1GA.patch | 122 +++++
7 files changed, 667 insertions(+), 1 deletion(-)
create mode 100644 target/linux/mediatek/files-5.10/arch/arm64/boot/dts/mediatek/mt7622-linksys-e8450.dts
create mode 100755 target/linux/mediatek/mt7622/base-files/etc/board.d/01_leds
create mode 100755 target/linux/mediatek/mt7622/base-files/etc/init.d/bootcount
create mode 100644 target/linux/mediatek/patches-5.10/340-mtd-spinand-Add-support-for-the-Fidelix-FM35X1GA.patch
diff --git a/target/linux/mediatek/files-5.10/arch/arm64/boot/dts/mediatek/mt7622-linksys-e8450.dts b/target/linux/mediatek/files-5.10/arch/arm64/boot/dts/mediatek/mt7622-linksys-e8450.dts
new file mode 100644
index 0000000000..00b11690f0
--- /dev/null
+++ b/target/linux/mediatek/files-5.10/arch/arm64/boot/dts/mediatek/mt7622-linksys-e8450.dts
@@ -0,0 +1,488 @@
+/*
+ * SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+#include "mt7622.dtsi"
+#include "mt6380.dtsi"
+
+/ {
+ model = "Linksys E8450";
+ compatible = "linksys,e8450", "mediatek,mt7622";
+
+ aliases {
+ serial0 = &uart0;
+ led-boot = &led_power;
+ led-failsafe = &led_power;
+ led-running = &led_power;
+ led-upgrade = &led_power;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+ };
+
+ cpus {
+ cpu@0 {
+ proc-supply = <&mt6380_vcpu_reg>;
+ sram-supply = <&mt6380_vm_reg>;
+ };
+
+ cpu@1 {
+ proc-supply = <&mt6380_vcpu_reg>;
+ sram-supply = <&mt6380_vm_reg>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ factory {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&pio 0 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&pio 102 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led_power: power_blue {
+ label = "power:blue";
+ gpios = <&pio 95 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+ power_orange {
+ label = "power:orange";
+ gpios = <&pio 96 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ inet_blue {
+ label = "inet:blue";
+ gpios = <&pio 97 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ inet_orange {
+ label = "inet:orange";
+ gpios = <&pio 98 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+};
+
+&bch {
+ status = "okay";
+};
+
+&btif {
+ status = "okay";
+};
+
+&cir {
+ pinctrl-names = "default";
+ pinctrl-0 = <&irrx_pins>;
+ status = "okay";
+};
+
+&eth {
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth_pins>;
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+ reset-gpios = <&pio 54 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+
+ };
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie0_pins>;
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_pins>;
+ status = "okay";
+};
+
+&slot0 {
+ mt7915@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ mediatek,mtd-eeprom = <&factory 0x05000>;
+ };
+};
+
+&pio {
+ /* Attention: GPIO 90 is used to switch between PCIe@1,0 and
+ * SATA functions. i.e. output-high: PCIe, output-low: SATA
+ */
+// asm_sel {
+// gpio-hog;
+// gpios = <90 GPIO_ACTIVE_HIGH>;
+// output-high;
+// };
+
+ eth_pins: eth-pins {
+ mux {
+ function = "eth";
+ groups = "mdc_mdio", "rgmii_via_gmac2";
+ };
+ };
+
+ irrx_pins: irrx-pins {
+ mux {
+ function = "ir";
+ groups = "ir_1_rx";
+ };
+ };
+
+ irtx_pins: irtx-pins {
+ mux {
+ function = "ir";
+ groups = "ir_1_tx";
+ };
+ };
+
+ pcie0_pins: pcie0-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie0_pad_perst",
+ "pcie0_1_waken",
+ "pcie0_1_clkreq";
+ };
+ };
+
+ pcie1_pins: pcie1-pins {
+ mux {
+ function = "pcie";
+ groups = "pcie1_pad_perst",
+ "pcie1_0_waken",
+ "pcie1_0_clkreq";
+ };
+ };
+
+ pmic_bus_pins: pmic-bus-pins {
+ mux {
+ function = "pmic";
+ groups = "pmic_bus";
+ };
+ };
+
+ pwm7_pins: pwm1-2-pins {
+ mux {
+ function = "pwm";
+ groups = "pwm_ch7_2";
+ };
+ };
+
+ wled_pins: wled-pins {
+ mux {
+ function = "led";
+ groups = "wled";
+ };
+ };
+
+ /* Serial NAND is shared pin with SPI-NOR */
+ serial_nand_pins: serial-nand-pins {
+ mux {
+ function = "flash";
+ groups = "snfi";
+ };
+ };
+
+ spic0_pins: spic0-pins {
+ mux {
+ function = "spi";
+ groups = "spic0_0";
+ };
+ };
+
+ spic1_pins: spic1-pins {
+ mux {
+ function = "spi";
+ groups = "spic1_0";
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ mux {
+ function = "uart";
+ groups = "uart0_0_tx_rx" ;
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ mux {
+ function = "uart";
+ groups = "uart2_1_tx_rx" ;
+ };
+ };
+
+ watchdog_pins: watchdog-pins {
+ mux {
+ function = "watchdog";
+ groups = "watchdog";
+ };
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm7_pins>;
+ status = "okay";
+};
+
+&pwrap {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_bus_pins>;
+
+ status = "okay";
+};
+
+&sata {
+ status = "disabled";
+};
+
+&sata_phy {
+ status = "disabled";
+};
+
+&snfi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&serial_nand_pins>;
+ status = "okay";
+
+ spi_nand@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spi-nand";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+
+ mediatek,bmt-v2;
+ mediatek,bmt-table-size = <0x1000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "Preloader";
+ reg = <0x00000 0x0080000>;
+ read-only;
+ };
+
+ partition@80000 {
+ label = "ATF";
+ reg = <0x80000 0x0040000>;
+ };
+
+ partition@c0000 {
+ label = "u-boot";
+ reg = <0xc0000 0x0080000>;
+ };
+
+ partition@140000 {
+ label = "u-boot-env";
+ reg = <0x140000 0x0080000>;
+ };
+
+ factory: partition@1c0000 {
+ label = "factory";
+ reg = <0x1c0000 0x0100000>;
+ };
+
+ partition@300000 {
+ label = "devinfo";
+ reg = <0x300000 0x020000>;
+ };
+
+ partition@320000 {
+ label = "senv";
+ reg = <0x320000 0x020000>;
+ };
+
+ partition@360000 {
+ label = "bootseq";
+ reg = <0x360000 0x020000>;
+ };
+
+ partition@500000 {
+ label = "firmware1";
+ compatible = "denx,fit";
+ openwrt,cmdline-match = "mtdparts=master";
+ reg = <0x500000 0x1E00000>;
+ };
+
+ partition@2300000 {
+ label = "firmware2";
+ compatible = "denx,fit";
+ openwrt,cmdline-match = "mtdparts=slave";
+ reg = <0x2300000 0x1E00000>;
+ };
+
+ partition@4100000 {
+ label = "data";
+ reg = <0x4100000 0x1900000>;
+ };
+
+ partition@5a00000 {
+ label = "mfg";
+ reg = <0x5a00000 0x1400000>;
+ };
+ };
+ };
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spic0_pins>;
+ status = "okay";
+};
+
+&spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spic1_pins>;
+ status = "okay";
+};
+
+&ssusb {
+ vusb33-supply = <&reg_3p3v>;
+ vbus-supply = <&reg_5v>;
+ status = "okay";
+};
+
+&u3phy {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "okay";
+};
+
+&watchdog {
+ pinctrl-names = "default";
+ pinctrl-0 = <&watchdog_pins>;
+ status = "okay";
+};
+
+&wmac {
+ mediatek,mtd-eeprom = <&factory 0x0000>;
+ status = "okay";
+};
diff --git a/target/linux/mediatek/image/mt7622.mk b/target/linux/mediatek/image/mt7622.mk
index 74f6eba19a..efaa3bcaa3 100644
--- a/target/linux/mediatek/image/mt7622.mk
+++ b/target/linux/mediatek/image/mt7622.mk
@@ -36,6 +36,16 @@ define Device/elecom_wrc-2533gent
endef
TARGET_DEVICES += elecom_wrc-2533gent
+define Device/linksys_e8450
+ DEVICE_VENDOR := Linksys
+ DEVICE_MODEL := E8450
+ DEVICE_DTS := mt7622-linksys-e8450
+ DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
+ DEVICE_PACKAGES := kmod-usb-ohci kmod-usb2 kmod-usb3 kmod-ata-ahci-mtk \
+ kmod-mt7615e kmod-mt7615-firmware kmod-mt7915
+endef
+TARGET_DEVICES += linksys_e8450
+
define Device/mediatek_mt7622-rfb1
DEVICE_VENDOR := MediaTek
DEVICE_MODEL := MTK7622 rfb1 AP
diff --git a/target/linux/mediatek/mt7622/base-files/etc/board.d/01_leds b/target/linux/mediatek/mt7622/base-files/etc/board.d/01_leds
new file mode 100755
index 0000000000..e74944a65f
--- /dev/null
+++ b/target/linux/mediatek/mt7622/base-files/etc/board.d/01_leds
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+. /lib/functions/leds.sh
+. /lib/functions/uci-defaults.sh
+
+board=$(board_name)
+
+board_config_update
+
+case $board in
+linksys,e8450)
+ ucidef_set_led_netdev "wan" "WAN" "inet:blue" "wan"
+ ;;
+esac
+
+board_config_flush
+
+exit 0
diff --git a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
index 3a409c8ec9..f6cd4ba3fc 100755
--- a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
+++ b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
@@ -10,7 +10,8 @@ mediatek_setup_interfaces()
case $board in
bananapi,bpi-r64-rootdisk|\
- bananapi,bpi-r64)
+ bananapi,bpi-r64|\
+ linksys,e8450)
ucidef_set_interfaces_lan_wan "lan0 lan1 lan2 lan3" wan
;;
mediatek,mt7622-rfb1)
@@ -31,7 +32,15 @@ mediatek_setup_macs()
local board="$1"
case $board in
+ linksys,e8450)
+ wan_mac=$(mtd_get_mac_ascii devinfo wan_mac_addr)
+ lan_mac=$(mtd_get_mac_ascii devinfo lan_mac_addr)
+ label_mac=$wan_mac
+ ;;
esac
+ [ -n "$lan_mac" ] && ucidef_set_interface_macaddr "lan" $lan_mac
+ [ -n "$wan_mac" ] && ucidef_set_interface_macaddr "wan" $wan_mac
+ [ -n "$label_mac" ] && ucidef_set_label_macaddr $label_mac
}
board_config_update
diff --git a/target/linux/mediatek/mt7622/base-files/etc/init.d/bootcount b/target/linux/mediatek/mt7622/base-files/etc/init.d/bootcount
new file mode 100755
index 0000000000..bc4eeb6530
--- /dev/null
+++ b/target/linux/mediatek/mt7622/base-files/etc/init.d/bootcount
@@ -0,0 +1,11 @@
+#!/bin/sh /etc/rc.common
+
+START=99
+
+boot() {
+ case $(board_name) in
+ linksys,e8450)
+ mtd erase senv || true
+ ;;
+ esac
+}
diff --git a/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh b/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh
index 8144476943..95ac8b5657 100755
--- a/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh
+++ b/target/linux/mediatek/mt7622/base-files/lib/upgrade/platform.sh
@@ -10,6 +10,14 @@ platform_do_upgrade() {
mediatek,mt7622,ubi)
nand_do_upgrade "$1"
;;
+ linksys,e8450)
+ if grep -q mtdparts=slave /proc/cmdline; then
+ PART_NAME=firmware2
+ else
+ PART_NAME=firmware1
+ fi
+ default_do_upgrade "$1"
+ ;;
*)
default_do_upgrade "$1"
;;
diff --git a/target/linux/mediatek/patches-5.10/340-mtd-spinand-Add-support-for-the-Fidelix-FM35X1GA.patch b/target/linux/mediatek/patches-5.10/340-mtd-spinand-Add-support-for-the-Fidelix-FM35X1GA.patch
new file mode 100644
index 0000000000..69a9297e1f
--- /dev/null
+++ b/target/linux/mediatek/patches-5.10/340-mtd-spinand-Add-support-for-the-Fidelix-FM35X1GA.patch
@@ -0,0 +1,122 @@
+From ea0df4552efcdcc2806fe6eba0540b5f719d80b6 Mon Sep 17 00:00:00 2001
+From: Davide Fioravanti <pantanastyle@gmail.com>
+Date: Fri, 8 Jan 2021 15:35:24 +0100
+Subject: [PATCH 1/1] mtd: spinand: Add support for the Fidelix FM35X1GA
+
+Datasheet: http://www.hobos.com.cn/upload/datasheet/DS35X1GAXXX_100_rev00.pdf
+
+Signed-off-by: Davide Fioravanti <pantanastyle@gmail.com>
+---
+ drivers/mtd/nand/spi/Makefile | 2 +-
+ drivers/mtd/nand/spi/core.c | 1 +
+ drivers/mtd/nand/spi/fidelix.c | 80 ++++++++++++++++++++++++++++++++++
+ include/linux/mtd/spinand.h | 1 +
+ 4 files changed, 83 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/mtd/nand/spi/fidelix.c
+
+--- a/drivers/mtd/nand/spi/Makefile
++++ b/drivers/mtd/nand/spi/Makefile
+@@ -1,3 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0
+-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
++spinand-objs := core.o fidelix.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
+ obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -755,6 +755,7 @@ static const struct nand_ops spinand_ops
+ };
+
+ static const struct spinand_manufacturer *spinand_manufacturers[] = {
++ &fidelix_spinand_manufacturer,
+ &gigadevice_spinand_manufacturer,
+ &macronix_spinand_manufacturer,
+ &micron_spinand_manufacturer,
+--- /dev/null
++++ b/drivers/mtd/nand/spi/fidelix.c
+@@ -0,0 +1,76 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2020 Davide Fioravanti <pantanastyle@gmail.com>
++ */
++
++#include <linux/device.h>
++#include <linux/kernel.h>
++#include <linux/mtd/spinand.h>
++
++#define SPINAND_MFR_FIDELIX 0xE5
++#define FIDELIX_ECCSR_MASK 0x0F
++
++static SPINAND_OP_VARIANTS(read_cache_variants,
++ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
++
++static SPINAND_OP_VARIANTS(write_cache_variants,
++ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
++ SPINAND_PROG_LOAD(true, 0, NULL, 0));
++
++static SPINAND_OP_VARIANTS(update_cache_variants,
++ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
++ SPINAND_PROG_LOAD(false, 0, NULL, 0));
++
++static int fm35x1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
++{
++ if (section > 3)
++ return -ERANGE;
++
++ region->offset = (16 * section) + 8;
++ region->length = 8;
++
++ return 0;
++}
++
++static int fm35x1ga_ooblayout_free(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
++{
++ if (section > 3)
++ return -ERANGE;
++
++ region->offset = (16 * section) + 2;
++ region->length = 6;
++
++ return 0;
++}
++
++static const struct mtd_ooblayout_ops fm35x1ga_ooblayout = {
++ .ecc = fm35x1ga_ooblayout_ecc,
++ .free = fm35x1ga_ooblayout_free,
++};
++
++static const struct spinand_info fidelix_spinand_table[] = {
++ SPINAND_INFO("FM35X1GA",
++ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71),
++ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
++ NAND_ECCREQ(4, 512),
++ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
++ &write_cache_variants,
++ &update_cache_variants),
++ SPINAND_HAS_QE_BIT,
++ SPINAND_ECCINFO(&fm35x1ga_ooblayout, NULL)),
++};
++
++static const struct spinand_manufacturer_ops fidelix_spinand_manuf_ops = {
++};
++
++const struct spinand_manufacturer fidelix_spinand_manufacturer = {
++ .id = SPINAND_MFR_FIDELIX,
++ .name = "Fidelix",
++ .chips = fidelix_spinand_table,
++ .nchips = ARRAY_SIZE(fidelix_spinand_table),
++ .ops = &fidelix_spinand_manuf_ops,
++};
+--- a/include/linux/mtd/spinand.h
++++ b/include/linux/mtd/spinand.h
+@@ -238,6 +238,7 @@ struct spinand_manufacturer {
+ };
+
+ /* SPI NAND manufacturers */
++extern const struct spinand_manufacturer fidelix_spinand_manufacturer;
+ extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
+ extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+ extern const struct spinand_manufacturer micron_spinand_manufacturer;
--
2.25.1

View File

@@ -0,0 +1,42 @@
From d8505bfc5491c7b3c9cfb6a58380c115f83ffeb7 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Sat, 30 Jan 2021 23:08:27 +0000
Subject: [PATCH 16/22] mediatek: linksys-e8450: fix wifi and lan4
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
target/linux/mediatek/image/mt7622.mk | 2 +-
.../linux/mediatek/mt7622/base-files/etc/board.d/02_network | 4 +---
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/target/linux/mediatek/image/mt7622.mk b/target/linux/mediatek/image/mt7622.mk
index efaa3bcaa3..df9e0d7d17 100644
--- a/target/linux/mediatek/image/mt7622.mk
+++ b/target/linux/mediatek/image/mt7622.mk
@@ -42,7 +42,7 @@ define Device/linksys_e8450
DEVICE_DTS := mt7622-linksys-e8450
DEVICE_DTS_DIR := $(DTS_DIR)/mediatek
DEVICE_PACKAGES := kmod-usb-ohci kmod-usb2 kmod-usb3 kmod-ata-ahci-mtk \
- kmod-mt7615e kmod-mt7615-firmware kmod-mt7915
+ kmod-mt7615e kmod-mt7615-firmware kmod-mt7915e
endef
TARGET_DEVICES += linksys_e8450
diff --git a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
index f6cd4ba3fc..3d2b9ffe49 100755
--- a/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
+++ b/target/linux/mediatek/mt7622/base-files/etc/board.d/02_network
@@ -11,9 +11,7 @@ mediatek_setup_interfaces()
case $board in
bananapi,bpi-r64-rootdisk|\
bananapi,bpi-r64|\
- linksys,e8450)
- ucidef_set_interfaces_lan_wan "lan0 lan1 lan2 lan3" wan
- ;;
+ linksys,e8450|\
mediatek,mt7622-rfb1)
ucidef_set_interfaces_lan_wan "lan1 lan2 lan3 lan4" wan
;;
--
2.25.1

View File

@@ -0,0 +1,754 @@
From 86c2de0e5b6b800525df4abf533366c34554064f Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Mon, 15 Feb 2021 14:37:17 +0000
Subject: [PATCH 17/22] image: add support for building FIT image with
filesystem
Allow for a single (external-data) FIT image to hold kernel, dtb and
squashfs. That way the bootloader can verify the system integrity
including the rootfs, and flashing sysupgrade and factory images on
many platforms becomes much easier.
In short: mkimage has a parameter '-E' which allows generating FIT
images with 'external' data rather than embedding the data into the
device-tree blob itself. In this way, the FIT structure itself remains
small and can be parsed easily (rather than having to page around
megabytes of image content). This patch makes use of that and adds
support for adding sub-images of type 'filesystem' which are used to
store the squashfs. Now U-Boot can verify the whole OS and the new
partition parsers added in the Linux kernel can detect the filesystem
sub-images and create partitions for them, and select the active
rootfs volume based on the configuration in FIT.
This new FIT partition parser works for NOR flash (on top of mtdblock),
NAND flash (on top of ubiblock) as well as classic block devices
(i.e. eMMC, SD card, SATA, NVMe, ...).
See the follow-up commits for a good example of its use (on SPI-NAND).
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
include/image-commands.mk | 3 +-
package/base-files/files/lib/upgrade/nand.sh | 102 +++++---
scripts/mkits.sh | 45 +++-
target/linux/generic/config-5.10 | 1 +
.../generic/files/block/partitions/fit.c | 234 ++++++++++++++++++
.../400-block-fit-partition-parser.patch | 96 +++++++
...to-create-ubiblock-device-for-rootfs.patch | 5 +-
7 files changed, 442 insertions(+), 44 deletions(-)
create mode 100644 target/linux/generic/files/block/partitions/fit.c
create mode 100644 target/linux/generic/hack-5.10/400-block-fit-partition-parser.patch
diff --git a/include/image-commands.mk b/include/image-commands.mk
index 51e745958e..bddbed6052 100644
--- a/include/image-commands.mk
+++ b/include/image-commands.mk
@@ -200,11 +200,12 @@ define Build/fit
$(TOPDIR)/scripts/mkits.sh \
-D $(DEVICE_NAME) -o $@.its -k $@ \
$(if $(word 2,$(1)),-d $(word 2,$(1))) -C $(word 1,$(1)) \
+ $(if $(word 3,$(1)),-r $(IMAGE_ROOTFS) -f $(subst _,$(comma),$(DEVICE_NAME))) \
-a $(KERNEL_LOADADDR) -e $(if $(KERNEL_ENTRY),$(KERNEL_ENTRY),$(KERNEL_LOADADDR)) \
$(if $(DEVICE_FDT_NUM),-n $(DEVICE_FDT_NUM)) \
-c $(if $(DEVICE_DTS_CONFIG),$(DEVICE_DTS_CONFIG),"config@1") \
-A $(LINUX_KARCH) -v $(LINUX_VERSION)
- PATH=$(LINUX_DIR)/scripts/dtc:$(PATH) mkimage -f $@.its $@.new
+ PATH=$(LINUX_DIR)/scripts/dtc:$(PATH) mkimage $(if $(word 3,$(1)),-E -B 0x1000 -p 0x1000) -f $@.its $@.new
@mv $@.new $@
endef
diff --git a/package/base-files/files/lib/upgrade/nand.sh b/package/base-files/files/lib/upgrade/nand.sh
index e6f58df4f5..5bc9ff83f9 100644
--- a/package/base-files/files/lib/upgrade/nand.sh
+++ b/package/base-files/files/lib/upgrade/nand.sh
@@ -3,13 +3,13 @@
. /lib/functions.sh
-# 'kernel' partition on NAND contains the kernel
+# 'kernel' partition or UBI volume on NAND contains the kernel
CI_KERNPART="${CI_KERNPART:-kernel}"
# 'ubi' partition on NAND contains UBI
CI_UBIPART="${CI_UBIPART:-ubi}"
-# 'rootfs' partition on NAND contains the rootfs
+# 'rootfs' UBI volume on NAND contains the rootfs
CI_ROOTPART="${CI_ROOTPART:-rootfs}"
ubi_mknod() {
@@ -117,9 +117,11 @@ nand_restore_config() {
nand_upgrade_prepare_ubi() {
local rootfs_length="$1"
local rootfs_type="$2"
- local has_kernel="${3:-0}"
+ local kernel_length="$3"
local has_env="${4:-0}"
+ [ -n "$rootfs_length" -o -n "$kernel_length" ] || return 1
+
local mtdnum="$( find_mtd_index "$CI_UBIPART" )"
if [ ! "$mtdnum" ]; then
echo "cannot find ubi mtd partition $CI_UBIPART"
@@ -148,23 +150,24 @@ nand_upgrade_prepare_ubi() {
local root_ubivol="$( nand_find_volume $ubidev $CI_ROOTPART )"
local data_ubivol="$( nand_find_volume $ubidev rootfs_data )"
- # remove ubiblock device of rootfs
- local root_ubiblk="ubiblock${root_ubivol:3}"
- if [ "$root_ubivol" -a -e "/dev/$root_ubiblk" ]; then
- echo "removing $root_ubiblk"
- if ! ubiblock -r /dev/$root_ubivol; then
- echo "cannot remove $root_ubiblk"
- return 1;
+ local ubiblk ubiblkvol
+ for ubiblk in /dev/ubiblock*_? ; do
+ [ -e "$ubiblk" ] || continue
+ echo "removing ubiblock${ubiblk:13}"
+ ubiblkvol=ubi${ubiblk:13}
+ if ! ubiblock -r /dev/$ubiblkvol; then
+ echo "cannot remove $ubiblk"
+ return 1
fi
- fi
+ done
# kill volumes
[ "$kern_ubivol" ] && ubirmvol /dev/$ubidev -N $CI_KERNPART || true
- [ "$root_ubivol" ] && ubirmvol /dev/$ubidev -N $CI_ROOTPART || true
+ [ "$root_ubivol" -a "$root_ubivol" != "$kern_ubivol" ] && ubirmvol /dev/$ubidev -N $CI_ROOTPART || true
[ "$data_ubivol" ] && ubirmvol /dev/$ubidev -N rootfs_data || true
# update kernel
- if [ "$has_kernel" = "1" ]; then
+ if [ -n "$kernel_length" ]; then
if ! ubimkvol /dev/$ubidev -N $CI_KERNPART -s $kernel_length; then
echo "cannot create kernel volume"
return 1;
@@ -172,15 +175,17 @@ nand_upgrade_prepare_ubi() {
fi
# update rootfs
- local root_size_param
- if [ "$rootfs_type" = "ubifs" ]; then
- root_size_param="-m"
- else
- root_size_param="-s $rootfs_length"
- fi
- if ! ubimkvol /dev/$ubidev -N $CI_ROOTPART $root_size_param; then
- echo "cannot create rootfs volume"
- return 1;
+ if [ -n "$rootfs_length" ]; then
+ local root_size_param
+ if [ "$rootfs_type" = "ubifs" ]; then
+ root_size_param="-m"
+ else
+ root_size_param="-s $rootfs_length"
+ fi
+ if ! ubimkvol /dev/$ubidev -N $CI_ROOTPART $root_size_param; then
+ echo "cannot create rootfs volume"
+ return 1;
+ fi
fi
# create rootfs_data for non-ubifs rootfs
@@ -232,7 +237,7 @@ nand_upgrade_ubinized() {
nand_upgrade_ubifs() {
local rootfs_length=$( (cat $1 | wc -c) 2> /dev/null)
- nand_upgrade_prepare_ubi "$rootfs_length" "ubifs" "0" "0"
+ nand_upgrade_prepare_ubi "$rootfs_length" "ubifs" "" ""
local ubidev="$( nand_find_ubi "$CI_UBIPART" )"
local root_ubivol="$(nand_find_volume $ubidev $CI_ROOTPART)"
@@ -241,39 +246,59 @@ nand_upgrade_ubifs() {
nand_do_upgrade_success
}
+nand_upgrade_fit() {
+ local fit_file="$1"
+ local fit_length="$(wc -c < "$fit_file")"
+
+ nand_upgrade_prepare_ubi "" "" "$fit_length" "1"
+
+ local fit_ubidev="$(nand_find_ubi "$CI_UBIPART")"
+ local fit_ubivol="$(nand_find_volume $fit_ubidev "$CI_KERNPART")"
+ ubiupdatevol /dev/$fit_ubivol -s $fit_length $fit_file
+
+ nand_do_upgrade_success
+}
+
nand_upgrade_tar() {
local tar_file="$1"
local kernel_mtd="$(find_mtd_index $CI_KERNPART)"
- local board_dir=$(tar tf $tar_file | grep -m 1 '^sysupgrade-.*/$')
+ local board_dir=$(tar tf "$tar_file" | grep -m 1 '^sysupgrade-.*/$')
board_dir=${board_dir%/}
- local kernel_length=$( (tar xf $tar_file ${board_dir}/kernel -O | wc -c) 2> /dev/null)
- local rootfs_length=$( (tar xf $tar_file ${board_dir}/root -O | wc -c) 2> /dev/null)
+ kernel_length=$( (tar xf "$tar_file" ${board_dir}/kernel -O | wc -c) 2> /dev/null)
+ local has_rootfs=0
+ local rootfs_length
+ local rootfs_type
- local rootfs_type="$(identify_tar "$tar_file" ${board_dir}/root)"
+ tar tf "$tar_file" ${board_dir}/root 1>/dev/null 2>/dev/null && has_rootfs=1
+ [ "$has_rootfs" = "1" ] && {
+ rootfs_length=$( (tar xf "$tar_file" ${board_dir}/root -O | wc -c) 2> /dev/null)
+ rootfs_type="$(identify_tar "$tar_file" ${board_dir}/root)"
+ }
local has_kernel=1
local has_env=0
[ "$kernel_length" != 0 -a -n "$kernel_mtd" ] && {
- tar xf $tar_file ${board_dir}/kernel -O | mtd write - $CI_KERNPART
+ tar xf "$tar_file" ${board_dir}/kernel -O | mtd write - $CI_KERNPART
}
- [ "$kernel_length" = 0 -o ! -z "$kernel_mtd" ] && has_kernel=0
+ [ "$kernel_length" = 0 -o ! -z "$kernel_mtd" ] && has_kernel=
- nand_upgrade_prepare_ubi "$rootfs_length" "$rootfs_type" "$has_kernel" "$has_env"
+ nand_upgrade_prepare_ubi "$rootfs_length" "$rootfs_type" "${has_kernel:+$kernel_length}" "$has_env"
local ubidev="$( nand_find_ubi "$CI_UBIPART" )"
[ "$has_kernel" = "1" ] && {
- local kern_ubivol="$(nand_find_volume $ubidev $CI_KERNPART)"
- tar xf $tar_file ${board_dir}/kernel -O | \
+ local kern_ubivol="$( nand_find_volume $ubidev $CI_KERNPART )"
+ tar xf "$tar_file" ${board_dir}/kernel -O | \
ubiupdatevol /dev/$kern_ubivol -s $kernel_length -
}
- local root_ubivol="$(nand_find_volume $ubidev $CI_ROOTPART)"
- tar xf $tar_file ${board_dir}/root -O | \
- ubiupdatevol /dev/$root_ubivol -s $rootfs_length -
-
+ [ "$has_rootfs" = "1" ] && {
+ local root_ubivol="$( nand_find_volume $ubidev $CI_ROOTPART )"
+ tar xf "$tar_file" ${board_dir}/root -O | \
+ ubiupdatevol /dev/$root_ubivol -s $rootfs_length -
+ }
nand_do_upgrade_success
}
@@ -281,9 +306,10 @@ nand_upgrade_tar() {
nand_do_upgrade() {
local file_type=$(identify $1)
- [ ! "$(find_mtd_index "$CI_UBIPART")" ] && CI_UBIPART="rootfs"
+ [ ! "$( find_mtd_index "$CI_UBIPART" )" ] && CI_UBIPART="rootfs"
case "$file_type" in
+ "fit") nand_upgrade_fit $1;;
"ubi") nand_upgrade_ubinized $1;;
"ubifs") nand_upgrade_ubifs $1;;
*) nand_upgrade_tar $1;;
@@ -309,7 +335,7 @@ nand_do_platform_check() {
local control_length=$( (tar xf $tar_file sysupgrade-$board_name/CONTROL -O | wc -c) 2> /dev/null)
local file_type="$(identify $2)"
- [ "$control_length" = 0 -a "$file_type" != "ubi" -a "$file_type" != "ubifs" ] && {
+ [ "$control_length" = 0 -a "$file_type" != "ubi" -a "$file_type" != "ubifs" -a "$file_type" != "fit" ] && {
echo "Invalid sysupgrade file."
return 1
}
diff --git a/scripts/mkits.sh b/scripts/mkits.sh
index bb629d6fca..3d68fdacbc 100755
--- a/scripts/mkits.sh
+++ b/scripts/mkits.sh
@@ -23,18 +23,23 @@ usage() {
printf "\n\t-c ==> set config name 'config'"
printf "\n\t-a ==> set load address to 'addr' (hex)"
printf "\n\t-e ==> set entry point to 'entry' (hex)"
+ printf "\n\t-f ==> set device tree compatible string"
printf "\n\t-v ==> set kernel version to 'version'"
printf "\n\t-k ==> include kernel image 'kernel'"
printf "\n\t-D ==> human friendly Device Tree Blob 'name'"
printf "\n\t-n ==> fdt unit-address 'address'"
printf "\n\t-d ==> include Device Tree Blob 'dtb'"
+ printf "\n\t-r ==> include RootFS blob"
+ printf "\n\t-H ==> specify hash algo instead of SHA1"
printf "\n\t-o ==> create output file 'its_file'\n"
exit 1
}
FDTNUM=1
+ROOTFSNUM=1
+HASH=sha1
-while getopts ":A:a:c:C:D:d:e:k:n:o:v:" OPTION
+while getopts ":A:a:c:C:D:d:e:f:k:n:o:v:r:S" OPTION
do
case $OPTION in
A ) ARCH=$OPTARG;;
@@ -44,9 +49,12 @@ do
D ) DEVICE=$OPTARG;;
d ) DTB=$OPTARG;;
e ) ENTRY_ADDR=$OPTARG;;
+ f ) COMPATIBLE=$OPTARG;;
k ) KERNEL=$OPTARG;;
n ) FDTNUM=$OPTARG;;
o ) OUTPUT=$OPTARG;;
+ r ) ROOTFS=$OPTARG;;
+ S ) HASH=$OPTARG;;
v ) VERSION=$OPTARG;;
* ) echo "Invalid option passed to '$0' (options:$*)"
usage;;
@@ -62,11 +70,16 @@ fi
ARCH_UPPER=$(echo "$ARCH" | tr '[:lower:]' '[:upper:]')
+if [ -n "${COMPATIBLE}" ]; then
+ COMPATIBLE_PROP="compatible = \"${COMPATIBLE}\";"
+fi
+
# Conditionally create fdt information
if [ -n "${DTB}" ]; then
FDT_NODE="
fdt@$FDTNUM {
description = \"${ARCH_UPPER} OpenWrt ${DEVICE} device tree blob\";
+ ${COMPATIBLE_PROP}
data = /incbin/(\"${DTB}\");
type = \"flat_dt\";
arch = \"${ARCH}\";
@@ -75,13 +88,34 @@ if [ -n "${DTB}" ]; then
algo = \"crc32\";
};
hash@2 {
- algo = \"sha1\";
+ algo = \"${HASH}\";
};
};
"
FDT_PROP="fdt = \"fdt@$FDTNUM\";"
fi
+if [ -n "${ROOTFS}" ]; then
+ dd if="${ROOTFS}" of="${ROOTFS}.pagesync" bs=4096 conv=sync
+ ROOTFS_NODE="
+ rootfs@$ROOTFSNUM {
+ description = \"${ARCH_UPPER} OpenWrt ${DEVICE} rootfs\";
+ ${COMPATIBLE_PROP}
+ data = /incbin/(\"${ROOTFS}.pagesync\");
+ type = \"filesystem\";
+ arch = \"${ARCH}\";
+ compression = \"none\";
+ hash@1 {
+ algo = \"crc32\";
+ };
+ hash@2 {
+ algo = \"${HASH}\";
+ };
+ };
+"
+ ROOTFS_PROP="loadables = \"rootfs@${ROOTFSNUM}\";"
+fi
+
# Create a default, fully populated DTS file
DATA="/dts-v1/;
@@ -103,18 +137,21 @@ DATA="/dts-v1/;
algo = \"crc32\";
};
hash@2 {
- algo = \"sha1\";
+ algo = \"$HASH\";
};
};
${FDT_NODE}
+${ROOTFS_NODE}
};
configurations {
default = \"${CONFIG}\";
${CONFIG} {
- description = \"OpenWrt\";
+ description = \"OpenWrt ${DEVICE}\";
kernel = \"kernel@1\";
${FDT_PROP}
+ ${ROOTFS_PROP}
+ ${COMPATIBLE_PROP}
};
};
};"
diff --git a/target/linux/generic/config-5.10 b/target/linux/generic/config-5.10
index f7cc6c8561..ba6317e35f 100644
--- a/target/linux/generic/config-5.10
+++ b/target/linux/generic/config-5.10
@@ -1859,6 +1859,7 @@ CONFIG_FIB_RULES=y
# CONFIG_FIELDBUS_DEV is not set
CONFIG_FILE_LOCKING=y
# CONFIG_FIND_BIT_BENCHMARK is not set
+# CONFIG_FIT_PARTITION is not set
# CONFIG_FIREWIRE is not set
# CONFIG_FIREWIRE_NOSY is not set
# CONFIG_FIREWIRE_SERIAL is not set
diff --git a/target/linux/generic/files/block/partitions/fit.c b/target/linux/generic/files/block/partitions/fit.c
new file mode 100644
index 0000000000..3694a22667
--- /dev/null
+++ b/target/linux/generic/files/block/partitions/fit.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs/partitions/fit.c
+ * Copyright (C) 2021 Daniel Golle
+ *
+ * headers extracted from U-Boot mkimage sources
+ * (C) Copyright 2008 Semihalf
+ * (C) Copyright 2000-2005
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * based on existing partition parsers
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#define pr_fmt(fmt) fmt
+
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#include "check.h"
+
+#define FIT_IMAGES_PATH "/images"
+#define FIT_CONFS_PATH "/configurations"
+
+/* hash/signature/key node */
+#define FIT_HASH_NODENAME "hash"
+#define FIT_ALGO_PROP "algo"
+#define FIT_VALUE_PROP "value"
+#define FIT_IGNORE_PROP "uboot-ignore"
+#define FIT_SIG_NODENAME "signature"
+#define FIT_KEY_REQUIRED "required"
+#define FIT_KEY_HINT "key-name-hint"
+
+/* cipher node */
+#define FIT_CIPHER_NODENAME "cipher"
+#define FIT_ALGO_PROP "algo"
+
+/* image node */
+#define FIT_DATA_PROP "data"
+#define FIT_DATA_POSITION_PROP "data-position"
+#define FIT_DATA_OFFSET_PROP "data-offset"
+#define FIT_DATA_SIZE_PROP "data-size"
+#define FIT_TIMESTAMP_PROP "timestamp"
+#define FIT_DESC_PROP "description"
+#define FIT_ARCH_PROP "arch"
+#define FIT_TYPE_PROP "type"
+#define FIT_OS_PROP "os"
+#define FIT_COMP_PROP "compression"
+#define FIT_ENTRY_PROP "entry"
+#define FIT_LOAD_PROP "load"
+
+/* configuration node */
+#define FIT_KERNEL_PROP "kernel"
+#define FIT_FILESYSTEM_PROP "filesystem"
+#define FIT_RAMDISK_PROP "ramdisk"
+#define FIT_FDT_PROP "fdt"
+#define FIT_LOADABLE_PROP "loadables"
+#define FIT_DEFAULT_PROP "default"
+#define FIT_SETUP_PROP "setup"
+#define FIT_FPGA_PROP "fpga"
+#define FIT_FIRMWARE_PROP "firmware"
+#define FIT_STANDALONE_PROP "standalone"
+
+#define FIT_MAX_HASH_LEN HASH_MAX_DIGEST_SIZE
+
+int fit_partition(struct parsed_partitions *state)
+{
+ struct address_space *mapping = state->bdev->bd_inode->i_mapping;
+ struct page *page = read_mapping_page(mapping, 0, NULL);
+ void *fit, *init_fit;
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname)];
+ u64 dsize, dsectors, isectors;
+ u32 size, image_pos, image_len;
+ const u32 *image_offset_be, *image_len_be, *image_pos_be;
+ int ret = 1, node, images, config, slot;
+ const char *image_name, *image_type, *image_description, *config_default,
+ *config_description, *config_loadables;
+ int image_name_len, image_type_len, image_description_len, config_default_len,
+ config_description_len, config_loadables_len;
+ sector_t start_sect, nr_sects;
+ size_t label_min;
+
+ if (!page)
+ return -ENOMEM;
+
+ init_fit = page_address(page);
+
+ if (!init_fit) {
+ put_page(page);
+ return -EFAULT;
+ }
+
+ if (fdt_check_header(init_fit)) {
+ put_page(page);
+ return 0;
+ }
+
+ dsectors = get_capacity(state->bdev->bd_disk);
+ dsize = dsectors << SECTOR_SHIFT;
+
+ printk(KERN_INFO "FIT: volume size: %llu sectors (%llu bytes)\n", dsectors, dsize);
+
+ size = fdt_totalsize(init_fit);
+ isectors = size >> SECTOR_SHIFT;
+ if ((isectors << SECTOR_SHIFT) < size)
+ ++isectors;
+
+ printk(KERN_INFO "FIT: FDT structure size: %llu sectors (%u bytes)\n", isectors, size);
+
+ if (size >= dsize || size > PAGE_SIZE)
+ {
+ put_page(page);
+ state->access_beyond_eod = (size >= dsize);
+ return 0;
+ }
+
+ fit = kmemdup(init_fit, size, GFP_KERNEL);
+ put_page(page);
+ if (!fit)
+ return -ENOMEM;
+
+ config = fdt_path_offset(fit, FIT_CONFS_PATH);
+ if (config < 0) {
+ printk(KERN_INFO "FIT: Cannot find %s node: %d\n", FIT_CONFS_PATH, config);
+ ret = -ENOENT;
+ goto ret_out;
+ }
+
+ config_default = fdt_getprop(fit, config, FIT_DEFAULT_PROP, &config_default_len);
+
+ if (!config_default) {
+ printk(KERN_INFO "FIT: Cannot find default configuration\n");
+ ret = -ENOENT;
+ goto ret_out;
+ }
+
+ node = fdt_subnode_offset(fit, config, config_default);
+ if (node < 0) {
+ printk(KERN_INFO "FIT: Cannot find %s node: %d\n", config_default, node);
+ ret = -ENOENT;
+ goto ret_out;
+ }
+
+ config_description = fdt_getprop(fit, node, FIT_DESC_PROP, &config_description_len);
+ config_loadables = fdt_getprop(fit, node, FIT_LOADABLE_PROP, &config_loadables_len);
+
+ printk(KERN_INFO "FIT: Default configuration: %s%s%s%s\n", config_default,
+ config_description?" (":"", config_description?:"", config_description?")":"");
+
+ images = fdt_path_offset(fit, FIT_IMAGES_PATH);
+ if (images < 0) {
+ printk(KERN_INFO "FIT: Cannot find %s node: %d\n", FIT_IMAGES_PATH, images);
+ ret = -EINVAL;
+ goto ret_out;
+ }
+
+ slot = 1;
+ fdt_for_each_subnode(node, fit, images) {
+ image_name = fdt_get_name(fit, node, &image_name_len);
+ image_type = fdt_getprop(fit, node, FIT_TYPE_PROP, &image_type_len);
+ image_offset_be = fdt_getprop(fit, node, FIT_DATA_OFFSET_PROP, NULL);
+ image_pos_be = fdt_getprop(fit, node, FIT_DATA_POSITION_PROP, NULL);
+ image_len_be = fdt_getprop(fit, node, FIT_DATA_SIZE_PROP, NULL);
+ if (!image_name || !image_type || !image_len_be)
+ continue;
+
+ image_len = be32_to_cpu(*image_len_be);
+ if (!image_len)
+ continue;
+
+ if (image_offset_be)
+ image_pos = be32_to_cpu(*image_offset_be) + size;
+ else if (image_pos_be)
+ image_pos = be32_to_cpu(*image_pos_be);
+ else
+ continue;
+
+ image_description = fdt_getprop(fit, node, FIT_DESC_PROP, &image_description_len);
+
+ printk(KERN_INFO "FIT: %16s sub-image 0x%08x - 0x%08x '%s' %s%s%s\n",
+ image_type, image_pos, image_pos + image_len, image_name,
+ image_description?"(":"", image_description?:"", image_description?") ":"");
+
+ if (strcmp(image_type, FIT_FILESYSTEM_PROP))
+ continue;
+
+ if (image_pos & ((1 << PAGE_SHIFT)-1)) {
+ printk(KERN_INFO "FIT: image %s start not aligned to page boundaries, skipping\n", image_name);
+ continue;
+ }
+
+ if (image_len & ((1 << PAGE_SHIFT)-1)) {
+ printk(KERN_INFO "FIT: sub-image %s end not aligned to page boundaries, skipping\n", image_name);
+ continue;
+ }
+
+ start_sect = image_pos >> SECTOR_SHIFT;
+ nr_sects = image_len >> SECTOR_SHIFT;
+
+ if (start_sect + nr_sects > dsectors) {
+ state->access_beyond_eod = 1;
+ continue;
+ }
+
+ put_partition(state, slot, start_sect, nr_sects);
+ state->parts[slot].flags = 0;
+ info = &state->parts[slot].info;
+
+ label_min = min_t(int, sizeof(info->volname) - 1, image_name_len);
+ strncpy(info->volname, image_name, label_min);
+ info->volname[label_min] = '\0';
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+ state->parts[slot].has_info = true;
+
+ if (config_loadables && !strcmp(image_name, config_loadables)) {
+ printk(KERN_INFO "FIT: selecting configured loadable %s to be root filesystem\n", image_name);
+ state->parts[slot].flags |= ADDPART_FLAG_ROOTDEV;
+ }
+
+ ++slot;
+ }
+
+ret_out:
+ kfree(fit);
+ return ret;
+}
diff --git a/target/linux/generic/hack-5.10/400-block-fit-partition-parser.patch b/target/linux/generic/hack-5.10/400-block-fit-partition-parser.patch
new file mode 100644
index 0000000000..9eaf8637d0
--- /dev/null
+++ b/target/linux/generic/hack-5.10/400-block-fit-partition-parser.patch
@@ -0,0 +1,96 @@
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -357,6 +357,7 @@ char *disk_name(struct gendisk *hd, int
+ #define ADDPART_FLAG_NONE 0
+ #define ADDPART_FLAG_RAID 1
+ #define ADDPART_FLAG_WHOLEDISK 2
++#define ADDPART_FLAG_ROOTDEV 4
+ void delete_partition(struct hd_struct *part);
+ int bdev_add_partition(struct block_device *bdev, int partno,
+ sector_t start, sector_t length);
+--- a/block/partitions/Kconfig
++++ b/block/partitions/Kconfig
+@@ -101,6 +101,13 @@ config ATARI_PARTITION
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under the Atari OS.
+
++config FIT_PARTITION
++ bool "Flattened-Image-Tree (FIT) partition support" if PARTITION_ADVANCED
++ default n
++ help
++ Say Y here if your system needs to mount the filesystem part of
++ a Flattened-Image-Tree (FIT) image commonly used with Das U-Boot.
++
+ config IBM_PARTITION
+ bool "IBM disk label and partition support"
+ depends on PARTITION_ADVANCED && S390
+--- a/block/partitions/Makefile
++++ b/block/partitions/Makefile
+@@ -8,6 +8,7 @@ obj-$(CONFIG_ACORN_PARTITION) += acorn.o
+ obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
+ obj-$(CONFIG_ATARI_PARTITION) += atari.o
+ obj-$(CONFIG_AIX_PARTITION) += aix.o
++obj-$(CONFIG_FIT_PARTITION) += fit.o
+ obj-$(CONFIG_CMDLINE_PARTITION) += cmdline.o
+ obj-$(CONFIG_MAC_PARTITION) += mac.o
+ obj-$(CONFIG_LDM_PARTITION) += ldm.o
+--- a/block/partitions/check.h
++++ b/block/partitions/check.h
+@@ -58,6 +58,7 @@ int amiga_partition(struct parsed_partit
+ int atari_partition(struct parsed_partitions *state);
+ int cmdline_partition(struct parsed_partitions *state);
+ int efi_partition(struct parsed_partitions *state);
++int fit_partition(struct parsed_partitions *state);
+ int ibm_partition(struct parsed_partitions *);
+ int karma_partition(struct parsed_partitions *state);
+ int ldm_partition(struct parsed_partitions *state);
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -10,6 +10,8 @@
+ #include <linux/vmalloc.h>
+ #include <linux/blktrace_api.h>
+ #include <linux/raid/detect.h>
++#include <linux/root_dev.h>
++
+ #include "check.h"
+
+ static int (*check_part[])(struct parsed_partitions *) = {
+@@ -46,6 +48,9 @@ static int (*check_part[])(struct parsed
+ #ifdef CONFIG_EFI_PARTITION
+ efi_partition, /* this must come before msdos */
+ #endif
++#ifdef CONFIG_FIT_PARTITION
++ fit_partition,
++#endif
+ #ifdef CONFIG_SGI_PARTITION
+ sgi_partition,
+ #endif
+@@ -694,6 +699,9 @@ static bool blk_add_partition(struct gen
+ (state->parts[p].flags & ADDPART_FLAG_RAID))
+ md_autodetect_dev(part_to_dev(part)->devt);
+
++ if ((state->parts[p].flags & ADDPART_FLAG_ROOTDEV) && ROOT_DEV == 0)
++ ROOT_DEV = part_to_dev(part)->devt;
++
+ return true;
+ }
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -396,7 +396,7 @@ int ubiblock_create(struct ubi_volume_in
+ dev->leb_size = vi->usable_leb_size;
+
+ /* Initialize the gendisk of this ubiblock device */
+- gd = alloc_disk(1);
++ gd = alloc_disk(0);
+ if (!gd) {
+ pr_err("UBI: block: alloc_disk failed\n");
+ ret = -ENODEV;
+@@ -413,6 +413,7 @@ int ubiblock_create(struct ubi_volume_in
+ goto out_put_disk;
+ }
+ gd->private_data = dev;
++ gd->flags |= GENHD_FL_EXT_DEVT;
+ sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
+ set_capacity(gd, disk_capacity);
+ dev->gd = gd;
diff --git a/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
index e5ee2c8656..a2b48fd4fc 100644
--- a/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
+++ b/target/linux/generic/pending-5.10/491-ubi-auto-create-ubiblock-device-for-rootfs.patch
@@ -8,7 +8,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
-@@ -652,6 +652,44 @@ static void __init ubiblock_create_from_
+@@ -652,6 +652,47 @@ static void __init ubiblock_create_from_
}
}
@@ -33,6 +33,9 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
+ desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
+ if (IS_ERR(desc))
++ desc = ubi_open_volume_nm(ubi_num, "fit", UBI_READONLY);
++
++ if (IS_ERR(desc))
+ continue;
+
+ ubi_get_volume_info(desc, &vi);
--
2.25.1

View File

@@ -0,0 +1,74 @@
From 9f0be984310a4d2bdacf94e53bd198aba3fa8675 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Sat, 20 Feb 2021 08:36:43 +0100
Subject: [PATCH 18/22] sysupgrade-nand: allow limiting rootfs_data by setting
env variable
Check if firmware environment variable 'rootfs_data_max' exists and is
set to a numerical value greater than 0. If so, limit rootfs_data
volume to that size instead of using the maximum available size.
This is useful on devices with lots of flash where users may want to
have eg. a volume for persistent logs and statistics or for external
applications/containers. Persistence on rootfs overlay is limited by
the size of memory available during the sysupgrade process as that
data needs to be copied to RAM while the volume is being recreated
during sysupgrade. Hence it is unsuitable for keeping larger amounts
of data across upgrade which makes additional volume(s) for
application data desirable.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
package/base-files/files/lib/upgrade/nand.sh | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/package/base-files/files/lib/upgrade/nand.sh b/package/base-files/files/lib/upgrade/nand.sh
index 5bc9ff83f9..e335d940ed 100644
--- a/package/base-files/files/lib/upgrade/nand.sh
+++ b/package/base-files/files/lib/upgrade/nand.sh
@@ -117,6 +117,9 @@ nand_restore_config() {
nand_upgrade_prepare_ubi() {
local rootfs_length="$1"
local rootfs_type="$2"
+ local rootfs_data_max="$(fw_printenv -n rootfs_data_max 2>/dev/null)"
+ [ -n "$rootfs_data_max" ] && rootfs_data_max=$(printf %d "$rootfs_data_max")
+
local kernel_length="$3"
local has_env="${4:-0}"
@@ -176,11 +179,11 @@ nand_upgrade_prepare_ubi() {
# update rootfs
if [ -n "$rootfs_length" ]; then
- local root_size_param
+ local rootfs_size_param
if [ "$rootfs_type" = "ubifs" ]; then
- root_size_param="-m"
+ rootfs_size_param="-m"
else
- root_size_param="-s $rootfs_length"
+ rootfs_size_param="-s $rootfs_length"
fi
if ! ubimkvol /dev/$ubidev -N $CI_ROOTPART $rootfs_size_param; then
echo "cannot create rootfs volume"
@@ -190,7 +193,16 @@ nand_upgrade_prepare_ubi() {
# create rootfs_data for non-ubifs rootfs
if [ "$rootfs_type" != "ubifs" ]; then
- if ! ubimkvol /dev/$ubidev -N rootfs_data -m; then
+ local availeb=$(cat /sys/devices/virtual/ubi/$ubidev/avail_eraseblocks)
+ local ebsize=$(cat /sys/devices/virtual/ubi/$ubidev/eraseblock_size)
+ local avail_size=$(( $availeb * $ebsize ))
+ local rootfs_data_size_param="-m"
+ if [ -n "$rootfs_data_max" ] &&
+ [ "$rootfs_data_max" != "0" ] &&
+ [ "$rootfs_data_max" -le "$avail_size" ]; then
+ rootfs_data_size_param="-s $rootfs_data_max"
+ fi
+ if ! ubimkvol /dev/$ubidev -N rootfs_data $rootfs_data_size_param; then
echo "cannot initialize rootfs_data volume"
return 1
fi
--
2.25.1

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
From ce99ccf70f226ea3aac19120ab8634b8cb6a1478 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Fri, 12 Feb 2021 03:09:39 +0000
Subject: [PATCH 20/22] uboot-envtools: add defaults for linksys-e8450-ubi
Add U-Boot environment configuration for the Linksys E8450 (UBI) to
allow access to the bootloader environment from OpenWrt via
'fw_printenv' and 'fw_setenv'.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
package/boot/uboot-envtools/files/mediatek | 25 ++++++++++++++++++++++
1 file changed, 25 insertions(+)
create mode 100644 package/boot/uboot-envtools/files/mediatek
diff --git a/package/boot/uboot-envtools/files/mediatek b/package/boot/uboot-envtools/files/mediatek
new file mode 100644
index 0000000000..92a04ea73d
--- /dev/null
+++ b/package/boot/uboot-envtools/files/mediatek
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright (C) 2021 OpenWrt.org
+#
+
+[ -e /etc/config/ubootenv ] && exit 0
+
+touch /etc/config/ubootenv
+
+. /lib/uboot-envtools.sh
+. /lib/functions.sh
+
+board=$(board_name)
+
+case "$board" in
+"linksys,e8450,ubi")
+ ubootenv_add_uci_config "/dev/ubi0_0" "0x0" "0x1f000" "0x1f000" "1"
+ ubootenv_add_uci_config "/dev/ubi0_1" "0x0" "0x1f000" "0x1f000" "1"
+ ;;
+esac
+
+config_load ubootenv
+config_foreach ubootenv_add_app_config ubootenv
+
+exit 0
--
2.25.1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

41
build.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Build an OpenWrt image for the given TARGET inside the prepared tree.
# Usage: ./build.sh <TARGET>   (normally invoked via dock-run.sh)
#
# -e: abort on error, -x: trace commands,
# -o pipefail: without it the `make ... | tee` below would mask a failing
# make with tee's (successful) exit status and the build would "pass".
set -exo pipefail
ROOT_PATH=${PWD}
BUILD_DIR=${ROOT_PATH}/openwrt
TARGET=${1}
if [ -z "$1" ]; then
	echo "Error: please specify TARGET"
	echo "One of: WF194C, ZYXEL_GS1900-10HP"
	exit 1
fi
# First run: clone and patch the tree; later runs: rebase onto it.
if [ ! "$(ls -A $BUILD_DIR)" ]; then
	python3 setup.py --setup || exit 1
else
	python3 setup.py --rebase
	echo "### OpenWrt repo already setup"
fi
# Map the user-facing target name onto the profile used by gen_config.
case "${TARGET}" in
WF194C)
	TARGET=wf194c
	;;
ZYXEL_GS1900-10HP)
	TARGET=zyxel_gs1900-10hp
	;;
*)
	echo "${TARGET} is unknown"
	exit 1
	;;
esac
cd "${BUILD_DIR}"
./scripts/gen_config.py ${TARGET} ucentral-ap || exit 1
echo "### Building image ..."
make -j$(nproc) V=s 2>&1 | tee build.log
echo "Done"

8
config.yml Normal file
View File

@@ -0,0 +1,8 @@
# Build configuration for the uCentral OpenWrt tree.
# Consumed by setup.py (clone/patch) and build.sh.
repo: https://github.com/openwrt/openwrt.git
# Upstream branch to track and the exact commit to check out.
branch: openwrt-21.02
revision: fdc0342704b692c46ccb65c6372a853ff89094c4
# Where built artifacts are collected.
output_dir: ./output
# Patch series applied on top of the checked-out revision, in listed order.
patch_folders:
- backports/
- patches/

9
dock-run.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/bash -ex
# Run the given command inside the project's build container, with the
# working tree mounted and the caller's uid/gid/groups mapped in so files
# created during the build stay owned by the invoking user.
# Derive a docker tag from the working directory path (lowercase, '-' for '/').
tag=$(echo ${PWD} | tr / - | cut -b2- | tr A-Z a-z)
# Re-add all of the caller's supplementary groups inside the container.
groups=$(id -G | xargs -n1 echo -n " --group-add ")
params="-v ${PWD}:${PWD} --rm -w ${PWD} -u"$(id -u):$(id -g)" $groups -v/etc/passwd:/etc/passwd:ro -v/etc/group:/etc/group:ro -v$HOME/.gitconfig:$HOME/.gitconfig:ro ${tag}"
docker build --tag=${tag} docker
# "$@" (quoted) preserves arguments containing whitespace; $params stays
# deliberately unquoted so it word-splits into separate docker options.
docker run $params "$@"

12
docker/Dockerfile Normal file
View File

@@ -0,0 +1,12 @@
# Build container for the uCentral OpenWrt tree (invoked via dock-run.sh).
FROM ubuntu:20.04
# tzdata is installed first with DEBIAN_FRONTEND=noninteractive so the
# larger install step below is not blocked by its timezone prompt.
# Everything shares one RUN layer; apt-get clean keeps the image smaller.
RUN apt-get update \
&& DEBIAN_FRONTEND="noninteractive" apt-get -y install tzdata \
&& apt-get install -y \
time git-core build-essential gcc-multilib \
libncurses5-dev zlib1g-dev gawk flex gettext wget unzip python \
python3 python3-pip python3-yaml openvswitch-common openvswitch-switch libssl-dev rsync \
&& apt-get clean
# Placeholder git identity: setup.py applies patches with `git am`,
# which refuses to run without user.email/user.name configured.
RUN git config --global user.email "you@example.com"
RUN git config --global user.name "Your Name"
# kconfiglib is needed by scripts/gen_config.py.
RUN pip3 install kconfiglib

View File

@@ -0,0 +1,53 @@
#
# Copyright (C) 2016 Nordic Semiconductor ASA.
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
# OpenWrt package for the Nordic BLE-6LoWPAN joiner daemon.
include $(TOPDIR)/rules.mk
PKG_NAME:=bluetooth-6lowpand
PKG_VERSION:=0.0.1
# Sources come from git at a pinned commit.
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_URL:=https://github.com/NordicSemiconductor/Linux-ble-6lowpan-joiner.git
PKG_SOURCE_VERSION:=5ce5b248846a6d4ac4a609eb0e8d023cf920b247
PKG_SOURCE_PROTO:=git
# Links against bluez's internal static libraries straight out of the
# bluez build directory ($(wildcard) matches whichever bluez-* version
# was built).
# NOTE(review): this assumes bluez has already been compiled when this
# package builds -- no explicit build-time dependency is visible here;
# confirm ordering is guaranteed elsewhere.
BLUEZ_DIR:=$(wildcard $(BUILD_DIR)/bluez-*)
TARGET_CFLAGS += -I$(BLUEZ_DIR)
TARGET_LDFLAGS += -L$(BLUEZ_DIR)/lib/.libs/ -L$(BLUEZ_DIR)/src/.libs/ -lshared-mainloop -lbluetooth-internal
include $(INCLUDE_DIR)/package.mk
define Package/bluetooth-6lowpand
SECTION:=base
CATEGORY:=Network
TITLE:=Bluetooth LE 6lowpan joiner daemon
URL:=http://www.nordicsemi.com/
DEPENDS:=+libusb-1.0 +bluez-libs
endef
define Package/bluetooth-6lowpand/description
Bluetooth Low Energy IPSP device scanner and connection daemon.
The Daemon can be used to whitelist certain IPSP Bluetooth LE MAC
addresses, or autoconnect using SSID and Key derived from Wifi AP
setup to authenticate the devices in order to connect. Also, manual
configuration of software SSID and Key can be used.
endef
# Installs the init script, the default daemon config and the binary.
define Package/bluetooth-6lowpand/install
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/bluetooth_6lowpand.init $(1)/etc/init.d/bluetooth_6lowpand
$(INSTALL_DIR) $(1)/etc/bluetooth
$(INSTALL_DATA) ./files/bluetooth_6lowpand.conf $(1)/etc/bluetooth
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) $(PKG_BUILD_DIR)/src/bluetooth_6lowpand $(1)/usr/sbin
endef
$(eval $(call BuildPackage,bluetooth-6lowpand))

View File

@@ -0,0 +1,24 @@
#!/bin/sh /etc/rc.common
# rc.common init script for the bluetooth_6lowpand daemon.
START=63
PROG=/usr/sbin/bluetooth_6lowpand
HCICONFIG=/usr/bin/hciconfig
start() {
config_load btle
# Only start when 'option enable 1' is set in the bluetooth_6lowpand
# section of /etc/config/btle.
config_get enable bluetooth_6lowpand enable 0
[ "$enable" -eq 1 ] || return
echo "start bluetooth_6lowpand"
sleep 1
# Enable kernel BT-6LoWPAN support before bringing the daemon up.
echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable
sleep 1
# bluetoothd would otherwise keep control of the adapter.
killall bluetoothd
sleep 1
$HCICONFIG hci0 reset
# Daemon options: presumably scan window/timeout, autoconnect,
# daemonize -- NOTE(review): confirm against bluetooth_6lowpand usage.
$PROG -w 3 -t 5 -a -d
}
stop() {
echo "stop bluetooth_6lowpand"
# The daemon is stopped hard; it keeps no state worth a graceful exit.
killall -9 bluetooth_6lowpand
}

View File

@@ -0,0 +1,33 @@
# OpenWrt package for the bluez-ibeacon BLE beacon utility.
include $(TOPDIR)/rules.mk

PKG_NAME:=bluez-ibeacon
PKG_RELEASE:=1

# Sources are fetched from git at a pinned commit.
# := (simple expansion) for consistency with every other assignment here.
PKG_SOURCE_URL:=https://github.com/blogic/bluez-ibeacon
PKG_SOURCE_PROTO:=git
PKG_SOURCE_DATE:=2022-10-31
PKG_SOURCE_VERSION:=07c082bf3e139ce061ff62a42b7876860256f4ea

PKG_MAINTAINER:=John Crispin <john@phrozen.org>
PKG_LICENSE:=MIT

include $(INCLUDE_DIR)/package.mk

define Package/bluez-ibeacon
  SECTION:=utils
  CATEGORY:=Utilities
  TITLE:=bluez-ibeacon
  DEPENDS:=+bluez-libs
endef

# The upstream tree keeps its sources in the bluez-beacon/ subdirectory.
define Build/Compile
	$(MAKE_VARS) $(MAKE) $(PKG_JOBS) -C $(PKG_BUILD_DIR)/bluez-beacon $(MAKE_FLAGS)
endef

# Installs the beacon binary and its init script.
define Package/bluez-ibeacon/install
	$(INSTALL_DIR) $(1)/usr/sbin $(1)/etc/init.d
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/bluez-beacon/ibeacon $(1)/usr/sbin/
	$(INSTALL_BIN) ./files/ibeacon $(1)/etc/init.d/ibeacon
endef

$(eval $(call BuildPackage,bluez-ibeacon))

View File

@@ -0,0 +1,25 @@
#!/bin/sh /etc/rc.common
# procd init script for the ibeacon advertiser.
START=80
USE_PROCD=1
PROG=/usr/sbin/ibeacon
service_triggers() {
# Restart the service when the btle uci config changes.
procd_add_reload_trigger btle
}
start_service() {
config_load btle
# Beacon parameters come from the 'ibeacon' section of /etc/config/btle.
config_get enable ibeacon enable 0
config_get uuid ibeacon uuid 0
config_get major ibeacon major 0
config_get minor ibeacon minor 0
[ "$enable" -eq 1 ] || return
procd_open_instance
# 200 and -29 are fixed numeric arguments to ibeacon (presumably the
# advertising interval and TX power) -- NOTE(review): confirm against
# the ibeacon binary's usage text.
procd_set_param command "$PROG" 200 "${uuid}" "${major}" "${minor}" -29
procd_set_param respawn
procd_close_instance
}

View File

@@ -0,0 +1,32 @@
# OpenWrt package for ubtled, the OpenWrt BTLE daemon (cmake-based build).
include $(TOPDIR)/rules.mk

PKG_NAME:=ubtled
PKG_RELEASE:=1

# Sources are fetched from git at a pinned commit.
# := (simple expansion) for consistency with every other assignment here.
PKG_SOURCE_URL:=https://github.com/blogic/ubtled.git
PKG_SOURCE_PROTO:=git
PKG_SOURCE_DATE:=2022-10-31
PKG_SOURCE_VERSION:=7e01ab86c562fc8ab3777d04e60b8dce596a4c5f

PKG_MAINTAINER:=John Crispin <john@phrozen.org>
PKG_LICENSE:=GPL-2.0

include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk

define Package/ubtled
  SECTION:=utils
  CATEGORY:=Utilities
  TITLE:=OpenWrt BTLE daemon
  DEPENDS:=+libubox +libubus +bluez-libs
endef

# Install the daemon, its init script, default uci config and the
# uci-defaults fixup. The target directories are spelled out instead of
# using {config,init.d,uci-defaults}: brace expansion is a bashism and
# silently creates a literal '{...}' directory when make's shell is
# plain sh.
define Package/ubtled/install
	$(INSTALL_DIR) $(1)/usr/sbin
	$(INSTALL_DIR) $(1)/etc/config $(1)/etc/init.d $(1)/etc/uci-defaults
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/ubtled $(1)/usr/sbin/
	$(INSTALL_BIN) ./files/ubtled.init $(1)/etc/init.d/ubtled
	$(INSTALL_DATA) ./files/btle.config $(1)/etc/config/btle
	$(INSTALL_DATA) ./files/99-btle $(1)/etc/uci-defaults/
endef

$(eval $(call BuildPackage,ubtled))

View File

@@ -0,0 +1,8 @@
#!/bin/sh
# uci-defaults script: appends a minimal bluetoothd main.conf section
# set on first boot, naming the local adapter "TIP AP".
# NOTE(review): '>>' appends on every run of this script; uci-defaults
# scripts normally execute once and are then removed, which is what
# keeps this from duplicating the sections.
cat >> /etc/bluetooth/main.conf <<EOF
[General]
Name = TIP AP
[GATT]
[Policy]
EOF

View File

@@ -0,0 +1,11 @@
# Default BTLE service configuration (/etc/config/btle).
# All services ship disabled; set 'option enable 1' to activate one.
config ubtled ubtled
option enable 0
config bluetooth_6lowpand bluetooth_6lowpand
option enable 0
# iBeacon advertisement parameters, read by /etc/init.d/ibeacon.
config ubtled ibeacon
option enable 0
option uuid 0
option major 0
option minor 0

View File

@@ -0,0 +1,24 @@
#!/bin/sh /etc/rc.common
# procd init script for ubtled, the OpenWrt BTLE daemon.
START=80
USE_PROCD=1
PROG=/usr/sbin/ubtled
service_triggers() {
# Restart the service when the btle uci config changes.
procd_add_reload_trigger btle
}
start_service() {
config_load btle
# Only start when 'option enable 1' is set in the ubtled section.
config_get enable ubtled enable 0
[ "$enable" -eq 1 ] || return
# Bring the adapter up before handing it to the daemon.
hciconfig hci0 up
procd_open_instance
procd_set_param command "$PROG"
procd_set_param respawn
procd_close_instance
}

View File

@@ -0,0 +1,55 @@
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk

PKG_NAME:=aq-fw-download
PKG_BRANCH:=master
PKG_VERSION:=1.0
PKG_RELEASE:=1

PKG_BUILD_DIR:=$(BUILD_DIR)/aq-fw-download

include $(INCLUDE_DIR)/package.mk

# The whole package definition is wrapped in a template so it can be
# stamped out (or disabled) as one unit; the $$ escapes defer expansion
# of $(1) and the BuildPackage eval until the template itself is
# expanded by the $(eval $(call AquantiaUtil)) at the bottom.
define AquantiaUtil

define Package/aq-fw-download
  SECTION:=utils
  CATEGORY:=Utilities
  DEPENDS:=@TARGET_ipq806x||TARGET_ipq||TARGET_ipq40xx||TARGET_ipq807x_32||TARGET_ipq807x
  TITLE:=Aquantia FW downloader utility
endef

define Package/aq-fw-download/description
  Aquantia FW downloader utility
endef

# Kernel include paths are needed for the MDIO register interface.
TARGET_CPPFLAGS := \
	-D_GNU_SOURCE \
	-I$(LINUX_SRC_DIR)/include \
	-I$(LINUX_SRC_DIR)/arch/$(LINUX_KARCH)/include \
	-I$(PKG_BUILD_DIR) \
	$(TARGET_CPPFLAGS)

define Build/Prepare
	mkdir -p $(PKG_BUILD_DIR)
	$(CP) ./src/* $(PKG_BUILD_DIR)/
endef

define Build/Compile
	CFLAGS="$(TARGET_CPPFLAGS) $(TARGET_CFLAGS)" \
	LDFLAGS="$(TARGET_LDFLAGS)" \
	$(MAKE) -C $(PKG_BUILD_DIR) \
		$(TARGET_CONFIGURE_OPTS)
endef

define Package/aq-fw-download/install
	$(INSTALL_DIR) $$(1)/sbin
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/aq-fw-download $$(1)/sbin/aq-fw-download
endef

$$(eval $$(call BuildPackage,aq-fw-download))
endef

#Build/Compile=true
$(eval $(call AquantiaUtil))

View File

@@ -0,0 +1,14 @@
# Source-tree Makefile for the aq-fw-download tool; CC/CFLAGS/LDFLAGS
# are normally supplied by the OpenWrt package Makefile at build time.

# Fallback flags for standalone builds only. ifndef (rather than ?=)
# also replaces an *empty* CFLAGS, which preserves the original intent.
ifndef CFLAGS
CFLAGS = -O2 -g
endif

INCLUDES=-Iinclude -Iinclude/registerMap \
	-Iinclude/registerMap/APPIA \
	-Iinclude/registerMap/HHD

# 'all' is a command, not a file; declare it phony so a stray file named
# 'all' cannot make the default goal a no-op.
.PHONY: all
all: aq-fw-download

# $< (first prerequisite) instead of $^: keeps the compile line correct
# if header prerequisites are ever added to the .o rules.
%.o: %.c
	$(CC) $(INCLUDES) $(CFLAGS) -c -o $@ $<

aq-fw-download: mdioBootLoadCLD.o src/AQ_PhyInterface.o src/AQ_API.o
	$(CC) $(LDFLAGS) -o $@ $^ $(LIBS)

View File

@@ -0,0 +1,246 @@
/*
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*! \file
This file contains the AQ_API function and datatype declarations. */
#ifndef AQ_API_TOKEN
#define AQ_API_TOKEN
#include <stdint.h>
#include "AQ_User.h"
#include "AQ_ReturnCodes.h"
/*******************************************************************
General
*******************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/*! This typedef defines the bool datatype which takes the values
true and false.*/
typedef enum {False = 0, True = 1} AQ_boolean;
/*@}*/
/*******************************************************************
Device Identity
*******************************************************************/
/*! \defgroup deviceIdentity Device Identity
All AQ_API functions accept a parameter identifying the target PHY that
should be acted upon. */
/*@{*/
/*! This enumeration is used to describe the different types of
Aquantia PHY.*/
typedef enum
{
/*! 1/2/4-port package, 40nm architecture.*/
AQ_DEVICE_APPIA,
/*! 1/2/4-port package, 28nm architecture.*/
AQ_DEVICE_HHD
} AQ_API_Device;
/*! This structure is used to specify a particular Aquantia PHY port
within the system.*/
typedef struct
{
/*! The type of Aquantia PHY*/
AQ_API_Device device;
/*! Uniquely identifies the port within the system. AQ_Port must be
defined to whatever data type is suitable for the platform.
AQ_API functions will never do anything with PHY_ID other than
pass it down to the platform's PHY register read/write
functions.*/
AQ_Port PHY_ID;
} AQ_API_Port;
/*@}*/
/*! This function boot-loads the instruction and data memory (IRAM and
DRAM) of a set of Aquantia PHYs from a .cld format image file (the
same image file used to burn the FLASH). During boot-load of each
Aquantia PHY, the processor is halted, and after programming is
complete the processor is released. Note that calling this
function leaves the daisy-chain disabled to prevent RAM over-
write. To exit MDIO boot-load mode, use the function
AQ_API_EnableDaisyChain.
Unlike most of the other functions in this API, this function can
operate on a group of PHYs simultaneously. This is referred to as
gang-loading. To facilitate this, this function takes as
parameters 3 parallel arrays: PHY_IDs, provisioningAddresses, and
resultCodes. The length of these arrays must be identical, and is
specified by the num_PHY_IDs parameter.
In order to check the integrity of the boot-load operation, a
CRC-16 value is calculated over the IRAM and DRAM. After the image
has been loaded, this value is directly compared against each
PHY's Mailbox CRC-16 in 1E.0201.
The value of register 1E.C441 must be the same for all the boot-
loaded PHYs. This will be checked before the boot-load is
performed, and if a non-uniform value is read from any of the
PHYs, the function will fail before any writes are performed.
A separate result code is returned for each of the boot-loaded
PHYs, in the OUT parameter, resultCodes.
Individual Port Return codes:
AQ_RET_BOOTLOAD_PROVADDR_OOR: The specified provisioning address
was outside of the permitted range.
AQ_RET_BOOTLOAD_NONUNIFORM_REGVALS: The values of the register(s)
that must be uniform across the ports being bootloaded were not
uniform.
AQ_RET_BOOTLOAD_CRC_MISMATCH: The image was completely loaded into
memory, but the after the port exited bootload the running
checksum that was read from the uP memory mailbox was not the
expected value. This indicates that the memory has potentially
been corrupted, and the PHY should be reset before trying the
bootload again.
Overall Return codes (the return value from the function call):
AQ_RET_OK: all ports were successfully bootloaded.
AQ_RET_ERROR: One or more ports were not successfully bootloaded.
*/
AQ_Retcode AQ_API_WriteBootLoadImage
(
/*! An array identifying the target PHY ports.*/
AQ_API_Port** ports,
/*! The length of the arrays ports, provisioningAddresses, and
resultCodes. These are parallel arrays, and must all be of the
same length.*/
unsigned int numPorts,
/*! The provisioning addresses of each of the PHYs specified in
ports. This can range from 0 through 47, and is also known as
the daisy-chain address or the hop-count. If the PHYs are
connected to a FLASH using the daisy-chain, this is the distance
from the PHY to the FLASH, and is used to identify customized
provisioning for each PHY from the provisioning data within the
image. Otherwise, it is an arbitrary number. The length of this
array must match the length of ports.*/
unsigned int* provisioningAddresses,
/*! OUT: The result code indicating success or failure of boot-
loading each of the PHYs specified in ports.*/
AQ_Retcode* resultCodes,
/*! A pointer to the size of the image (in bytes) that is being
loaded into the Aquantia PHY.*/
uint32_t* imageSizePointer,
/*! The image being loaded into the Aquantia PHY. This is the same
regardless of whether the target is internal RAM or FLASH.*/
uint8_t* image,
/*! The 5-bit address to be used during the gang-loading operation.
During the boot-loading process, each of the PHYs specified in
ports will be changed such that they are addressed on the MDIO
bus at gangloadAddress. This allows all the PHYs to be loaded
simultaneously. Before returning, each PHY will be moved back to
its original MDIO address. If ports contains only a single
element, callers will probably want to use the PHY's original
MDIO address for this parameter.*/
uint8_t gangload_MDIO_address,
/*! The address of the PHYs while in gangload mode. This is
ultimately some combination of the system address and the
gangload MDIO address, specified by gangload_MDIO_address. For
most platforms, gangload_MDIO_address and gangload_PHY_ID should
have the same value.*/
AQ_API_Port* gangloadPort
);
/*! This function boot-loads the instruction and data memory (IRAM and
DRAM) of a set of Aquantia PHYs from a .cld format image file (the
same image file used to burn the FLASH), as well as a separately
provided provisioning table image file. The provisioning table
image allows additional provisioning to be provided, beyond what
is built in to the .cld image. If provTableSizePointer or
provTableImage are NULL, this function behaves like
AQ_API_WriteBootLoadImage.
Aside from the additional provisioning table, this function behaves
exactly the same as AQ_API_WriteBootLoadImage. For additional
documentation and information on return codes, refer to
AQ_API_WriteBootLoadImage.
Individual Port Return codes (same as AQ_API_WriteBootLoadImage,
plus):
AQ_RET_BOOTLOAD_PROVTABLE_TOO_LARGE: The supplied provisioning
table image does not fit within the allotted space.*/
AQ_Retcode AQ_API_WriteBootLoadImageWithProvTable
(
/*! An array identifying the target PHY ports.*/
AQ_API_Port** ports,
/*! The length of the arrays ports, provisioningAddresses, and
resultCodes. These are parallel arrays, and must all be of the
same length.*/
unsigned int numPorts,
/*! The provisioning addresses of each of the PHYs specified in
ports. This can range from 0 through 47, and is also known as
the daisy-chain address or the hop-count. If the PHYs are
connected to a FLASH using the daisy-chain, this is the distance
from the PHY to the FLASH, and is used to identify customized
provisioning for each PHY from the provisioning data within the
image. Otherwise, it is an arbitrary number. The length of this
array must match the length of ports.*/
unsigned int* provisioningAddresses,
/*! OUT: The result code indicating success or failure of boot-
loading each of the PHYs specified in ports.*/
AQ_Retcode* resultCodes,
/*! A pointer to the size of the image (in bytes) that is being
loaded into the Aquantia PHY.*/
uint32_t* imageSizePointer,
/*! The image being loaded into the Aquantia PHY. This is the same
regardless of whether the target is internal RAM or FLASH.*/
uint8_t* image,
/*! The 5-bit address to be used during the gang-loading operation.
During the boot-loading process, each of the PHYs specified in
ports will be changed such that they are addressed on the MDIO
bus at gangloadAddress. This allows all the PHYs to be loaded
simultaneously. Before returning, each PHY will be moved back to
its original MDIO address. If ports contains only a single
element, callers will probably want to use the PHY's original
MDIO address for this parameter.*/
uint8_t gangload_MDIO_address,
/*! The address of the PHYs while in gangload mode. This is
ultimately some combination of the system address and the
gangload MDIO address, specified by gangload_MDIO_address. For
most platforms, gangload_MDIO_address and gangload_PHY_ID should
have the same value.*/
AQ_API_Port* gangloadPort,
/*! A pointer to the size of the provTableImage (in bytes) that is
being loaded into the Aquantia PHY.*/
uint32_t* provTableSizePointer,
/*! The additional provisioning table image being loaded into the
Aquantia PHY.*/
uint8_t* provTableImage
);
/*! Calling this function disables boot-loading and re-enables the
daisy-chain. This would typically be called after using MDIO
boot-loading on a daisy-chain enabled PHY. Re-enabling the
daisy-chain after performing an MDIO bootload will cause the PHY
to reboot from FLASH.*/
AQ_Retcode AQ_API_EnableDaisyChain
(
/*! The target PHY port.*/
AQ_API_Port* port
);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,171 @@
/* AQ_PhyInterface.h */
/***********************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Description:
*
* Declares the base PHY register read and write functions that are
* called by the API functions. The platform integrator must provide
* the implementation of these routines.
*
***********************************************************************/
/*! \file
* Declares the base PHY register read and write functions that are
* called by the API functions. The platform integrator must provide
* the implementation of these routines. */
#ifndef AQ_PHY_INTERFACE_TOKEN
#define AQ_PHY_INTERFACE_TOKEN
#include "AQ_API.h"
#include "AQ_User.h"
#ifdef __cplusplus
extern "C" {
#endif
/*******************************************************************
MDIO Access Functions
*******************************************************************/
/*! \defgroup mdioAccessFunctions MDIO Access Functions
The MDIO access functions are required by the API to access the register space
of each Aquantia PHY deployed in a system. The body of these functions needs to
be written by the system designer, as the method of accessing the PHY will
be unique to the target system. They are designed to be generic read and
write access functions, as the MDIO addressing scheme relies on each
MMD to maintain a 16 bit address pointer that determines the register where
the next read or write is coming from. Consequently, various levels of
optimization of the MDIO interface are possible: from re-writing the MMD
address pointer on every transaction, to storing shadow copies of the MMD
address pointers and only updating the MMD address pointer as necessary.
Thus these functions leave the MDIO optimization to the system engineer.
*/
/*@{*/
/*! Provides generic synchronous PHY register write functionality. It is the
* responsibility of the system designer to provide the specific MDIO address
* pointer updates, etc. in order to accomplish this write operation.
* It will be assumed that the write has been completed by the time this
* function returns.*/
void AQ_API_MDIO_Write
(
/*! Uniquely identifies the port within the system. AQ_Port must be
* defined to whatever data type is suitable for the platform.*/
AQ_Port PHY_ID,
/*! The address of the MMD within the target PHY. */
unsigned int MMD,
/*! The 16-bit address of the PHY register being written. */
unsigned int address,
/*! The 16-bits of data to write to the specified PHY register. */
unsigned int data
);
/*! Provides generic synchronous PHY register read functionality. It is the
* responsibility of the system designer to provide the specific MDIO address
* pointer updates, etc. in order to accomplish this read operation.*/
unsigned int AQ_API_MDIO_Read
(
/*! Uniquely identifies the port within the system. AQ_Port must be
* defined to whatever data type is suitable for the platform.*/
AQ_Port PHY_ID,
/*! The address of the MMD within the target PHY. */
unsigned int MMD,
/*! The 16-bit address of the PHY register being read. */
unsigned int address
);
#ifdef AQ_PHY_SUPPORTS_BLOCK_READ_WRITE
/*! Provides generic asynchronous/buffered PHY register write functionality.
* It is the responsibility of the system designer to provide the specific
* MDIO address pointer updates, etc. in order to accomplish this write
* operation. The write need not necessarily have been completed by the time
* this function returns. All register reads and writes to a particular PHY_ID
* that are requested by calling AQ_API_MDIO_BlockWrite or AQ_API_MDIO_BlockRead
* MUST be performed in the order that the calls are made. */
void AQ_API_MDIO_BlockWrite
(
/*! Uniquely identifies the port within the system. AQ_Port must be
* defined to whatever data type is suitable for the platform.*/
AQ_Port PHY_ID,
/*! The address of the MMD within the target PHY. */
unsigned int MMD,
/*! The 16-bit address of the PHY register being written. */
unsigned int address,
/*! The 16-bits of data to write to the specified PHY register. */
unsigned int data
);
/*! Provides generic asynchronous/buffered PHY register read functionality.
* It is the responsibility of the system designer to provide the specific
* MDIO address pointer updates, etc. in order to accomplish this read
* operation. All register reads and writes to a particular PHY_ID that
* are requested by calling AQ_API_MDIO_BlockWrite or AQ_API_MDIO_BlockRead
* MUST be performed in the order that the calls are made. The register value
* may subsequently be fetched by calling AQ_API_MDIO_BlockOperationExecute.*/
void AQ_API_MDIO_BlockRead
(
/*! Uniquely identifies the port within the system. AQ_Port must be
* defined to whatever data type is suitable for the platform.*/
AQ_Port PHY_ID,
/*! The address of the MMD within the target PHY. */
unsigned int MMD,
/*! The 16-bit address of the PHY register being read. */
unsigned int address
);
/*! Retrieve the results of all PHY register reads to PHY_ID previously
* requested via calls to AQ_API_MDIO_BlockRead. The read and write
* operations previously performed by calls to AQ_API_MDIO_BlockWrite and
* AQ_API_MDIO_BlockRead must have all been completed by the time this
* function returns, in the order that the calls were performed. The
* return value is an array representing the fetched results of all
* pending calls to AQ_API_MDIO_BlockRead, in the order that the calls
* were performed. Callers should track the number of pending block
* reads to determine the size of the returned array. */
unsigned int * AQ_API_MDIO_BlockOperationExecute
(
/*! Uniquely identifies the port within the system. AQ_Port must be
* defined to whatever data type is suitable for the platform.*/
AQ_Port PHY_ID
);
/*! Returns the maximum number of asynchronous/buffered PHY register
* read/write operations that may be outstanding at one time. Callers will
* call AQ_API_MDIO_BlockOperationExecute before issuing additional calls
* to AQ_API_MDIO_BlockWrite or AQ_API_MDIO_BlockRead to avoid a buffer
* overflow. */
unsigned int AQ_API_MDIO_MaxBlockOperations
(
);
#endif
/*@}*/
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,71 @@
/*AQ_PlatformRoutines.h*/
/************************************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Description:
*
* Declares the platform interface functions that will be called by AQ_API
* functions. The platform integrator must provide the implementation of
* these functions.
*
************************************************************************************/
/*! \file
* Declares the platform interface functions that will be called by AQ_API
* functions. The platform integrator must provide the implementation of
* these functions. */
#ifndef AQ_PHY_PLATFORMROUTINES_TOKEN
#define AQ_PHY_PLATFORMROUTINES_TOKEN
#include <stdint.h>
#include "AQ_API.h"
#include "AQ_User.h"
#include "AQ_ReturnCodes.h"
#ifdef __cplusplus
extern "C" {
#endif
/*******************************************************************
Time Delay
*******************************************************************/
/*! \defgroup delay Time Delay
@{
*/
/*! Returns after at least `milliseconds` have elapsed. This must be implemented
* in a platform-appropriate way. AQ_API functions will call this function to
* block for the specified period of time. If necessary, PHY register reads
* may be performed on port to busy-wait. */
void AQ_API_Wait
(
uint32_t milliseconds, /*!< The delay in milliseconds */
AQ_API_Port* port /*!< The PHY to use if delay reads are necessary*/
);
/*@}*/
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,323 @@
/* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*! \file
This file contains macros for accessing the AQ PHYs' registers
using the device-specific register map data structures and definitions.
*/
#ifndef AQ_REG_MACRO_TOKEN
#define AQ_REG_MACRO_TOKEN
#include "AQ_PhyInterface.h"
/*! Device-dispatching register-read macros. AQ_API_ReadRegister expands to an
* AQ_API_MDIO_Read call selected by the device type of a local variable named
* `port` (APPIA or HHD); an unmatched device type reads as 0.
* NOTE(review): these macros implicitly reference a variable named `port`
* that must be in scope at the expansion site. */
#define AQ_API_ReadRegister(id,reg,wd) AQ_API_ReadRegister_DeviceRestricted(APPIA_HHD,id,reg,wd)
#define AQ_API_ReadRegister_DeviceRestricted(devices,id,reg,wd) AQ_API_ReadRegister_Devs_ ## devices(id,reg,wd)
#define AQ_API_ReadRegister_Devs_APPIA(id,reg,wd) \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_Read (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd)) : \
(0))
#define AQ_API_ReadRegister_Devs_HHD(id,reg,wd) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_Read (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd)) : \
(0))
#define AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,wd) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_Read (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd)) : \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_Read (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd)) : \
(0)))
#define AQ_API_ReadRegister_Devs_HHD_APPIA(id,reg,wd) AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,wd)
/*! Device-dispatching register-write macros: the AQ_API_MDIO_Write call is
* selected by `port->device`; an unmatched device type is a no-op ((void)0). */
#define AQ_API_WriteRegister(id,reg,wd,value) AQ_API_WriteRegister_DeviceRestricted(APPIA_HHD,id,reg,wd,value)
#define AQ_API_WriteRegister_DeviceRestricted(devices,id,reg,wd,value) AQ_API_WriteRegister_Devs_ ## devices(id,reg,wd,value)
#define AQ_API_WriteRegister_Devs_APPIA(id,reg,wd,value) \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_Write (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd),value) : \
((void)0))
#define AQ_API_WriteRegister_Devs_HHD(id,reg,wd,value) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_Write (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd),value) : \
((void)0))
#define AQ_API_WriteRegister_Devs_APPIA_HHD(id,reg,wd,value) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_Write (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd),value) : \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_Write (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd),value) : \
((void)0)))
#define AQ_API_WriteRegister_Devs_HHD_APPIA(id,reg,wd,value) AQ_API_WriteRegister_Devs_APPIA_HHD(id,reg,wd,value)
#ifdef AQ_PHY_SUPPORTS_BLOCK_READ_WRITE
/*! Asynchronous/buffered variants of the register access macros. Only
* compiled when the platform defines AQ_PHY_SUPPORTS_BLOCK_READ_WRITE
* (see AQ_User.h). Dispatch on `port->device` mirrors the synchronous
* macros above. */
#define AQ_API_BlockReadRegister(id,reg,wd) AQ_API_BlockReadRegister_DeviceRestricted(APPIA_HHD,id,reg,wd)
#define AQ_API_BlockReadRegister_DeviceRestricted(devices,id,reg,wd) AQ_API_BlockReadRegister_Devs_ ## devices(id,reg,wd)
#define AQ_API_BlockReadRegister_Devs_APPIA(id,reg,wd) \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_BlockRead (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd)) : \
((void)0))
#define AQ_API_BlockReadRegister_Devs_HHD(id,reg,wd) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_BlockRead (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd)) : \
((void)0))
#define AQ_API_BlockReadRegister_Devs_APPIA_HHD(id,reg,wd) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_BlockRead (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd)) : \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_BlockRead (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd)) : \
((void)0)))
#define AQ_API_BlockReadRegister_Devs_HHD_APPIA(id,reg,wd) AQ_API_BlockReadRegister_Devs_APPIA_HHD(id,reg,wd)
/*! Buffered register writes, dispatched the same way as the block reads. */
#define AQ_API_BlockWriteRegister(id,reg,wd,value) AQ_API_BlockWriteRegister_DeviceRestricted(APPIA_HHD,id,reg,wd,value)
#define AQ_API_BlockWriteRegister_DeviceRestricted(devices,id,reg,wd,value) AQ_API_BlockWriteRegister_Devs_ ## devices(id,reg,wd,value)
#define AQ_API_BlockWriteRegister_Devs_APPIA(id,reg,wd,value) \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_BlockWrite (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd),value) : \
((void)0))
#define AQ_API_BlockWriteRegister_Devs_HHD(id,reg,wd,value) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_BlockWrite (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd),value) : \
((void)0))
#define AQ_API_BlockWriteRegister_Devs_APPIA_HHD(id,reg,wd,value) \
((port->device == AQ_DEVICE_HHD) ? AQ_API_MDIO_BlockWrite (id,reg ## _HHD_mmdAddress,(reg ## _HHD_baseRegisterAddress + wd),value) : \
((port->device == AQ_DEVICE_APPIA) ? AQ_API_MDIO_BlockWrite (id,reg ## _APPIA_mmdAddress,(reg ## _APPIA_baseRegisterAddress + wd),value) : \
((void)0)))
#define AQ_API_BlockWriteRegister_Devs_HHD_APPIA(id,reg,wd,value) AQ_API_BlockWriteRegister_Devs_APPIA_HHD(id,reg,wd,value)
#endif
/*! AQ_API_Variable declares overlaid per-device struct pointers (named
* _local<reg>_APPIA / _local<reg>_HHD) over one scratch byte buffer sized by
* the largest register-map version; the AQ_API_Set/Get macros below reference
* these _local pointers.
* NOTE(review): each expansion below ends with a line-continuation backslash;
* in the upstream source a blank line follows each definition so the next
* #define is not absorbed into the macro — preserve that blank line when
* editing. */
#define AQ_API_Variable(reg) AQ_API_Variable_DeviceRestricted(APPIA_HHD,reg)
#define AQ_API_Variable_DeviceRestricted(devices,reg) AQ_API_Variable_Devs_ ## devices(reg)
#define AQ_API_Variable_Devs_APPIA(reg) uint8_t _local ## reg ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _APPIA* _local ## reg ## _APPIA = (reg ## _APPIA*) _local ## reg ## _space; \
#define AQ_API_Variable_Devs_HHD(reg) uint8_t _local ## reg ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _HHD* _local ## reg ## _HHD = (reg ## _HHD*) _local ## reg ## _space; \
#define AQ_API_Variable_Devs_APPIA_HHD(reg) uint8_t _local ## reg ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _APPIA* _local ## reg ## _APPIA = (reg ## _APPIA*) _local ## reg ## _space; \
reg ## _HHD* _local ## reg ## _HHD = (reg ## _HHD*) _local ## reg ## _space; \
#define AQ_API_Variable_Devs_HHD_APPIA(reg) AQ_API_Variable_Devs_APPIA_HHD(reg)
/*! Like AQ_API_Variable, but with a caller-chosen local variable name instead
* of the fixed _local<reg> prefix. The same trailing-backslash caveat as
* above applies. */
#define AQ_API_DeclareLocalStruct(reg,localvar) AQ_API_DeclareLocalStruct_DeviceRestricted(APPIA_HHD,reg,localvar)
#define AQ_API_DeclareLocalStruct_DeviceRestricted(devices,reg,localvar) AQ_API_DeclareLocalStruct_Devs_ ## devices(reg,localvar)
#define AQ_API_DeclareLocalStruct_Devs_APPIA(reg,localvar) uint8_t localvar ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _APPIA* localvar ## _APPIA = (reg ## _APPIA*) localvar ## _space; \
#define AQ_API_DeclareLocalStruct_Devs_HHD(reg,localvar) uint8_t localvar ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _HHD* localvar ## _HHD = (reg ## _HHD*) localvar ## _space; \
#define AQ_API_DeclareLocalStruct_Devs_APPIA_HHD(reg,localvar) uint8_t localvar ## _space[ sizeof(reg ## _BiggestVersion) ];\
reg ## _APPIA* localvar ## _APPIA = (reg ## _APPIA*) localvar ## _space; \
reg ## _HHD* localvar ## _HHD = (reg ## _HHD*) localvar ## _space; \
#define AQ_API_DeclareLocalStruct_Devs_HHD_APPIA(reg,localvar) AQ_API_DeclareLocalStruct_Devs_APPIA_HHD(reg,localvar)
/*! Read-modify-write of a single register bitfield: reads the word holding
* `field` into the _local<reg> struct declared by AQ_API_Variable, and writes
* it back only if the field's current value differs from `value` (avoids a
* redundant MDIO write). Dispatches on `port->device`; requires `port` and
* the matching _local<reg> pointer to be in scope. */
#define AQ_API_Set(id,reg,field,value) AQ_API_Set_DeviceRestricted(APPIA_HHD,id,reg,field,value)
#define AQ_API_Set_DeviceRestricted(devices,id,reg,field,value) AQ_API_Set_Devs_ ## devices(id,reg,field,value)
#define AQ_API_Set_Devs_APPIA(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_APPIA: \
_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field = AQ_API_ReadRegister_Devs_APPIA(id,reg,reg ## _APPIA_ ## field); \
if (_local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field != value) \
{ \
_local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field = value; \
AQ_API_WriteRegister_Devs_APPIA(id,reg,reg ## _APPIA_ ## field,_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field); \
} \
break; \
default: break; \
} \
}
#define AQ_API_Set_Devs_HHD(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_HHD: \
_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field = AQ_API_ReadRegister_Devs_HHD(id,reg,reg ## _HHD_ ## field); \
if (_local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field != value) \
{ \
_local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field = value; \
AQ_API_WriteRegister_Devs_HHD(id,reg,reg ## _HHD_ ## field,_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field); \
} \
break; \
default: break; \
} \
}
#define AQ_API_Set_Devs_APPIA_HHD(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_APPIA: \
_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field = AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,reg ## _APPIA_ ## field); \
if (_local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field != value) \
{ \
_local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field = value; \
AQ_API_WriteRegister_Devs_APPIA_HHD(id,reg,reg ## _APPIA_ ## field,_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field); \
} \
break; \
case AQ_DEVICE_HHD: \
_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field = AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,reg ## _HHD_ ## field); \
if (_local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field != value) \
{ \
_local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field = value; \
AQ_API_WriteRegister_Devs_APPIA_HHD(id,reg,reg ## _HHD_ ## field,_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field); \
} \
break; \
default: break; \
} \
}
#define AQ_API_Set_Devs_HHD_APPIA(id,reg,field,value) AQ_API_Set_Devs_APPIA_HHD(id,reg,field,value)
/*! Reads the register word holding `field` into the _local<reg> struct and
* assigns the extracted bitfield to `value`; `value` is set to 0 for an
* unmatched device type. Requires `port` and the matching _local<reg>
* pointer (from AQ_API_Variable) to be in scope. */
#define AQ_API_Get(id,reg,field,value) AQ_API_Get_DeviceRestricted(APPIA_HHD,id,reg,field,value)
#define AQ_API_Get_DeviceRestricted(devices,id,reg,field,value) AQ_API_Get_Devs_ ## devices(id,reg,field,value)
#define AQ_API_Get_Devs_APPIA(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_APPIA: \
_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field = AQ_API_ReadRegister_Devs_APPIA(id,reg,reg ## _APPIA_ ## field); \
value = _local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field; \
break; \
default: value = 0; break; \
} \
}
#define AQ_API_Get_Devs_HHD(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_HHD: \
_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field = AQ_API_ReadRegister_Devs_HHD(id,reg,reg ## _HHD_ ## field); \
value = _local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field; \
break; \
default: value = 0; break; \
} \
}
#define AQ_API_Get_Devs_APPIA_HHD(id,reg,field,value) { \
switch (port->device) { \
case AQ_DEVICE_APPIA: \
_local ## reg ## _APPIA->word_ ## reg ## _APPIA_ ## field = AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,reg ## _APPIA_ ## field); \
value = _local ## reg ## _APPIA->bits_ ## reg ## _APPIA_ ## field.field; \
break; \
case AQ_DEVICE_HHD: \
_local ## reg ## _HHD->word_ ## reg ## _HHD_ ## field = AQ_API_ReadRegister_Devs_APPIA_HHD(id,reg,reg ## _HHD_ ## field); \
value = _local ## reg ## _HHD->bits_ ## reg ## _HHD_ ## field.field; \
break; \
default: value = 0; break; \
} \
}
#define AQ_API_Get_Devs_HHD_APPIA(id,reg,field,value) AQ_API_Get_Devs_APPIA_HHD(id,reg,field,value)
/*! Accessors for a bitfield of a local struct declared with
* AQ_API_DeclareLocalStruct — no MDIO traffic, purely in-memory. Reads
* (or assignments, below) on an unmatched device type evaluate to 0. */
#define AQ_API_BitfieldOfLocalStruct(reg,localvar,field) AQ_API_BitfieldOfLocalStruct_DeviceRestricted(APPIA_HHD,reg,localvar,field)
#define AQ_API_BitfieldOfLocalStruct_DeviceRestricted(devices,reg,localvar,field) AQ_API_BitfieldOfLocalStruct_Devs_ ## devices(reg,localvar,field)
#define AQ_API_BitfieldOfLocalStruct_Devs_APPIA(reg,localvar,field) \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->bits_ ## reg ## _APPIA ## _ ## field.field) : \
(0))
#define AQ_API_BitfieldOfLocalStruct_Devs_HHD(reg,localvar,field) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->bits_ ## reg ## _HHD ## _ ## field.field) : \
(0))
#define AQ_API_BitfieldOfLocalStruct_Devs_APPIA_HHD(reg,localvar,field) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->bits_ ## reg ## _HHD ## _ ## field.field) : \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->bits_ ## reg ## _APPIA ## _ ## field.field) : \
(0)))
#define AQ_API_BitfieldOfLocalStruct_Devs_HHD_APPIA(reg,localvar,field) AQ_API_BitfieldOfLocalStruct_Devs_APPIA_HHD(reg,localvar,field)
/*! Assignment counterpart of AQ_API_BitfieldOfLocalStruct. */
#define AQ_API_AssignBitfieldOfLocalStruct(reg,localvar,field,value) AQ_API_AssignBitfieldOfLocalStruct_DeviceRestricted(APPIA_HHD,reg,localvar,field,value)
#define AQ_API_AssignBitfieldOfLocalStruct_DeviceRestricted(devices,reg,localvar,field,value) AQ_API_AssignBitfieldOfLocalStruct_Devs_ ## devices(reg,localvar,field,value)
#define AQ_API_AssignBitfieldOfLocalStruct_Devs_APPIA(reg,localvar,field,value) \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->bits_ ## reg ## _APPIA ## _ ## field.field = value) : \
(0))
#define AQ_API_AssignBitfieldOfLocalStruct_Devs_HHD(reg,localvar,field,value) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->bits_ ## reg ## _HHD ## _ ## field.field = value) : \
(0))
#define AQ_API_AssignBitfieldOfLocalStruct_Devs_APPIA_HHD(reg,localvar,field,value) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->bits_ ## reg ## _HHD ## _ ## field.field = value) : \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->bits_ ## reg ## _APPIA ## _ ## field.field = value) : \
(0)))
#define AQ_API_AssignBitfieldOfLocalStruct_Devs_HHD_APPIA(reg,localvar,field,value) AQ_API_AssignBitfieldOfLocalStruct_Devs_APPIA_HHD(reg,localvar,field,value)
/*! Whole-word accessors for a local struct declared with
* AQ_API_DeclareLocalStruct — again purely in-memory; an unmatched device
* type evaluates to 0. */
#define AQ_API_WordOfLocalStruct(localvar,wd) AQ_API_WordOfLocalStruct_DeviceRestricted(APPIA_HHD,localvar,wd)
#define AQ_API_WordOfLocalStruct_DeviceRestricted(devices,localvar,wd) AQ_API_WordOfLocalStruct_Devs_ ## devices(localvar,wd)
#define AQ_API_WordOfLocalStruct_Devs_APPIA(localvar,wd) \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->u ## wd.word_ ## wd) : \
(0))
#define AQ_API_WordOfLocalStruct_Devs_HHD(localvar,wd) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->u ## wd.word_ ## wd) : \
(0))
#define AQ_API_WordOfLocalStruct_Devs_APPIA_HHD(localvar,wd) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->u ## wd.word_ ## wd) : \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->u ## wd.word_ ## wd) : \
(0)))
#define AQ_API_WordOfLocalStruct_Devs_HHD_APPIA(localvar,wd) AQ_API_WordOfLocalStruct_Devs_APPIA_HHD(localvar,wd)
/*! Assignment counterpart of AQ_API_WordOfLocalStruct. */
#define AQ_API_AssignWordOfLocalStruct(localvar,wd,value) AQ_API_AssignWordOfLocalStruct_DeviceRestricted(APPIA_HHD,localvar,wd,value)
#define AQ_API_AssignWordOfLocalStruct_DeviceRestricted(devices,localvar,wd,value) AQ_API_AssignWordOfLocalStruct_Devs_ ## devices(localvar,wd,value)
#define AQ_API_AssignWordOfLocalStruct_Devs_APPIA(localvar,wd,value) \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->u ## wd.word_ ## wd = value) : \
(0))
#define AQ_API_AssignWordOfLocalStruct_Devs_HHD(localvar,wd,value) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->u ## wd.word_ ## wd = value) : \
(0))
#define AQ_API_AssignWordOfLocalStruct_Devs_APPIA_HHD(localvar,wd,value) \
((port->device == AQ_DEVICE_HHD) ? ((localvar ## _HHD)->u ## wd.word_ ## wd = value) : \
((port->device == AQ_DEVICE_APPIA) ? ((localvar ## _APPIA)->u ## wd.word_ ## wd = value) : \
(0)))
#define AQ_API_AssignWordOfLocalStruct_Devs_HHD_APPIA(localvar,wd,value) AQ_API_AssignWordOfLocalStruct_Devs_APPIA_HHD(localvar,wd,value)
#endif

View File

@@ -0,0 +1,113 @@
/* AQ_ReturnCodes.h */
/************************************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Description:
*
* This file defines the AQ_API functions' integral return codes.
*
*
************************************************************************************/
/*! \file
This file defines the AQ_API functions' integral return codes.
*/
#ifndef AQ_RETURNCODES_TOKEN
#define AQ_RETURNCODES_TOKEN
/*! \defgroup ReturnCodes
@{
*/
/*! Most AQ_API functions return AQ_Retcode to report success or failure.
* The values used are defined as preprocessor symbols in AQ_ReturnCodes.h.
* Callers should prefer to test the return values by equivalence to these
* symbols, rather than using the integer values directly, as these may
* not be stable across releases. The set of possible return codes that may
* be returned by a particular API function can be found in the function's
* documentation, as well as information on how to interpret each of the
* possible return codes. */
typedef unsigned int AQ_Retcode;
/*! \defgroup Success
@{ */
#define AQ_RET_OK 0
/*@}*/
/*! \defgroup GeneralErrors
@{ */
#define AQ_RET_ERROR 100
#define AQ_RET_UP_BUSY_TIMEOUT 101
/*@}*/
/*! \defgroup FunctionSpecificResults
@{ */
#define AQ_RET_FLASH_READY 200
#define AQ_RET_FLASH_READINESS_TIMEOUT 204
#define AQ_RET_FLASHINTF_READY 201
#define AQ_RET_FLASHINTF_NOTREADY 202
#define AQ_RET_FLASHINTF_READINESS_TIMEOUT 203
#define AQ_RET_FLASH_TYPE_UNKNOWN 205
#define AQ_RET_FLASH_TYPE_BAD 206
#define AQ_RET_FLASH_IMAGE_CORRUPT 207
#define AQ_RET_FLASH_IMAGE_TOO_LARGE 208
#define AQ_RET_FLASH_IMAGE_MISMATCH 209
#define AQ_RET_FLASH_PAGE_SIZE_CHANGED 210
#define AQ_RET_BOOTLOAD_PROVADDR_OOR 211
#define AQ_RET_BOOTLOAD_NONUNIFORM_REGVALS 212
#define AQ_RET_BOOTLOAD_CRC_MISMATCH 213
/* 228 is numerically out of sequence so the bootload codes stay grouped. */
#define AQ_RET_BOOTLOAD_PROVTABLE_TOO_LARGE 228
#define AQ_RET_LOOPBACK_BAD_ENTRY_STATE 214
#define AQ_RET_DEBUGTRACE_FREEZE_TIMEOUT 215
#define AQ_RET_DEBUGTRACE_UNFREEZE_TIMEOUT 216
#define AQ_RET_CABLEDIAG_ALREADY_RUNNING 217
#define AQ_RET_CABLEDIAG_STILL_RUNNING 218
#define AQ_RET_CABLEDIAG_BAD_PAIRSTATUS 219
/* NOTE: the "ALREDY" misspelling is part of the published API name;
 * do not rename it — callers compare against this symbol. */
#define AQ_RET_CABLEDIAG_RESULTS_ALREDY_COLLECTED 220
#define AQ_RET_CABLEDIAG_BAD_NUM_SAMPLES 221
#define AQ_RET_CABLEDIAG_REPORTEDPAIR_MISMATCH 222
#define AQ_RET_CABLEDIAG_REPORTEDPAIR_OOR 223
#define AQ_RET_CABLEDIAG_STARTED_PAIR_B 224
#define AQ_RET_CABLEDIAG_STARTED_PAIR_C 225
#define AQ_RET_CABLEDIAG_STARTED_PAIR_D 226
#define AQ_RET_CABLEDIAG_TXENABLE_MISMATCH 227
#define AQ_RET_SERDESEYE_BAD_SERDES_MODE 229
#define AQ_RET_SERDESEYE_BAD_MEAS_COUNT 230
#define AQ_RET_SERDESEYE_MEAS_TIMEOUT 231
#define AQ_RET_SERDESEYE_LANE_OOR 232
#define AQ_RET_SERDESEYE_COORD_OOR 233
#define AQ_RET_PIFMAILBOX_ERROR 234
#define AQ_RET_PIFMAILBOX_TIMEOUT 235
#define AQ_RET_SEC_TABLE_INDEX_OOR 236
/*@}*/
/*@}*/
#endif

View File

@@ -0,0 +1,97 @@
/*AQ_User.h*/
/************************************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Description:
*
* This file contains preprocessor symbol definitions and type definitions
* for the platform-integrator controlled compile-time AQ_API options.
*
************************************************************************************/
/*! \file
This file contains preprocessor symbol definitions and type definitions
for the platform-integrator controlled compile-time AQ_API options.
*/
#ifndef AQ_USER_TOKEN
#define AQ_USER_TOKEN
/*! \defgroup User User Definitions
This module contains the definitions used to configure AQ_API behavior as desired. */
/*@{*/
/*! Specify the proper data type for AQ_Port. This will depend on the
 * platform-specific implementation of the PHY register read/write functions.*/
typedef unsigned int AQ_Port;
/*! If defined, AQ_API functions will print various error and info messages
 * to stdout. If not, nothing will be printed and AQ_API.c will NOT include
 * stdio.h. */
#define AQ_VERBOSE
/*! If defined, the PHY interface supports block (asynchronous) read/write
 * operation. If AQ_PHY_SUPPORTS_BLOCK_READ_WRITE is defined, then
 * the API will call the block-operation functions and so implementations
 * for each must be provided. If AQ_PHY_SUPPORTS_BLOCK_READ_WRITE is not
 * defined, they will not be called, and need not be implemented. */
#undef AQ_PHY_SUPPORTS_BLOCK_READ_WRITE
/*! If defined, time.h exists, and so the associated functions will be used to
 * compute the elapsed time spent in a polling loop, to ensure that the
 * maximum time-out period will not be exceeded. If not defined, then
 * AQ_MDIO_READS_PER_SECOND will be used to calculate the minimum possible
 * elapsed time. */
#define AQ_TIME_T_EXISTS
/*! The maximum number of synchronous PHY register reads that can be performed
 * per second. A worst case number can be derived as follows:
 *
 * AQ_MDIO_READS_PER_SECOND = MDIO Clock Frequency / 64
 *
 * If using MDIO preamble suppression, multiply this number by 2
 *
 * For instance, if a 5MHz MDIO clock is being used without preamble suppression
 * AQ_MDIO_READS_PER_SECOND = 78125
 *
 * If AQ_TIME_T_EXISTS is defined, this will be ignored and need not be
 * defined. If AQ_TIME_T_EXISTS is not defined, this must be defined. */
#define AQ_MDIO_READS_PER_SECOND 78125
/*! If defined, after writing to one of the registers that can trigger a
 * processor-intensive MDIO operation, AQ_API functions will poll the
 * the "processor intensive MDIO operation in progress" bit and wait for it
 * to be zero before proceeding. */
#define AQ_ENABLE_UP_BUSY_CHECKS
/*! If defined, the register map header files containing reverse-packed
* structs will be included. If not, the register map header files containing
* non-reverse-packed structs will be included. The proper choice is typically
* a function of the endianness of the platform; on big-endian systems the
* reverse-packed structs should be used, and on little-endian systems the
* non-reverse-packed structs should be used. */
/*#define AQ_REVERSED_BITFIELD_ORDERING*/
/*@}*/
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,387 @@
/* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef AQ_REG_GROUP_MAX_SIZES
#define AQ_REG_GROUP_MAX_SIZES
#define AQ_Autonegotiation10GBaseT_ControlRegister_BiggestVersion AQ_Autonegotiation10GBaseT_ControlRegister_HHD
#define AQ_Autonegotiation10GBaseT_StatusRegister_BiggestVersion AQ_Autonegotiation10GBaseT_StatusRegister_HHD
#define AQ_AutonegotiationAdvertisementRegister_BiggestVersion AQ_AutonegotiationAdvertisementRegister_HHD
#define AQ_AutonegotiationEeeAdvertisementRegister_BiggestVersion AQ_AutonegotiationEeeAdvertisementRegister_HHD
#define AQ_AutonegotiationEeeLinkPartnerAbilityRegister_BiggestVersion AQ_AutonegotiationEeeLinkPartnerAbilityRegister_HHD
#define AQ_AutonegotiationExtendedNextPageTransmitRegister_BiggestVersion AQ_AutonegotiationExtendedNextPageTransmitRegister_HHD
#define AQ_AutonegotiationExtendedNextPageUnformattedCodeRegister_BiggestVersion AQ_AutonegotiationExtendedNextPageUnformattedCodeRegister_HHD
#define AQ_AutonegotiationLinkPartnerBasePageAbilityRegister_BiggestVersion AQ_AutonegotiationLinkPartnerBasePageAbilityRegister_HHD
#define AQ_AutonegotiationLinkPartnerExtendedNextPageAbilityRegister_BiggestVersion AQ_AutonegotiationLinkPartnerExtendedNextPageAbilityRegister_HHD
#define AQ_AutonegotiationLinkPartnerExtendedNextPageUnformattedCodeRegister_BiggestVersion AQ_AutonegotiationLinkPartnerExtendedNextPageUnformattedCodeRegister_HHD
#define AQ_AutonegotiationReceiveLinkPartnerStatus_BiggestVersion AQ_AutonegotiationReceiveLinkPartnerStatus_HHD
#define AQ_AutonegotiationReceiveReservedVendorProvisioning_BiggestVersion AQ_AutonegotiationReceiveReservedVendorProvisioning_APPIA
#define AQ_AutonegotiationReceiveReservedVendorStatus_BiggestVersion AQ_AutonegotiationReceiveReservedVendorStatus_HHD
#define AQ_AutonegotiationReceiveVendorAlarms_BiggestVersion AQ_AutonegotiationReceiveVendorAlarms_HHD
#define AQ_AutonegotiationReceiveVendorInterruptMask_BiggestVersion AQ_AutonegotiationReceiveVendorInterruptMask_HHD
#define AQ_AutonegotiationReservedVendorProvisioning_BiggestVersion AQ_AutonegotiationReservedVendorProvisioning_HHD
#define AQ_AutonegotiationReservedVendorStatus_BiggestVersion AQ_AutonegotiationReservedVendorStatus_HHD
#define AQ_AutonegotiationStandardControl_1_BiggestVersion AQ_AutonegotiationStandardControl_1_HHD
#define AQ_AutonegotiationStandardDeviceIdentifier_BiggestVersion AQ_AutonegotiationStandardDeviceIdentifier_HHD
#define AQ_AutonegotiationStandardDevicesInPackage_BiggestVersion AQ_AutonegotiationStandardDevicesInPackage_HHD
#define AQ_AutonegotiationStandardInterruptMask_BiggestVersion AQ_AutonegotiationStandardInterruptMask_HHD
#define AQ_AutonegotiationStandardPackageIdentifier_BiggestVersion AQ_AutonegotiationStandardPackageIdentifier_HHD
#define AQ_AutonegotiationStandardStatus_1_BiggestVersion AQ_AutonegotiationStandardStatus_1_HHD
#define AQ_AutonegotiationStandardStatus_2_BiggestVersion AQ_AutonegotiationStandardStatus_2_HHD
#define AQ_AutonegotiationTransmitVendorAlarms_BiggestVersion AQ_AutonegotiationTransmitVendorAlarms_APPIA
#define AQ_AutonegotiationTransmitVendorInterruptMask_BiggestVersion AQ_AutonegotiationTransmitVendorInterruptMask_HHD
#define AQ_AutonegotiationVendorGlobalInterruptFlags_BiggestVersion AQ_AutonegotiationVendorGlobalInterruptFlags_HHD
#define AQ_AutonegotiationVendorProvisioning_BiggestVersion AQ_AutonegotiationVendorProvisioning_HHD
#define AQ_AutonegotiationVendorStatus_BiggestVersion AQ_AutonegotiationVendorStatus_HHD
#define AQ_GbePhyExtendedWolControl_BiggestVersion AQ_GbePhyExtendedWolControl_HHD
#define AQ_GbePhySgmii0RxStatus_BiggestVersion AQ_GbePhySgmii0RxStatus_HHD
#define AQ_GbePhySgmii0TxStatus_BiggestVersion AQ_GbePhySgmii0TxStatus_HHD
#define AQ_GbePhySgmii1RxStatus_BiggestVersion AQ_GbePhySgmii1RxStatus_HHD
#define AQ_GbePhySgmii1TxStatus_BiggestVersion AQ_GbePhySgmii1TxStatus_HHD
#define AQ_GbePhySgmii1WolStatus_BiggestVersion AQ_GbePhySgmii1WolStatus_HHD
#define AQ_GbePhySgmiiRxAlarms_BiggestVersion AQ_GbePhySgmiiRxAlarms_HHD
#define AQ_GbePhySgmiiRxInterruptMask_BiggestVersion AQ_GbePhySgmiiRxInterruptMask_HHD
#define AQ_GbePhySgmiiTestControl_BiggestVersion AQ_GbePhySgmiiTestControl_HHD
#define AQ_GbePhySgmiiTxAlarms_BiggestVersion AQ_GbePhySgmiiTxAlarms_HHD
#define AQ_GbePhySgmiiTxInterruptMask_BiggestVersion AQ_GbePhySgmiiTxInterruptMask_HHD
#define AQ_GbePhySgmiiWolStatus_BiggestVersion AQ_GbePhySgmiiWolStatus_HHD
#define AQ_GbePhyVendorGlobalInterruptFlags_BiggestVersion AQ_GbePhyVendorGlobalInterruptFlags_HHD
#define AQ_GbePhyWolControl_BiggestVersion AQ_GbePhyWolControl_HHD
#define AQ_GbePhysgmii1WolStatus_BiggestVersion AQ_GbePhysgmii1WolStatus_APPIA
#define AQ_GbeReservedProvisioning_BiggestVersion AQ_GbeReservedProvisioning_HHD
#define AQ_GbeStandardDeviceIdentifier_BiggestVersion AQ_GbeStandardDeviceIdentifier_HHD
#define AQ_GbeStandardDevicesInPackage_BiggestVersion AQ_GbeStandardDevicesInPackage_HHD
#define AQ_GbeStandardPackageIdentifier_BiggestVersion AQ_GbeStandardPackageIdentifier_HHD
#define AQ_GbeStandardStatus_2_BiggestVersion AQ_GbeStandardStatus_2_HHD
#define AQ_GbeStandardVendorDevicesInPackage_BiggestVersion AQ_GbeStandardVendorDevicesInPackage_HHD
#define AQ_GlobalAlarms_BiggestVersion AQ_GlobalAlarms_HHD
#define AQ_GlobalCableDiagnosticImpedance_BiggestVersion AQ_GlobalCableDiagnosticImpedance_HHD
#define AQ_GlobalCableDiagnosticStatus_BiggestVersion AQ_GlobalCableDiagnosticStatus_APPIA
#define AQ_GlobalChipIdentification_BiggestVersion AQ_GlobalChipIdentification_APPIA
#define AQ_GlobalChipRevision_BiggestVersion AQ_GlobalChipRevision_APPIA
#define AQ_GlobalChip_wideStandardInterruptFlags_BiggestVersion AQ_GlobalChip_wideStandardInterruptFlags_HHD
#define AQ_GlobalChip_wideVendorInterruptFlags_BiggestVersion AQ_GlobalChip_wideVendorInterruptFlags_HHD
#define AQ_GlobalControl_BiggestVersion AQ_GlobalControl_HHD
#define AQ_GlobalDaisyChainStatus_BiggestVersion AQ_GlobalDaisyChainStatus_HHD
#define AQ_GlobalDiagnosticProvisioning_BiggestVersion AQ_GlobalDiagnosticProvisioning_HHD
#define AQ_GlobalEeeProvisioning_BiggestVersion AQ_GlobalEeeProvisioning_HHD
#define AQ_GlobalFaultMessage_BiggestVersion AQ_GlobalFaultMessage_HHD
#define AQ_GlobalFirmwareID_BiggestVersion AQ_GlobalFirmwareID_HHD
#define AQ_GlobalGeneralProvisioning_BiggestVersion AQ_GlobalGeneralProvisioning_HHD
#define AQ_GlobalGeneralStatus_BiggestVersion AQ_GlobalGeneralStatus_HHD
#define AQ_GlobalInterruptChip_wideStandardMask_BiggestVersion AQ_GlobalInterruptChip_wideStandardMask_HHD
#define AQ_GlobalInterruptChip_wideVendorMask_BiggestVersion AQ_GlobalInterruptChip_wideVendorMask_HHD
#define AQ_GlobalInterruptMask_BiggestVersion AQ_GlobalInterruptMask_HHD
#define AQ_GlobalLedProvisioning_BiggestVersion AQ_GlobalLedProvisioning_HHD
#define AQ_GlobalMailboxInterface_BiggestVersion AQ_GlobalMailboxInterface_HHD
#define AQ_GlobalMicroprocessorScratchPad_BiggestVersion AQ_GlobalMicroprocessorScratchPad_HHD
#define AQ_GlobalNvrInterface_BiggestVersion AQ_GlobalNvrInterface_HHD
#define AQ_GlobalNvrProvisioning_BiggestVersion AQ_GlobalNvrProvisioning_HHD
#define AQ_GlobalPinStatus_BiggestVersion AQ_GlobalPinStatus_HHD
#define AQ_GlobalPrimaryStatus_BiggestVersion AQ_GlobalPrimaryStatus_APPIA
#define AQ_GlobalReservedProvisioning_BiggestVersion AQ_GlobalReservedProvisioning_HHD
#define AQ_GlobalReservedStatus_BiggestVersion AQ_GlobalReservedStatus_HHD
#define AQ_GlobalResetControl_BiggestVersion AQ_GlobalResetControl_HHD
#define AQ_GlobalSmbus_0Provisioning_BiggestVersion AQ_GlobalSmbus_0Provisioning_HHD
#define AQ_GlobalSmbus_1Provisioning_BiggestVersion AQ_GlobalSmbus_1Provisioning_HHD
#define AQ_GlobalStandardControl_1_BiggestVersion AQ_GlobalStandardControl_1_HHD
#define AQ_GlobalStandardDeviceIdentifier_BiggestVersion AQ_GlobalStandardDeviceIdentifier_HHD
#define AQ_GlobalStandardDevicesInPackage_BiggestVersion AQ_GlobalStandardDevicesInPackage_HHD
#define AQ_GlobalStandardPackageIdentifier_BiggestVersion AQ_GlobalStandardPackageIdentifier_HHD
#define AQ_GlobalStandardStatus_2_BiggestVersion AQ_GlobalStandardStatus_2_HHD
#define AQ_GlobalStandardVendorDevicesInPackage_BiggestVersion AQ_GlobalStandardVendorDevicesInPackage_HHD
#define AQ_GlobalStatus_BiggestVersion AQ_GlobalStatus_HHD
#define AQ_GlobalThermalProvisioning_BiggestVersion AQ_GlobalThermalProvisioning_HHD
#define AQ_GlobalThermalStatus_BiggestVersion AQ_GlobalThermalStatus_HHD
#define AQ_Kr0AutonegotiationAdvertisementWord_BiggestVersion AQ_Kr0AutonegotiationAdvertisementWord_HHD
#define AQ_Kr0AutonegotiationControl_BiggestVersion AQ_Kr0AutonegotiationControl_HHD
#define AQ_Kr0AutonegotiationExtendedNextPageAdvertisementWord_BiggestVersion AQ_Kr0AutonegotiationExtendedNextPageAdvertisementWord_HHD
#define AQ_Kr0AutonegotiationStatus_BiggestVersion AQ_Kr0AutonegotiationStatus_HHD
#define AQ_Kr0LinkPartnerAutonegotiationAdvertisementWord_BiggestVersion AQ_Kr0LinkPartnerAutonegotiationAdvertisementWord_HHD
#define AQ_Kr0LinkPartnerAutonegotiationExtendedNextPageAdvertisementWord_BiggestVersion AQ_Kr0LinkPartnerAutonegotiationExtendedNextPageAdvertisementWord_HHD
#define AQ_Kr1AutonegotiationAdvertisementWord_BiggestVersion AQ_Kr1AutonegotiationAdvertisementWord_HHD
#define AQ_Kr1AutonegotiationControl_BiggestVersion AQ_Kr1AutonegotiationControl_HHD
#define AQ_Kr1AutonegotiationExtendedNextPageAdvertisementWord_BiggestVersion AQ_Kr1AutonegotiationExtendedNextPageAdvertisementWord_HHD
#define AQ_Kr1AutonegotiationStatus_BiggestVersion AQ_Kr1AutonegotiationStatus_HHD
#define AQ_Kr1LinkPartnerAutonegotiationAdvertisementWord_BiggestVersion AQ_Kr1LinkPartnerAutonegotiationAdvertisementWord_HHD
#define AQ_Kr1LinkPartnerAutonegotiationExtendedNextPageAdvertisementWord_BiggestVersion AQ_Kr1LinkPartnerAutonegotiationExtendedNextPageAdvertisementWord_HHD
#define AQ_MsmLineFifoControlRegister_BiggestVersion AQ_MsmLineFifoControlRegister_HHD
#define AQ_MsmLineGeneralControlRegister_BiggestVersion AQ_MsmLineGeneralControlRegister_HHD
#define AQ_MsmLineGeneralStatusRegister_BiggestVersion AQ_MsmLineGeneralStatusRegister_HHD
#define AQ_MsmLineRxAlignmentErrorsCounterRegister_BiggestVersion AQ_MsmLineRxAlignmentErrorsCounterRegister_HHD
#define AQ_MsmLineRxBroadcastFramesCounterRegister_BiggestVersion AQ_MsmLineRxBroadcastFramesCounterRegister_HHD
#define AQ_MsmLineRxErrorsCounterRegister_BiggestVersion AQ_MsmLineRxErrorsCounterRegister_HHD
#define AQ_MsmLineRxFcsErrorsCounterRegister_BiggestVersion AQ_MsmLineRxFcsErrorsCounterRegister_HHD
#define AQ_MsmLineRxGoodFramesCounterRegister_BiggestVersion AQ_MsmLineRxGoodFramesCounterRegister_HHD
#define AQ_MsmLineRxInRangeLengthErrorsCounterRegister_BiggestVersion AQ_MsmLineRxInRangeLengthErrorsCounterRegister_HHD
#define AQ_MsmLineRxMulticastFramesCounterRegister_BiggestVersion AQ_MsmLineRxMulticastFramesCounterRegister_HHD
#define AQ_MsmLineRxOctetsCounterRegister_BiggestVersion AQ_MsmLineRxOctetsCounterRegister_HHD
#define AQ_MsmLineRxPauseFramesCounterRegister_BiggestVersion AQ_MsmLineRxPauseFramesCounterRegister_HHD
#define AQ_MsmLineRxTooLongErrorsCounterRegister_BiggestVersion AQ_MsmLineRxTooLongErrorsCounterRegister_HHD
#define AQ_MsmLineRxUnicastFramesCounterRegister_BiggestVersion AQ_MsmLineRxUnicastFramesCounterRegister_HHD
#define AQ_MsmLineRxVlanFramesCounterRegister_BiggestVersion AQ_MsmLineRxVlanFramesCounterRegister_HHD
#define AQ_MsmLineTxBroadcastFramesCounterRegister_BiggestVersion AQ_MsmLineTxBroadcastFramesCounterRegister_HHD
#define AQ_MsmLineTxErrorsCounterRegister_BiggestVersion AQ_MsmLineTxErrorsCounterRegister_HHD
#define AQ_MsmLineTxGoodFramesCounterRegister_BiggestVersion AQ_MsmLineTxGoodFramesCounterRegister_HHD
#define AQ_MsmLineTxIpgControlRegister_BiggestVersion AQ_MsmLineTxIpgControlRegister_HHD
#define AQ_MsmLineTxMulticastFramesCounterRegister_BiggestVersion AQ_MsmLineTxMulticastFramesCounterRegister_HHD
#define AQ_MsmLineTxOctetsCounterRegister_BiggestVersion AQ_MsmLineTxOctetsCounterRegister_HHD
#define AQ_MsmLineTxPauseFramesCounterRegister_BiggestVersion AQ_MsmLineTxPauseFramesCounterRegister_HHD
#define AQ_MsmLineTxUnicastFramesCounterRegister_BiggestVersion AQ_MsmLineTxUnicastFramesCounterRegister_HHD
#define AQ_MsmLineTxVlanFramesCounterRegister_BiggestVersion AQ_MsmLineTxVlanFramesCounterRegister_HHD
#define AQ_MsmSystemFifoControlRegister_BiggestVersion AQ_MsmSystemFifoControlRegister_HHD
#define AQ_MsmSystemGeneralControlRegister_BiggestVersion AQ_MsmSystemGeneralControlRegister_HHD
#define AQ_MsmSystemGeneralStatusRegister_BiggestVersion AQ_MsmSystemGeneralStatusRegister_HHD
#define AQ_MsmSystemRxAlignmentErrorsCounterRegister_BiggestVersion AQ_MsmSystemRxAlignmentErrorsCounterRegister_HHD
#define AQ_MsmSystemRxBroadcastFramesCounterRegister_BiggestVersion AQ_MsmSystemRxBroadcastFramesCounterRegister_HHD
#define AQ_MsmSystemRxErrorsCounterRegister_BiggestVersion AQ_MsmSystemRxErrorsCounterRegister_HHD
#define AQ_MsmSystemRxFcsErrorsCounterRegister_BiggestVersion AQ_MsmSystemRxFcsErrorsCounterRegister_HHD
#define AQ_MsmSystemRxGoodFramesCounterRegister_BiggestVersion AQ_MsmSystemRxGoodFramesCounterRegister_HHD
#define AQ_MsmSystemRxInRangeLengthErrorsCounterRegister_BiggestVersion AQ_MsmSystemRxInRangeLengthErrorsCounterRegister_HHD
#define AQ_MsmSystemRxMulticastFramesCounterRegister_BiggestVersion AQ_MsmSystemRxMulticastFramesCounterRegister_HHD
#define AQ_MsmSystemRxOctetsCounterRegister_BiggestVersion AQ_MsmSystemRxOctetsCounterRegister_HHD
#define AQ_MsmSystemRxPauseFramesCounterRegister_BiggestVersion AQ_MsmSystemRxPauseFramesCounterRegister_HHD
#define AQ_MsmSystemRxTooLongErrorsCounterRegister_BiggestVersion AQ_MsmSystemRxTooLongErrorsCounterRegister_HHD
#define AQ_MsmSystemRxUnicastFramesCounterRegister_BiggestVersion AQ_MsmSystemRxUnicastFramesCounterRegister_HHD
#define AQ_MsmSystemRxVlanFramesCounterRegister_BiggestVersion AQ_MsmSystemRxVlanFramesCounterRegister_HHD
#define AQ_MsmSystemTxBroadcastFramesCounterRegister_BiggestVersion AQ_MsmSystemTxBroadcastFramesCounterRegister_HHD
#define AQ_MsmSystemTxErrorsCounterRegister_BiggestVersion AQ_MsmSystemTxErrorsCounterRegister_HHD
#define AQ_MsmSystemTxGoodFramesCounterRegister_BiggestVersion AQ_MsmSystemTxGoodFramesCounterRegister_HHD
#define AQ_MsmSystemTxIpgControlRegister_BiggestVersion AQ_MsmSystemTxIpgControlRegister_HHD
#define AQ_MsmSystemTxMulticastFramesCounterRegister_BiggestVersion AQ_MsmSystemTxMulticastFramesCounterRegister_HHD
#define AQ_MsmSystemTxOctetsCounterRegister_BiggestVersion AQ_MsmSystemTxOctetsCounterRegister_HHD
#define AQ_MsmSystemTxPauseFramesCounterRegister_BiggestVersion AQ_MsmSystemTxPauseFramesCounterRegister_HHD
#define AQ_MsmSystemTxUnicastFramesCounterRegister_BiggestVersion AQ_MsmSystemTxUnicastFramesCounterRegister_HHD
#define AQ_MsmSystemTxVlanFramesCounterRegister_BiggestVersion AQ_MsmSystemTxVlanFramesCounterRegister_HHD
#define AQ_MssEgressControlRegister_BiggestVersion AQ_MssEgressControlRegister_HHD
#define AQ_MssEgressEccInterruptStatusRegister_BiggestVersion AQ_MssEgressEccInterruptStatusRegister_HHD
#define AQ_MssEgressInterruptMaskRegister_BiggestVersion AQ_MssEgressInterruptMaskRegister_HHD
#define AQ_MssEgressInterruptStatusRegister_BiggestVersion AQ_MssEgressInterruptStatusRegister_HHD
#define AQ_MssEgressLutAddressControlRegister_BiggestVersion AQ_MssEgressLutAddressControlRegister_HHD
#define AQ_MssEgressLutControlRegister_BiggestVersion AQ_MssEgressLutControlRegister_HHD
#define AQ_MssEgressLutDataControlRegister_BiggestVersion AQ_MssEgressLutDataControlRegister_HHD
#define AQ_MssEgressMtuSizeControlRegister_BiggestVersion AQ_MssEgressMtuSizeControlRegister_HHD
#define AQ_MssEgressPnControlRegister_BiggestVersion AQ_MssEgressPnControlRegister_HHD
#define AQ_MssEgressSaExpiredStatusRegister_BiggestVersion AQ_MssEgressSaExpiredStatusRegister_HHD
#define AQ_MssEgressSaThresholdExpiredStatusRegister_BiggestVersion AQ_MssEgressSaThresholdExpiredStatusRegister_HHD
#define AQ_MssEgressVlanControlRegister_BiggestVersion AQ_MssEgressVlanControlRegister_HHD
#define AQ_MssEgressVlanTpid_0Register_BiggestVersion AQ_MssEgressVlanTpid_0Register_HHD
#define AQ_MssEgressVlanTpid_1Register_BiggestVersion AQ_MssEgressVlanTpid_1Register_HHD
#define AQ_MssIngressControlRegister_BiggestVersion AQ_MssIngressControlRegister_HHD
#define AQ_MssIngressEccInterruptStatusRegister_BiggestVersion AQ_MssIngressEccInterruptStatusRegister_HHD
#define AQ_MssIngressInterruptMaskRegister_BiggestVersion AQ_MssIngressInterruptMaskRegister_HHD
#define AQ_MssIngressInterruptStatusRegister_BiggestVersion AQ_MssIngressInterruptStatusRegister_HHD
#define AQ_MssIngressLutAddressControlRegister_BiggestVersion AQ_MssIngressLutAddressControlRegister_HHD
#define AQ_MssIngressLutControlRegister_BiggestVersion AQ_MssIngressLutControlRegister_HHD
#define AQ_MssIngressLutDataControlRegister_BiggestVersion AQ_MssIngressLutDataControlRegister_HHD
#define AQ_MssIngressMtuSizeControlRegister_BiggestVersion AQ_MssIngressMtuSizeControlRegister_HHD
#define AQ_MssIngressSaControlRegister_BiggestVersion AQ_MssIngressSaControlRegister_HHD
#define AQ_MssIngressSaExpiredStatusRegister_BiggestVersion AQ_MssIngressSaExpiredStatusRegister_HHD
#define AQ_MssIngressSaIcvErrorStatusRegister_BiggestVersion AQ_MssIngressSaIcvErrorStatusRegister_HHD
#define AQ_MssIngressSaReplayErrorStatusRegister_BiggestVersion AQ_MssIngressSaReplayErrorStatusRegister_HHD
#define AQ_MssIngressSaThresholdExpiredStatusRegister_BiggestVersion AQ_MssIngressSaThresholdExpiredStatusRegister_HHD
#define AQ_MssIngressVlanControlRegister_BiggestVersion AQ_MssIngressVlanControlRegister_HHD
#define AQ_MssIngressVlanTpid_0Register_BiggestVersion AQ_MssIngressVlanTpid_0Register_HHD
#define AQ_MssIngressVlanTpid_1Register_BiggestVersion AQ_MssIngressVlanTpid_1Register_HHD
#define AQ_Pcs10GBaseT_Status_BiggestVersion AQ_Pcs10GBaseT_Status_APPIA
#define AQ_Pcs10G_Status_BiggestVersion AQ_Pcs10G_Status_HHD
#define AQ_Pcs10G_base_rPcsTest_patternControl_BiggestVersion AQ_Pcs10G_base_rPcsTest_patternControl_HHD
#define AQ_Pcs10G_base_rPcsTest_patternErrorCounter_BiggestVersion AQ_Pcs10G_base_rPcsTest_patternErrorCounter_HHD
#define AQ_Pcs10G_base_rTestPatternSeedA_BiggestVersion AQ_Pcs10G_base_rTestPatternSeedA_HHD
#define AQ_Pcs10G_base_rTestPatternSeedB_BiggestVersion AQ_Pcs10G_base_rTestPatternSeedB_HHD
#define AQ_PcsEeeCapabilityRegister_BiggestVersion AQ_PcsEeeCapabilityRegister_HHD
#define AQ_PcsEeeWakeErrorCounter_BiggestVersion AQ_PcsEeeWakeErrorCounter_HHD
#define AQ_PcsReceiveStandardInterruptMask_BiggestVersion AQ_PcsReceiveStandardInterruptMask_APPIA
#define AQ_PcsReceiveVendorAlarms_BiggestVersion AQ_PcsReceiveVendorAlarms_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_1IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_1IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_2IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_2IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_3IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_3IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_4IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_4IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_5IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_5IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_6IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_6IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_7IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_7IterationCounter_HHD
#define AQ_PcsReceiveVendorCorrectedFrame_8IterationCounter_BiggestVersion AQ_PcsReceiveVendorCorrectedFrame_8IterationCounter_HHD
#define AQ_PcsReceiveVendorCrc_8ErrorCounter_BiggestVersion AQ_PcsReceiveVendorCrc_8ErrorCounter_HHD
#define AQ_PcsReceiveVendorDebug_BiggestVersion AQ_PcsReceiveVendorDebug_HHD
#define AQ_PcsReceiveVendorFcsErrorFrameCounter_BiggestVersion AQ_PcsReceiveVendorFcsErrorFrameCounter_HHD
#define AQ_PcsReceiveVendorFcsNoErrorFrameCounter_BiggestVersion AQ_PcsReceiveVendorFcsNoErrorFrameCounter_HHD
#define AQ_PcsReceiveVendorInterruptMask_BiggestVersion AQ_PcsReceiveVendorInterruptMask_HHD
#define AQ_PcsReceiveVendorProvisioning_BiggestVersion AQ_PcsReceiveVendorProvisioning_HHD
#define AQ_PcsReceiveVendorState_BiggestVersion AQ_PcsReceiveVendorState_HHD
#define AQ_PcsReceiveVendorUncorrectedFrameCounter_BiggestVersion AQ_PcsReceiveVendorUncorrectedFrameCounter_HHD
#define AQ_PcsReceiveXfi0Provisioning_BiggestVersion AQ_PcsReceiveXfi0Provisioning_HHD
#define AQ_PcsReceiveXfi0VendorState_BiggestVersion AQ_PcsReceiveXfi0VendorState_HHD
#define AQ_PcsReceiveXfi1Provisioning_BiggestVersion AQ_PcsReceiveXfi1Provisioning_HHD
#define AQ_PcsReceiveXfi1VendorState_BiggestVersion AQ_PcsReceiveXfi1VendorState_HHD
#define AQ_PcsSerdesMuxSwapTxrxRegister_BiggestVersion AQ_PcsSerdesMuxSwapTxrxRegister_HHD
#define AQ_PcsStandardControl_1_BiggestVersion AQ_PcsStandardControl_1_HHD
#define AQ_PcsStandardControl_2_BiggestVersion AQ_PcsStandardControl_2_HHD
#define AQ_PcsStandardDeviceIdentifier_BiggestVersion AQ_PcsStandardDeviceIdentifier_HHD
#define AQ_PcsStandardDevicesInPackage_BiggestVersion AQ_PcsStandardDevicesInPackage_HHD
#define AQ_PcsStandardInterruptMask_BiggestVersion AQ_PcsStandardInterruptMask_HHD
#define AQ_PcsStandardPackageIdentifier_BiggestVersion AQ_PcsStandardPackageIdentifier_HHD
#define AQ_PcsStandardSpeedAbility_BiggestVersion AQ_PcsStandardSpeedAbility_HHD
#define AQ_PcsStandardStatus_1_BiggestVersion AQ_PcsStandardStatus_1_HHD
#define AQ_PcsStandardStatus_2_BiggestVersion AQ_PcsStandardStatus_2_HHD
#define AQ_PcsTransmitReservedVendorProvisioning_BiggestVersion AQ_PcsTransmitReservedVendorProvisioning_HHD
#define AQ_PcsTransmitVendorAlarms_BiggestVersion AQ_PcsTransmitVendorAlarms_APPIA
#define AQ_PcsTransmitVendorDebug_BiggestVersion AQ_PcsTransmitVendorDebug_HHD
#define AQ_PcsTransmitVendorFcsErrorFrameCounter_BiggestVersion AQ_PcsTransmitVendorFcsErrorFrameCounter_HHD
#define AQ_PcsTransmitVendorFcsNoErrorFrameCounter_BiggestVersion AQ_PcsTransmitVendorFcsNoErrorFrameCounter_HHD
#define AQ_PcsTransmitVendorInterruptMask_BiggestVersion AQ_PcsTransmitVendorInterruptMask_APPIA
#define AQ_PcsTransmitVendorProvisioning_BiggestVersion AQ_PcsTransmitVendorProvisioning_HHD
#define AQ_PcsTransmitXfi0VendorProvisioning_BiggestVersion AQ_PcsTransmitXfi0VendorProvisioning_HHD
#define AQ_PcsTransmitXfi0VendorState_BiggestVersion AQ_PcsTransmitXfi0VendorState_HHD
#define AQ_PcsTransmitXfi1VendorProvisioning_BiggestVersion AQ_PcsTransmitXfi1VendorProvisioning_HHD
#define AQ_PcsTransmitXfi1VendorState_BiggestVersion AQ_PcsTransmitXfi1VendorState_HHD
#define AQ_PcsTransmitXfiVendorProvisioning_BiggestVersion AQ_PcsTransmitXfiVendorProvisioning_HHD
#define AQ_PcsTransmitXgsVendorState_BiggestVersion AQ_PcsTransmitXgsVendorState_HHD
#define AQ_PcsVendorGlobalInterruptFlags_BiggestVersion AQ_PcsVendorGlobalInterruptFlags_HHD
#define AQ_PhyXS_EeeCapabilityRegister_BiggestVersion AQ_PhyXS_EeeCapabilityRegister_HHD
#define AQ_PhyXS_EeeWakeErrorCounter_BiggestVersion AQ_PhyXS_EeeWakeErrorCounter_HHD
#define AQ_PhyXS_Receive_xauiTx_PcsStatus_BiggestVersion AQ_PhyXS_Receive_xauiTx_PcsStatus_HHD
#define AQ_PhyXS_Receive_xauiTx_ReservedVendorProvisioning_BiggestVersion AQ_PhyXS_Receive_xauiTx_ReservedVendorProvisioning_HHD
#define AQ_PhyXS_Receive_xauiTx_VendorAlarms_BiggestVersion AQ_PhyXS_Receive_xauiTx_VendorAlarms_HHD
#define AQ_PhyXS_Receive_xauiTx_VendorDebug_BiggestVersion AQ_PhyXS_Receive_xauiTx_VendorDebug_HHD
#define AQ_PhyXS_Receive_xauiTx_VendorInterruptMask_BiggestVersion AQ_PhyXS_Receive_xauiTx_VendorInterruptMask_HHD
#define AQ_PhyXS_SerdesConfiguration_BiggestVersion AQ_PhyXS_SerdesConfiguration_HHD
#define AQ_PhyXS_SerdesLane_0Configuration_BiggestVersion AQ_PhyXS_SerdesLane_0Configuration_HHD
#define AQ_PhyXS_SerdesLane_1Configuration_BiggestVersion AQ_PhyXS_SerdesLane_1Configuration_HHD
#define AQ_PhyXS_SerdesLane_2Configuration_BiggestVersion AQ_PhyXS_SerdesLane_2Configuration_HHD
#define AQ_PhyXS_SerdesLane_3Configuration_BiggestVersion AQ_PhyXS_SerdesLane_3Configuration_HHD
#define AQ_PhyXS_SerdesLut_BiggestVersion AQ_PhyXS_SerdesLut_HHD
#define AQ_PhyXS_StandardControl_1_BiggestVersion AQ_PhyXS_StandardControl_1_HHD
#define AQ_PhyXS_StandardDeviceIdentifier_BiggestVersion AQ_PhyXS_StandardDeviceIdentifier_HHD
#define AQ_PhyXS_StandardDevicesInPackage_BiggestVersion AQ_PhyXS_StandardDevicesInPackage_HHD
#define AQ_PhyXS_StandardPackageIdentifier_BiggestVersion AQ_PhyXS_StandardPackageIdentifier_HHD
#define AQ_PhyXS_StandardSpeedAbility_BiggestVersion AQ_PhyXS_StandardSpeedAbility_HHD
#define AQ_PhyXS_StandardStatus_1_BiggestVersion AQ_PhyXS_StandardStatus_1_HHD
#define AQ_PhyXS_StandardStatus_2_BiggestVersion AQ_PhyXS_StandardStatus_2_HHD
#define AQ_PhyXS_StandardXGXS_LaneStatus_BiggestVersion AQ_PhyXS_StandardXGXS_LaneStatus_HHD
#define AQ_PhyXS_StandardXGXS_TestControl_BiggestVersion AQ_PhyXS_StandardXGXS_TestControl_HHD
#define AQ_PhyXS_SystemInterfaceConnectionStatus_BiggestVersion AQ_PhyXS_SystemInterfaceConnectionStatus_HHD
#define AQ_PhyXS_Transmit_xauiRx_PcsStatus_BiggestVersion AQ_PhyXS_Transmit_xauiRx_PcsStatus_HHD
#define AQ_PhyXS_Transmit_xauiRx_ReservedVendorProvisioning_BiggestVersion AQ_PhyXS_Transmit_xauiRx_ReservedVendorProvisioning_HHD
#define AQ_PhyXS_Transmit_xauiRx_ReservedVendorState_BiggestVersion AQ_PhyXS_Transmit_xauiRx_ReservedVendorState_HHD
#define AQ_PhyXS_Transmit_xauiRx_StandardInterruptMask_BiggestVersion AQ_PhyXS_Transmit_xauiRx_StandardInterruptMask_HHD
#define AQ_PhyXS_Transmit_xauiRx_TestPatternErrorCounter_BiggestVersion AQ_PhyXS_Transmit_xauiRx_TestPatternErrorCounter_HHD
#define AQ_PhyXS_Transmit_xauiRx_VendorAlarms_BiggestVersion AQ_PhyXS_Transmit_xauiRx_VendorAlarms_HHD
#define AQ_PhyXS_Transmit_xauiRx_VendorDebug_BiggestVersion AQ_PhyXS_Transmit_xauiRx_VendorDebug_HHD
#define AQ_PhyXS_Transmit_xauiRx_VendorInterruptMask_BiggestVersion AQ_PhyXS_Transmit_xauiRx_VendorInterruptMask_HHD
#define AQ_PhyXS_VendorGlobalInterruptFlags_BiggestVersion AQ_PhyXS_VendorGlobalInterruptFlags_HHD
#define AQ_PifMailboxControl_BiggestVersion AQ_PifMailboxControl_HHD
#define AQ_Pma10GBaseT_FastRetrainStatusAndControl_BiggestVersion AQ_Pma10GBaseT_FastRetrainStatusAndControl_HHD
#define AQ_Pma10GBaseT_PairSwapAndPolarityStatus_BiggestVersion AQ_Pma10GBaseT_PairSwapAndPolarityStatus_HHD
#define AQ_Pma10GBaseT_ReceiveSignalPowerChannelA_BiggestVersion AQ_Pma10GBaseT_ReceiveSignalPowerChannelA_HHD
#define AQ_Pma10GBaseT_ReceiveSignalPowerChannelB_BiggestVersion AQ_Pma10GBaseT_ReceiveSignalPowerChannelB_HHD
#define AQ_Pma10GBaseT_ReceiveSignalPowerChannelC_BiggestVersion AQ_Pma10GBaseT_ReceiveSignalPowerChannelC_HHD
#define AQ_Pma10GBaseT_ReceiveSignalPowerChannelD_BiggestVersion AQ_Pma10GBaseT_ReceiveSignalPowerChannelD_HHD
#define AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelA_BiggestVersion AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelA_HHD
#define AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelB_BiggestVersion AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelB_HHD
#define AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelC_BiggestVersion AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelC_HHD
#define AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelD_BiggestVersion AQ_Pma10GBaseT_SNR_MinimumOperatingMarginChannelD_HHD
#define AQ_Pma10GBaseT_SNR_OperatingMarginChannelA_BiggestVersion AQ_Pma10GBaseT_SNR_OperatingMarginChannelA_HHD
#define AQ_Pma10GBaseT_SNR_OperatingMarginChannelB_BiggestVersion AQ_Pma10GBaseT_SNR_OperatingMarginChannelB_HHD
#define AQ_Pma10GBaseT_SNR_OperatingMarginChannelC_BiggestVersion AQ_Pma10GBaseT_SNR_OperatingMarginChannelC_HHD
#define AQ_Pma10GBaseT_SNR_OperatingMarginChannelD_BiggestVersion AQ_Pma10GBaseT_SNR_OperatingMarginChannelD_HHD
#define AQ_Pma10GBaseT_SkewDelay_BiggestVersion AQ_Pma10GBaseT_SkewDelay_HHD
#define AQ_Pma10GBaseT_Status_BiggestVersion AQ_Pma10GBaseT_Status_HHD
#define AQ_Pma10GBaseT_TestModes_BiggestVersion AQ_Pma10GBaseT_TestModes_HHD
#define AQ_Pma10GBaseT_TxPowerBackoffAndShortReachSetting_BiggestVersion AQ_Pma10GBaseT_TxPowerBackoffAndShortReachSetting_HHD
#define AQ_PmaReceiveReservedVendorProvisioning_BiggestVersion AQ_PmaReceiveReservedVendorProvisioning_HHD
#define AQ_PmaReceiveReservedVendorState_BiggestVersion AQ_PmaReceiveReservedVendorState_HHD
#define AQ_PmaReceiveVendorState_BiggestVersion AQ_PmaReceiveVendorState_HHD
#define AQ_PmaStandardControl_1_BiggestVersion AQ_PmaStandardControl_1_HHD
#define AQ_PmaStandardControl_2_BiggestVersion AQ_PmaStandardControl_2_HHD
#define AQ_PmaStandardDeviceIdentifier_BiggestVersion AQ_PmaStandardDeviceIdentifier_HHD
#define AQ_PmaStandardDevicesInPackage_BiggestVersion AQ_PmaStandardDevicesInPackage_HHD
#define AQ_PmaStandardPackageIdentifier_BiggestVersion AQ_PmaStandardPackageIdentifier_HHD
#define AQ_PmaStandardSpeedAbility_BiggestVersion AQ_PmaStandardSpeedAbility_HHD
#define AQ_PmaStandardStatus_1_BiggestVersion AQ_PmaStandardStatus_1_HHD
#define AQ_PmaStandardStatus_2_BiggestVersion AQ_PmaStandardStatus_2_HHD
#define AQ_PmaTransmitReservedVendorProvisioning_BiggestVersion AQ_PmaTransmitReservedVendorProvisioning_HHD
#define AQ_PmaTransmitStandardInterruptMask_BiggestVersion AQ_PmaTransmitStandardInterruptMask_HHD
#define AQ_PmaTransmitVendorAlarms_BiggestVersion AQ_PmaTransmitVendorAlarms_HHD
#define AQ_PmaTransmitVendorDebug_BiggestVersion AQ_PmaTransmitVendorDebug_HHD
#define AQ_PmaTransmitVendorLASI_InterruptMask_BiggestVersion AQ_PmaTransmitVendorLASI_InterruptMask_HHD
#define AQ_PmaVendorGlobalInterruptFlags_BiggestVersion AQ_PmaVendorGlobalInterruptFlags_HHD
#define AQ_PmdStandard10G_ExtendedAbilityRegister_BiggestVersion AQ_PmdStandard10G_ExtendedAbilityRegister_HHD
#define AQ_PmdStandardSignalDetect_BiggestVersion AQ_PmdStandardSignalDetect_HHD
#define AQ_PmdStandardTransmitDisableControl_BiggestVersion AQ_PmdStandardTransmitDisableControl_HHD
#define AQ_Sgmii0WolStatus_BiggestVersion AQ_Sgmii0WolStatus_HHD
#define AQ_TimesyncPcsCapability_BiggestVersion AQ_TimesyncPcsCapability_HHD
#define AQ_TimesyncPcsReceivePathDataDelay_BiggestVersion AQ_TimesyncPcsReceivePathDataDelay_HHD
#define AQ_TimesyncPcsTransmitPathDataDelay_BiggestVersion AQ_TimesyncPcsTransmitPathDataDelay_HHD
#define AQ_TimesyncPhyXsCapability_BiggestVersion AQ_TimesyncPhyXsCapability_HHD
#define AQ_TimesyncPhyXsReceivePathDataDelay_BiggestVersion AQ_TimesyncPhyXsReceivePathDataDelay_HHD
#define AQ_TimesyncPhyXsTransmitPathDataDelay_BiggestVersion AQ_TimesyncPhyXsTransmitPathDataDelay_HHD
#define AQ_TimesyncPmaCapability_BiggestVersion AQ_TimesyncPmaCapability_HHD
#define AQ_TimesyncPmaReceivePathDataDelay_BiggestVersion AQ_TimesyncPmaReceivePathDataDelay_HHD
#define AQ_TimesyncPmaTransmitPathDataDelay_BiggestVersion AQ_TimesyncPmaTransmitPathDataDelay_HHD
#define AQ_XenpakBasic_ApsLoading_BiggestVersion AQ_XenpakBasic_ApsLoading_HHD
#define AQ_XenpakBasic_ApsVoltage_BiggestVersion AQ_XenpakBasic_ApsVoltage_HHD
#define AQ_XenpakBasic_BitRate_BiggestVersion AQ_XenpakBasic_BitRate_HHD
#define AQ_XenpakBasic_Checksum_BiggestVersion AQ_XenpakBasic_Checksum_HHD
#define AQ_XenpakBasic_ConnectorType_BiggestVersion AQ_XenpakBasic_ConnectorType_HHD
#define AQ_XenpakBasic_DomCapability_BiggestVersion AQ_XenpakBasic_DomCapability_HHD
#define AQ_XenpakBasic_Encoding_BiggestVersion AQ_XenpakBasic_Encoding_HHD
#define AQ_XenpakBasic_Low_powerStartupCapability_BiggestVersion AQ_XenpakBasic_Low_powerStartupCapability_HHD
#define AQ_XenpakBasic_PackageIdentifier_BiggestVersion AQ_XenpakBasic_PackageIdentifier_HHD
#define AQ_XenpakBasic_Protocol_BiggestVersion AQ_XenpakBasic_Protocol_HHD
#define AQ_XenpakBasic_Reserved_0x11_BiggestVersion AQ_XenpakBasic_Reserved_0x11_HHD
#define AQ_XenpakBasic_Reserved_0x19_BiggestVersion AQ_XenpakBasic_Reserved_0x19_HHD
#define AQ_XenpakBasic_Reserved_0x7c_BiggestVersion AQ_XenpakBasic_Reserved_0x7c_HHD
#define AQ_XenpakBasic_StandardsComplianceCodes_BiggestVersion AQ_XenpakBasic_StandardsComplianceCodes_HHD
#define AQ_XenpakBasic_TransceiverType_BiggestVersion AQ_XenpakBasic_TransceiverType_HHD
#define AQ_XenpakBasic_VendorDateCode_BiggestVersion AQ_XenpakBasic_VendorDateCode_HHD
#define AQ_XenpakBasic_VendorIdentifier_BiggestVersion AQ_XenpakBasic_VendorIdentifier_HHD
#define AQ_XenpakBasic_VendorName_BiggestVersion AQ_XenpakBasic_VendorName_HHD
#define AQ_XenpakBasic_VendorPartNumber_BiggestVersion AQ_XenpakBasic_VendorPartNumber_HHD
#define AQ_XenpakBasic_VendorPartRevisionNumber_BiggestVersion AQ_XenpakBasic_VendorPartRevisionNumber_HHD
#define AQ_XenpakBasic_VendorSerialNumber_BiggestVersion AQ_XenpakBasic_VendorSerialNumber_HHD
#define AQ_XenpakBasic__3_3vLoading_BiggestVersion AQ_XenpakBasic__3_3vLoading_HHD
#define AQ_XenpakBasic__5vLoading_BiggestVersion AQ_XenpakBasic__5vLoading_HHD
#define AQ_XenpakControl_BiggestVersion AQ_XenpakControl_HHD
#define AQ_XenpakCustomer_Reserved_0x7e_BiggestVersion AQ_XenpakCustomer_Reserved_0x7e_HHD
#define AQ_XenpakDom_Alarms_BiggestVersion AQ_XenpakDom_Alarms_HHD
#define AQ_XenpakDom_Capability_BiggestVersion AQ_XenpakDom_Capability_HHD
#define AQ_XenpakDom_ControlAndStatus_BiggestVersion AQ_XenpakDom_ControlAndStatus_HHD
#define AQ_XenpakDom_HighTemperatureAlarmThresholdLSW_BiggestVersion AQ_XenpakDom_HighTemperatureAlarmThresholdLSW_HHD
#define AQ_XenpakDom_HighTemperatureAlarmThresholdMSW_BiggestVersion AQ_XenpakDom_HighTemperatureAlarmThresholdMSW_HHD
#define AQ_XenpakDom_HighTemperatureWarningThresholdLSW_BiggestVersion AQ_XenpakDom_HighTemperatureWarningThresholdLSW_HHD
#define AQ_XenpakDom_HighTemperatureWarningThresholdMSW_BiggestVersion AQ_XenpakDom_HighTemperatureWarningThresholdMSW_HHD
#define AQ_XenpakDom_LowTemperatureAlarmThresholdLSW_BiggestVersion AQ_XenpakDom_LowTemperatureAlarmThresholdLSW_HHD
#define AQ_XenpakDom_LowTemperatureAlarmThresholdMSW_BiggestVersion AQ_XenpakDom_LowTemperatureAlarmThresholdMSW_HHD
#define AQ_XenpakDom_LowTemperatureWarningThresholdLSW_BiggestVersion AQ_XenpakDom_LowTemperatureWarningThresholdLSW_HHD
#define AQ_XenpakDom_LowTemperatureWarningThresholdMSW_BiggestVersion AQ_XenpakDom_LowTemperatureWarningThresholdMSW_HHD
#define AQ_XenpakDom_Status_BiggestVersion AQ_XenpakDom_Status_HHD
#define AQ_XenpakDom_TemperatureLSW_BiggestVersion AQ_XenpakDom_TemperatureLSW_HHD
#define AQ_XenpakDom_TemperatureMSW_BiggestVersion AQ_XenpakDom_TemperatureMSW_HHD
#define AQ_XenpakDom_TxControl_BiggestVersion AQ_XenpakDom_TxControl_HHD
#define AQ_XenpakHeader_BasicMemoryStartAddress_BiggestVersion AQ_XenpakHeader_BasicMemoryStartAddress_HHD
#define AQ_XenpakHeader_CustomerMemoryOffset_BiggestVersion AQ_XenpakHeader_CustomerMemoryOffset_HHD
#define AQ_XenpakHeader_ExtendedVendorMemoryOffset_BiggestVersion AQ_XenpakHeader_ExtendedVendorMemoryOffset_HHD
#define AQ_XenpakHeader_MemoryUsed_BiggestVersion AQ_XenpakHeader_MemoryUsed_HHD
#define AQ_XenpakHeader_NvrSize_BiggestVersion AQ_XenpakHeader_NvrSize_HHD
#define AQ_XenpakHeader_VendorMemoryStartAddress_BiggestVersion AQ_XenpakHeader_VendorMemoryStartAddress_HHD
#define AQ_XenpakHeader_XenpakMsaVersionSupported_BiggestVersion AQ_XenpakHeader_XenpakMsaVersionSupported_HHD
#define AQ_XenpakLASI__Control_BiggestVersion AQ_XenpakLASI__Control_HHD
#define AQ_XenpakLASI__Status_BiggestVersion AQ_XenpakLASI__Status_HHD
#define AQ_XenpakRxAlarm_Control_BiggestVersion AQ_XenpakRxAlarm_Control_HHD
#define AQ_XenpakRxAlarm_Status_BiggestVersion AQ_XenpakRxAlarm_Status_HHD
#define AQ_XenpakTxAlarm_Control_BiggestVersion AQ_XenpakTxAlarm_Control_HHD
#define AQ_XenpakTxAlarm_Status_BiggestVersion AQ_XenpakTxAlarm_Status_HHD
#define AQ_XenpakVendor_Reserved_0xae_BiggestVersion AQ_XenpakVendor_Reserved_0xae_HHD
#endif

View File

@@ -0,0 +1,69 @@
/*AQ_RegMaps.h*/
/************************************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Description:
*
* This file contains includes all appropriate Aquantia PHY device-specific
* register map headers.
*
************************************************************************************/

/*! \file
 * This file contains includes all appropriate Aquantia PHY device-specific
 * register map headers.
 */

#ifndef AQ_REGISTERMAPS_HEADER
#define AQ_REGISTERMAPS_HEADER

/* Platform configuration plus the per-register-group maximum sizes. */
#include "AQ_User.h"
#include "AQ_RegGroupMaxSizes.h"

/* AQ_REVERSED_BITFIELD_ORDERING (presumably set in AQ_User.h -- TODO confirm)
 * selects which of the two register-map layouts below is pulled in.  The
 * *_Defines.h headers are identical in both branches; only the bitfield
 * struct headers differ ("_reversed" variants in the #else branch). */
#ifndef AQ_REVERSED_BITFIELD_ORDERING

/*
 * Include non-reversed header files (bitfields ordered from LSbit to MSbit)
 */
/* APPIA */
#include "AQ_APPIA_Global_registers.h"
#include "AQ_APPIA_Global_registers_Defines.h"
/* HHD */
#include "AQ_HHD_Global_registers.h"
#include "AQ_HHD_Global_registers_Defines.h"

#else

/*
 * Include reversed header files (bitfields ordered from MSbit to LSbit)
 */
/* APPIA */
#include "AQ_APPIA_Global_registers_reversed.h"
#include "AQ_APPIA_Global_registers_Defines.h"
/* HHD */
#include "AQ_HHD_Global_registers_reversed.h"
#include "AQ_HHD_Global_registers_Defines.h"

#endif /* AQ_REVERSED_BITFIELD_ORDERING */
#endif /* AQ_REGISTERMAPS_HEADER */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,193 @@
/* mdioBootLoadCLD.c */
/************************************************************************************
* Copyright (c) 2015 Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $File: //depot/icm/proj/Dena/rev1.0/c/Systems/tools/windows/flashUtilities/src/mdioBootLoadCLD.c $
*
* $Revision: #12 $
*
* $DateTime: 2014/05/19 15:34:49 $
*
* $Author: joshd $
*
* $Label: $
*
************************************************************************************/
/*! \file
This file contains the main (int, char**) file for the mdioBootLoadCLD program, which burns a flash image into a target
Aquantia PHY using the AQ_API. This program calls the API function: <BR><BR>
uint8_t AQ_API_WriteBootLoadImage (uint8_t PHY_ID, uint8_t *image, uint16_t *crc16) <BR><BR>
to boot load a cld flash image into an Aquantia PHY */
/*! \addtogroup mdioBootLoad
@{
*/
/*! \def DEBUG
Uncomment this to compile in debug mode. This sets the source to an arbitrary file, defined by DEBUG_FILENAME,
and an arbitrary PHY_ID, defined by DEBUG_PHY_ID. */
/* #define DEBUG */
/*! The debug source file name */
#define DEBUG_FILENAME "HelloWorld.cld"
/*! The debug PHY ID */
#define DEBUG_PHY_ID 0
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include "AQ_API.h"
#include "AQ_PhyInterface.h"
int sock;
char devname[7];
/* Open the UDP socket used as the handle for the MII ioctl requests,
 * storing it in the file-scope 'sock' variable.
 * Returns 0 on success, -1 on failure (diagnostic printed to stderr). */
int sock_init()
{
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		fprintf(stderr, "Error creating socket: %s\n", strerror(errno));
		return -1;
	}

	sock = fd;
	return 0;
}
/*! Boot-loads a .cld flash image into an Aquantia PHY over MDIO.
 *
 * Usage: mdioBootLoadCLD <image file> <netdev name> <phy address>
 *
 * Returns 0 on success; 101 for argument/file/scratchpad errors; 200 when
 * the MDIO socket cannot be created; 1/2/12 mirror the AQ_API_WriteBootLoadImage
 * result codes.
 */
int main ( int argc, char **argp)
{
	/* declare local variables */
	FILE *pFile;
	uint8_t* image;
	unsigned int PHY_ID;
	AQ_Retcode resultCode;
	AQ_Retcode resultCodes[4];
	uint32_t imageSize;
	char sourceFileName[1000];
	AQ_API_Port targetPort0;
	AQ_API_Port* targetPorts[1];
	AQ_API_Port broadcastPort;
	unsigned int provisioningAddresses[1] = {0};
	uint32_t reg1, reg2;

	targetPorts[0] = &targetPort0;

	if (argc < 4) {
		fprintf (stderr, "enter file name/netdev name/phy address\n");
		return (101);
	}

	/* Copy the file name from command line arg */
	if (strlcpy (sourceFileName, argp[1], sizeof(sourceFileName)) >= sizeof(sourceFileName)) {
		fprintf (stderr, "Filename: %s too long \n", argp[1]);
		return (101);
	}

	/* Copy the interface name from command line arg */
	strlcpy (devname, argp[2], sizeof(devname));

	/* Get PHY Address from command line arg */
	PHY_ID = (unsigned int)strtoul(argp[3], NULL, 0);

	/* FIXME: set port and device type */
	targetPort0.device = AQ_DEVICE_HHD;
	targetPort0.PHY_ID = PHY_ID;
	broadcastPort.device = AQ_DEVICE_HHD;
	broadcastPort.PHY_ID = PHY_ID;

	/* open the source in binary read mode */
	pFile = fopen(sourceFileName, "rb");
	if (pFile == NULL)
	{
		fprintf (stderr, "Unable to open source file %s\n", sourceFileName);
		return (101);
	}

	/* determine the image size, then slurp the whole file in one read
	 * (replaces the original byte-at-a-time fgetc() loop) */
	fseek (pFile, 0, SEEK_END);
	imageSize = ftell (pFile);
	fseek (pFile, 0, SEEK_SET);

	image = (uint8_t*) malloc (imageSize * sizeof(uint8_t));
	if (image == NULL)
	{
		/* previously unchecked: a failed allocation would have been
		 * written through below */
		fprintf (stderr, "Unable to allocate %u bytes for image\n",
			(unsigned int) imageSize);
		fclose(pFile);
		return (101);
	}

	if (fread (image, 1, imageSize, pFile) != (size_t) imageSize)
	{
		fprintf (stderr, "Unable to read source file %s\n", sourceFileName);
		fclose(pFile);
		free (image);
		return (101);
	}
	fclose(pFile);

	if (sock_init() < 0)
	{
		fprintf (stderr, "Unable to initialize interface\n");
		free (image);
		return (200);
	}

	/* Write in the Aquantia phy scratch pad register,
	 * read back the same reg and match the values written.
	 */
	AQ_API_MDIO_Write(PHY_ID, 0x1e, 0x300, 0xdead);
	AQ_API_MDIO_Write(PHY_ID, 0x1e, 0x301, 0xbeaf);
	reg1 = AQ_API_MDIO_Read(PHY_ID, 0x1e, 0x300);
	reg2 = AQ_API_MDIO_Read(PHY_ID, 0x1e, 0x301);

	/* Fail if EITHER register misreads.  The original used '&&', which
	 * only reported failure when BOTH registers were wrong. */
	if (reg1 != 0xdead || reg2 != 0xbeaf) {
		fprintf (stderr, "Scratchpad Read/Write test fail\n");
		free (image);
		close(sock);
		return (101);
	}

	/* call the boot-load function */
	resultCode = AQ_API_WriteBootLoadImage(targetPorts, 1, provisioningAddresses, resultCodes, &imageSize, image, PHY_ID, &broadcastPort);

	/* the image buffer is no longer needed regardless of the outcome */
	free (image);

	switch (resultCode)
	{
		case 0:
			printf("Image load good - mailbox CRC-16 matches\n");
			close(sock);
			return 0;
		case 1:
			fprintf (stderr, "CRC-16 on file is bad\n");
			close(sock);
			return 1;
		case 2:
			fprintf (stderr, "CRC-16 check on image load failed (mailbox CRC-16 check)\n");
			close(sock);
			return 2;
		default:
			fprintf (stderr, "Invalid return code\n");
			close(sock);
	}
	return 12;
}
/*@}*/

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,141 @@
/* AQ_PhyInterface.c */
/************************************************************************************
* Copyright (c) 2015, Aquantia
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $Revision: #12 $
*
* $DateTime: 2015/02/25 15:34:49 $
*
* $Label: $
*
************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include "AQ_PhyInterface.h"
#include "AQ_PlatformRoutines.h"
#define MII_ADDR_C45 (0x8000)
extern int sock;
extern char devname[7];
static struct ifreq ifr;
/*! Provides generic synchronous PHY register write functionality. It is the
 * responsibility of the system designer to provide the specific MDIO address
 * pointer updates, etc. in order to accomplish this write operation.
 * It will be assumed that the write has been completed by the time this
 * function returns.*/
void AQ_API_MDIO_Write(
  /*! Uniquely identifies the port within the system. AQ_Port must be
   * defined to a whatever data type is suitable for the platform.*/
  AQ_Port PHY_ID,
  /*! The address of the MMD within the target PHY. */
  unsigned int MMD,
  /*! The 16-bit address of the PHY register being written. */
  unsigned int address,
  /*! The 16-bits of data to write to the specified PHY register. */
  unsigned int data)
{
	struct mii_ioctl_data mii;

	/*
	 * Frame the control structures
	 * and send the ioctl to kernel.
	 */
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, devname, sizeof(ifr.ifr_name));
	memset(&mii, 0, sizeof(mii));
	/* The mii request block lives inline in the ifreq data area; it is
	 * copied out, filled in, and copied back before issuing the ioctl. */
	memcpy(&mii, &ifr.ifr_data, sizeof(mii));
	/* Clause-45 addressing: flag bit | (PHY address << 5) | MMD */
	mii.phy_id = MII_ADDR_C45 | PHY_ID << 5 | MMD;
	mii.reg_num = address;
	mii.val_in = data;
	memcpy(&ifr.ifr_data, &mii, sizeof(mii));

	if (ioctl(sock, SIOCSMIIREG, &ifr) < 0) {
		/* Fixed: the failing ioctl here is SIOCSMIIREG (write); the
		 * original message incorrectly said SIOCGMIIREG. */
		fprintf(stderr, "SIOCSMIIREG on %s failed: %s\n", ifr.ifr_name,
			strerror(errno));
	}
	return;
}
/*! Provides generic synchronous PHY register read functionality. It is the
 * responsibility of the system designer to provide the specific MDIO address
 * pointer updates, etc. in order to accomplish this read operation.
 *
 * Returns the 16-bit register value on success, or (unsigned int)-1
 * (0xFFFFFFFF) when the SIOCGMIIREG ioctl fails. */
unsigned int AQ_API_MDIO_Read
(
  /*! Uniquely identifies the port within the system. AQ_Port must be
   * defined to a whatever data type is suitable for the platform.*/
  AQ_Port PHY_ID,
  /*! The address of the MMD within the target PHY. */
  unsigned int MMD,
  /*! The 16-bit address of the PHY register being read. */
  unsigned int address)
{
	struct mii_ioctl_data mii;

	/*
	 * Frame the control structures
	 * and send the ioctl to kernel.
	 */
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, devname, sizeof(ifr.ifr_name));
	memset(&mii, 0, sizeof(mii));
	/* The mii request block is stored inline in the ifreq data area:
	 * copy out, fill in, copy back before the ioctl. */
	memcpy(&mii, &ifr.ifr_data, sizeof(mii));
	/* Clause-45 addressing: flag bit | (PHY address << 5) | MMD */
	mii.phy_id = MII_ADDR_C45 | PHY_ID << 5 | MMD;
	mii.reg_num = address;
	memcpy(&ifr.ifr_data, &mii, sizeof(mii));

	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0) {
		fprintf(stderr, "SIOCGMIIREG on %s failed: %s\n", ifr.ifr_name,
			strerror(errno));
		/* note: wraps to 0xFFFFFFFF in this unsigned return type */
		return -1;
	} else {
		/* copy the kernel's answer back out of the ifreq data area */
		memcpy(&mii, &ifr.ifr_data, sizeof(mii));
	}
	return mii.val_out;
}
/*! Returns after at least milliseconds have elapsed. This must be implemented
 * in a platform-appropriate way. AQ_API functions will call this function to
 * block for the specified period of time. If necessary, PHY register reads
 * may be performed on port to busy-wait. */
void AQ_API_Wait(
  uint32_t milliseconds, /*!< The delay in milliseconds */
  AQ_API_Port* port /*!< The PHY to use if delay reads are necessary*/ )
{
	struct timespec delay;

	(void) port; /* busy-wait register reads are not needed here */

	/* The original usleep(milliseconds * 1000) overflowed 32 bits for
	 * delays above ~4294 seconds, and POSIX does not require usleep()
	 * to accept arguments of 1000000 or more.  nanosleep() has neither
	 * limitation, and is restarted on signal interruption so the full
	 * delay is always honored. */
	delay.tv_sec = milliseconds / 1000;
	delay.tv_nsec = (long)(milliseconds % 1000) * 1000000L;
	while (nanosleep(&delay, &delay) == -1 && errno == EINTR)
		continue;
}

View File

@@ -0,0 +1,67 @@
#
# Copyright (C) 2011 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#

include $(TOPDIR)/rules.mk

PKG_NAME:=libprotobuf-c
PKG_VERSION:=1.3.1
PKG_RELEASE:=2

# Upstream names its release tarball protobuf-c-*, not libprotobuf-c-*,
# hence the explicit PKG_SOURCE and build-dir overrides below.
PKG_SOURCE:=protobuf-c-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/protobuf-c/protobuf-c/releases/download/v$(PKG_VERSION)
PKG_HASH:=51472d3a191d6d7b425e32b612e477c06f73fe23e07f6a6a839b11808e9d2267
PKG_BUILD_DIR:=$(BUILD_DIR)/protobuf-c-$(PKG_VERSION)
HOST_BUILD_DIR:=$(BUILD_DIR_HOST)/protobuf-c-$(PKG_VERSION)

PKG_MAINTAINER:=Rosen Penev <rosenp@gmail.com>
# Fixed: "BSD-2c" is not a valid SPDX license identifier; the 2-clause
# BSD license is spelled "BSD-2-Clause".
PKG_LICENSE:=BSD-2-Clause

# Target builds need the host protoc-c; the host build needs protobuf's protoc.
PKG_BUILD_DEPENDS:=protobuf-c/host
HOST_BUILD_DEPENDS:=protobuf/host

PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=1

include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/host-build.mk

define Package/libprotobuf-c
  TITLE:=Protocol Buffers library
  SECTION:=libs
  CATEGORY:=Libraries
  URL:=https://github.com/protobuf-c/protobuf-c
endef

define Package/libprotobuf-c/description
Runtime library to use Google Protocol Buffers from C applications.
Protocol Buffers are a way of encoding structured data in an efficient yet
extensible format. Google uses Protocol Buffers for almost all of its
internal RPC protocols and file formats.
endef

# The code generator is only built/shipped for the host.
CONFIGURE_ARGS += \
	--enable-shared \
	--enable-static \
	--disable-protoc

define Build/InstallDev
	$(INSTALL_DIR) $(1)/usr/include/
	$(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
	$(INSTALL_DIR) $(1)/usr/lib
	$(CP) $(PKG_INSTALL_DIR)/usr/lib/libprotobuf-c.{a,la,so*} $(1)/usr/lib/
	$(INSTALL_DIR) $(1)/usr/lib/pkgconfig
	$(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* $(1)/usr/lib/pkgconfig/
endef

define Package/libprotobuf-c/install
	$(INSTALL_DIR) $(1)/usr/lib
	$(CP) $(PKG_INSTALL_DIR)/usr/lib/libprotobuf-c.so.* $(1)/usr/lib/
endef

$(eval $(call BuildPackage,libprotobuf-c))
$(eval $(call HostBuild))

View File

@@ -0,0 +1,13 @@
Index: protobuf-c-1.3.1/t/generated-code2/cxx-generate-packed-data.cc
===================================================================
--- protobuf-c-1.3.1.orig/t/generated-code2/cxx-generate-packed-data.cc
+++ protobuf-c-1.3.1/t/generated-code2/cxx-generate-packed-data.cc
@@ -998,7 +998,7 @@ static void dump_test_packed_repeated_en
static void dump_test_unknown_fields (void)
{
EmptyMess mess;
- const google::protobuf::Message::Reflection *reflection = mess.GetReflection();
+ const google::protobuf::Reflection *reflection = mess.GetReflection();
google::protobuf::UnknownFieldSet *fs = reflection->MutableUnknownFields(&mess);
#if GOOGLE_PROTOBUF_VERSION >= 2001000

View File

@@ -0,0 +1,110 @@
#
# Copyright (C) 2007-2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#

include $(TOPDIR)/rules.mk

PKG_NAME:=protobuf
PKG_VERSION:=3.7.1
PKG_RELEASE:=1

PKG_SOURCE:=$(PKG_NAME)-cpp-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/google/protobuf/releases/download/v$(PKG_VERSION)
PKG_HASH:=97f6cdaa0724d5a8cd3375d5f5cf4bd253d5ad5291154f533ed0d94a9d501ef3

PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
PKG_CPE_ID:=cpe:/a:google:protobuf

# The target (cross) build needs the host-built protoc to generate code.
PKG_BUILD_DEPENDS:=protobuf/host
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1

include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/host-build.mk

# Metadata shared by both the protobuf and protobuf-lite packages.
define Package/protobuf/Default
  SECTION:=libs
  CATEGORY:=Libraries
  TITLE:=A structured data encoding library
  URL:=https://github.com/google/protobuf
  DEPENDS:=+zlib +libpthread +libatomic +libstdcpp
  MAINTAINER:=Ken Keys <kkeys@caida.org>
endef

define Package/protobuf
  $(call Package/protobuf/Default)
  DEPENDS+=+protobuf-lite
endef

define Package/protobuf-lite
  $(call Package/protobuf/Default)
endef

define Package/protobuf/description/Default
Protocol Buffers are a way of encoding structured data in an efficient
yet extensible format. Google uses Protocol Buffers for almost all
of its internal RPC protocols and file formats.
endef

define Package/protobuf/description
$(call Package/protobuf/description/Default)
This package provides the libprotoc, libprotobuf, and libprotobuf-lite
libraries. For a much smaller protobuf package, see "protobuf-lite".
endef

define Package/protobuf-lite/description
$(call Package/protobuf/description/Default)
This package provides the libprotobuf-lite library.
endef

# protobuf 3.x requires at least C++11.
EXTRA_CPPFLAGS+=-std=c++11

# Point configure at the host protoc; the freshly cross-compiled one
# cannot run on the build machine.
CONFIGURE_ARGS += --with-protoc=$(STAGING_DIR_HOSTPKG)/bin/protoc

define Build/InstallDev
	$(INSTALL_DIR) \
		$(1)/usr/lib \
		$(1)/usr/include
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/include/* \
		$(1)/usr/include/
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/* \
		$(1)/usr/lib/
endef

define Package/protobuf-lite/install
	$(INSTALL_DIR) \
		$(1)/usr/lib
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/libprotobuf-lite.so* \
		$(1)/usr/lib/
endef

define Package/protobuf/install
	$(INSTALL_DIR) \
		$(1)/usr/lib
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/libprotoc.so* \
		$(1)/usr/lib/
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/libprotobuf.so* \
		$(1)/usr/lib/
endef

$(eval $(call BuildPackage,protobuf))
$(eval $(call BuildPackage,protobuf-lite))
$(eval $(call HostBuild))

View File

@@ -0,0 +1,52 @@
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk

PKG_NAME:=qca-nss-dp
PKG_SOURCE_PROTO:=git
PKG_BRANCH:=master
PKG_RELEASE:=1

include $(INCLUDE_DIR)/package.mk

define KernelPackage/qca-nss-dp
  SECTION:=kernel
  CATEGORY:=Kernel modules
  SUBMENU:=Network Devices
  DEPENDS:=@TARGET_ipq807x +kmod-qca-ssdk
  TITLE:=Kernel driver for NSS data plane
  FILES:=$(PKG_BUILD_DIR)/qca-nss-dp.ko
  AUTOLOAD:=$(call AutoLoad,31,qca-nss-dp)
endef

# Fixed: the section name must be lower-case "description" for the
# OpenWrt build system to pick it up; the capitalized "Description"
# section was silently ignored.
define KernelPackage/qca-nss-dp/description
This package contains a NSS data plane driver for QCA chipset
endef

# Stage the exported headers for packages building against this driver.
define Build/InstallDev
	mkdir -p $(1)/usr/include/qca-nss-dp
	$(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-dp/
endef

# SSDK headers are staged by the kmod-qca-ssdk dependency.
EXTRA_CFLAGS+= \
	-I$(STAGING_DIR)/usr/include/qca-ssdk

subtarget:=$(SUBTARGET)

NSS_DP_HAL_DIR:=$(PKG_BUILD_DIR)/hal
hal_arch:=$(subtarget)

# Expose the SoC-specific HAL header under the generic name that the
# exported headers expect.
define Build/Configure
	$(LN) $(NSS_DP_HAL_DIR)/arch/$(hal_arch)/nss_$(hal_arch).h \
		$(PKG_BUILD_DIR)/exports/nss_dp_arch.h
endef

define Build/Compile
	$(MAKE) -C "$(LINUX_DIR)" \
		CROSS_COMPILE="$(TARGET_CROSS)" \
		ARCH="$(LINUX_KARCH)" \
		M="$(PKG_BUILD_DIR)" \
		EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \
		modules
endef

$(eval $(call KernelPackage,qca-nss-dp))

View File

@@ -0,0 +1,56 @@
###################################################
# Makefile for the NSS data plane driver
###################################################

# Kbuild object directory; defaults to "." when the kernel build system
# does not pass obj=.
obj ?= .

obj-m += qca-nss-dp.o

# Core objects common to every supported SoC.
qca-nss-dp-objs += nss_dp_attach.o \
		   nss_dp_ethtools.o \
		   nss_dp_main.o

# switchdev glue is compiled only when the kernel provides switchdev.
ifneq ($(CONFIG_NET_SWITCHDEV),)
qca-nss-dp-objs += nss_dp_switchdev.o
endif

# EDMA data plane plus qcom/Synopsys xgmac HAL ops for ipq807x/ipq60xx.
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
qca-nss-dp-objs += hal/edma/edma_cfg.o \
		   hal/edma/edma_data_plane.o \
		   hal/edma/edma_tx_rx.o \
		   hal/gmac_hal_ops/qcom/qcom_if.o \
		   hal/gmac_hal_ops/syn/xgmac/syn_if.o
endif

NSS_DP_INCLUDE = -I$(obj)/include -I$(obj)/exports -I$(obj)/gmac_hal_ops/include \
		 -I$(obj)/hal/include

# ipq50xx additionally needs the Synopsys GMAC HAL headers.
ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
NSS_DP_INCLUDE += -I$(obj)/hal/gmac_hal_ops/syn/gmac
endif

ccflags-y += $(NSS_DP_INCLUDE)
ccflags-y += -Werror

# PPE support exists only on the EDMA-capable SoCs.
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
ccflags-y += -DNSS_DP_PPE_SUPPORT
endif

ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64))
qca-nss-dp-objs += hal/arch/ipq60xx/nss_ipq60xx.o
ccflags-y += -DNSS_DP_IPQ60XX
endif

ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64))
qca-nss-dp-objs += hal/arch/ipq807x/nss_ipq807x.o
ccflags-y += -DNSS_DP_IPQ807X
endif

# ipq50xx uses the Synopsys GMAC data plane instead of EDMA.
ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
qca-nss-dp-objs += hal/arch/ipq50xx/nss_ipq50xx.o \
		   hal/gmac_hal_ops/syn/gmac/syn_if.o \
		   hal/syn_gmac_dp/syn_data_plane.o \
		   hal/syn_gmac_dp/syn_dp_tx_rx.o \
		   hal/syn_gmac_dp/syn_dp_cfg.o
ccflags-y += -DNSS_DP_IPQ50XX
endif

View File

@@ -0,0 +1,219 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/**
* @file nss_dp_api_if.h
* nss-dp exported structures/apis.
*
* This file declares all the public interfaces
* for NSS data-plane driver.
*/
#ifndef __NSS_DP_API_IF_H
#define __NSS_DP_API_IF_H
#include "nss_dp_arch.h"
/**
* @addtogroup nss_dp_subsystem
* @{
*/
/*
* NSS DP status
*/
#define NSS_DP_SUCCESS 0
#define NSS_DP_FAILURE -1
/*
* NSS DP platform specific defines
*/
#define NSS_DP_START_IFNUM NSS_DP_HAL_START_IFNUM
/**< First GMAC interface number (0/1) depending on SoC. */
#define NSS_DP_MAX_MTU_SIZE NSS_DP_HAL_MAX_MTU_SIZE
#define NSS_DP_MAX_PACKET_LEN NSS_DP_HAL_MAX_PACKET_LEN
#define NSS_DP_MAX_INTERFACES (NSS_DP_HAL_MAX_PORTS + NSS_DP_HAL_START_IFNUM)
/**< Last interface index for the SoC, to be used by qca-nss-drv. */
/*
* NSS PTP service code
*/
#define NSS_PTP_EVENT_SERVICE_CODE 0x9
/**
* nss_dp_data_plane_ctx
* Data plane context base class.
*/
struct nss_dp_data_plane_ctx {
struct net_device *dev;
};
/**
* nss_dp_gmac_stats
* The per-GMAC statistics structure.
*/
struct nss_dp_gmac_stats {
struct nss_dp_hal_gmac_stats stats;
};
/**
* nss_dp_data_plane_ops
* Per data-plane ops structure.
*
* Default would be slowpath and can be overridden by nss-drv
*/
struct nss_dp_data_plane_ops {
int (*init)(struct nss_dp_data_plane_ctx *dpc);
int (*open)(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring,
uint32_t rx_desc_ring, uint32_t mode);
int (*close)(struct nss_dp_data_plane_ctx *dpc);
int (*link_state)(struct nss_dp_data_plane_ctx *dpc,
uint32_t link_state);
int (*mac_addr)(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr);
int (*change_mtu)(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu);
netdev_tx_t (*xmit)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *os_buf);
void (*set_features)(struct nss_dp_data_plane_ctx *dpc);
int (*pause_on_off)(struct nss_dp_data_plane_ctx *dpc,
uint32_t pause_on);
int (*vsi_assign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi);
int (*vsi_unassign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi);
int (*rx_flow_steer)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb,
uint32_t cpu, bool is_add);
void (*get_stats)(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats);
int (*deinit)(struct nss_dp_data_plane_ctx *dpc);
};
/**
* nss_dp_receive
* Called by overlay drivers to deliver packets to nss-dp.
*
* @datatypes
* net_device
* sk_buff
* napi_struct
*
* @param[in] netdev Pointer to netdev structure on which packet is received.
* @param[in] skb Pointer to the received packet.
* @param[in] napi Pointer to napi context.
*/
void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb,
struct napi_struct *napi);
/**
* nss_dp_is_in_open_state
* Returns if a data plane is opened or not.
*
* @datatypes
* net_device
*
* @param[in] netdev Pointer to netdev structure.
*
* @return
* bool
*/
bool nss_dp_is_in_open_state(struct net_device *netdev);
/**
 * nss_dp_override_data_plane
* API to allow overlay drivers to override the data plane.
*
* @datatypes
* net_device
* nss_dp_data_plane_ops
* nss_dp_data_plane_ctx
*
* @param[in] netdev Pointer to netdev structure.
* @param[in] dp_ops Pointer to respective data plane ops structure.
* @param[in] dpc Pointer to data plane context.
*
* @return
* int
*/
int nss_dp_override_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ops *dp_ops,
struct nss_dp_data_plane_ctx *dpc);
/**
* nss_dp_start_data_plane
* Dataplane API to inform netdev when it is ready to start.
*
* @datatypes
* net_device
* nss_dp_data_plane_ctx
*
* @param[in] netdev Pointer to netdev structure.
* @param[in] dpc Pointer to data plane context.
*/
void nss_dp_start_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ctx *dpc);
/**
* nss_dp_restore_data_plane
* Called by overlay drivers to detach itself from nss-dp.
*
* @datatypes
* net_device
*
* @param[in] netdev Pointer to netdev structure.
*/
void nss_dp_restore_data_plane(struct net_device *netdev);
/**
* nss_dp_get_netdev_by_nss_if_num
* Returns the net device of the corresponding id if it exists.
*
* @datatypes
* int
*
* @param[in] interface ID of the physical mac port.
*
* @return
* Pointer to netdev structure.
*/
struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num);
/**
* nss_phy_tstamp_rx_buf
* Receive timestamp packet.
*
* @datatypes
* sk_buff
*
* @param[in] app_data Pointer to the application context of the message.
* @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_rx_buf(void *app_data, struct sk_buff *skb);
/**
* nss_phy_tstamp_tx_buf
* Transmit timestamp packet
*
* @datatypes
* net_device
* sk_buff
*
* @param[in] net_device Pointer to netdev structure.
* @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb);
/**
*@}
*/
#endif /** __NSS_DP_API_IF_H */

View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/qcom_scm.h>
#include "nss_dp_hal.h"
/*
 * nss_dp_hal_tcsr_base_get
 *	Look up the TCSR block base address in the device tree.
 *
 * Returns the value of the "qcom,tcsr-base" property of the
 * "nss-dp-common" node, or 0 if the node/property is absent.
 */
static uint32_t nss_dp_hal_tcsr_base_get(void)
{
	struct device_node *node;
	uint32_t base = 0;

	/*
	 * The base address is published by the common NSS DP node.
	 */
	node = of_find_node_by_name(NULL, "nss-dp-common");
	if (!node) {
		pr_info("%s: NSS DP common node not found\n", __func__);
		return 0;
	}

	if (of_property_read_u32(node, "qcom,tcsr-base", &base))
		pr_err("%s: error reading TCSR base\n", __func__);

	of_node_put(node);
	return base;
}
/*
* nss_dp_hal_tcsr_set
* Sets the TCSR axi cache override register
*/
static void nss_dp_hal_tcsr_set(void)
{
void __iomem *tcsr_addr = NULL;
uint32_t tcsr_base;
int err;
tcsr_base = nss_dp_hal_tcsr_base_get();
if (!tcsr_base) {
pr_err("%s: Unable to get TCSR base address\n", __func__);
return;
}
/*
* Check if Trust Zone is enabled in the system.
* If yes, we need to go through SCM API call to program TCSR register.
* If TZ is not enabled, we can write to the register directly.
*/
if (qcom_scm_is_available()) {
err = qcom_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE);
if (err) {
pr_err("%s: SCM TCSR write error: %d\n", __func__, err);
}
} else {
tcsr_addr = ioremap_nocache((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE);
if (!tcsr_addr) {
pr_err("%s: ioremap failed\n", __func__);
return;
}
writel(TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE, tcsr_addr);
iounmap(tcsr_addr);
}
}
/*
 * nss_dp_hal_get_data_plane_ops
 *	Hand back the GMAC data plane operations table.
 */
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
	struct nss_dp_data_plane_ops *ops = &nss_dp_gmac_ops;

	return ops;
}
/*
 * nss_dp_hal_clk_enable
 *	Enable GCC_SNOC_GMAC_AXI_CLK for the given data plane device.
 *
 * This clock is required for GMAC operations. Failures are logged and
 * swallowed, preserving this helper's original best-effort contract.
 *
 * @param[in] dp_priv  Data plane device whose platform device owns the clock.
 */
void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv)
{
	struct platform_device *pdev = dp_priv->pdev;
	struct device *dev = &pdev->dev;
	struct clk *gmac_clk;
	int err;

	/*
	 * devm-managed handle: no explicit clk_put() is needed.
	 * Include the error code in the log (the original message
	 * dropped it, making get-failures hard to diagnose).
	 */
	gmac_clk = devm_clk_get(dev, NSS_SNOC_GMAC_AXI_CLK);
	if (IS_ERR(gmac_clk)) {
		pr_err("%s: cannot get clock: %s, err: %ld\n", __func__,
				NSS_SNOC_GMAC_AXI_CLK, PTR_ERR(gmac_clk));
		return;
	}

	err = clk_prepare_enable(gmac_clk);
	if (err) {
		pr_err("%s: cannot enable clock: %s, err: %d\n", __func__,
				NSS_SNOC_GMAC_AXI_CLK, err);
	}
}
/*
 * nss_dp_hal_init
 *	Register the Synopsys GMAC ops and program the TCSR override.
 *
 * Always returns true on this platform.
 */
bool nss_dp_hal_init(void)
{
	/* Only the Synopsys GMAC HAL exists on this target. */
	nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_GMAC);

	/*
	 * Program the global GMAC AXI cache override register for
	 * optimized AXI DMA operation.
	 */
	nss_dp_hal_tcsr_set();

	return true;
}
/*
 * nss_dp_hal_cleanup
 *	Drop the registered Synopsys GMAC ops.
 */
void nss_dp_hal_cleanup(void)
{
	nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_GMAC);
}

View File

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__

/*
 * Platform limits for this (Synopsys GMAC) target.
 */
#define NSS_DP_HAL_MAX_PORTS 2
#define NSS_DP_HAL_CPU_NUM 2
#define NSS_DP_HAL_START_IFNUM 0

/*
 * MTU tiers supported by the GMAC and the derived platform maximums.
 */
#define NSS_DP_GMAC_NORMAL_FRAME_MTU 1500
#define NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU 1978
#define NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU 9000
#define NSS_DP_HAL_MAX_MTU_SIZE NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU
#define NSS_DP_HAL_MAX_PACKET_LEN 65535

/*
 * TCSR_GMAC_AXI_CACHE_OVERRIDE register size
 */
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE 4

/*
 * TCSR_GMAC_AXI_CACHE_OVERRIDE Register offset (from the TCSR base
 * published in the device tree)
 */
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET 0x6224

/*
 * Value for TCSR_GMAC_AXI_CACHE_OVERRIDE register
 */
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE 0x05050505

/*
 * GCC_SNOC_GMAC_AXI_CLOCK clock name, as looked up via devm_clk_get()
 */
#define NSS_SNOC_GMAC_AXI_CLK "nss-snoc-gmac-axi-clk"

/**
 * nss_dp_hal_gmac_stats
 *	The per-GMAC statistics structure.
 */
struct nss_dp_hal_gmac_stats {
	uint64_t rx_bytes;		/**< Number of RX bytes */
	uint64_t rx_packets;		/**< Number of RX packets */
	uint64_t rx_errors;		/**< Number of RX errors */
	uint64_t rx_receive_errors;	/**< Number of RX receive errors */
	uint64_t rx_descriptor_errors;	/**< Number of RX descriptor errors */
	uint64_t rx_late_collision_errors;
					/**< Number of RX late collision errors */
	uint64_t rx_dribble_bit_errors;	/**< Number of RX dribble bit errors */
	uint64_t rx_length_errors;	/**< Number of RX length errors */
	uint64_t rx_ip_header_errors;	/**< Number of RX IP header errors read from rxdec */
	uint64_t rx_ip_payload_errors;	/**< Number of RX IP payload errors */
	uint64_t rx_no_buffer_errors;	/**< Number of RX no-buffer errors */
	uint64_t rx_transport_csum_bypassed;
					/**< Number of RX packets where the transport checksum was bypassed */
	uint64_t tx_bytes;		/**< Number of TX bytes */
	uint64_t tx_packets;		/**< Number of TX packets */
	uint64_t tx_collisions;		/**< Number of TX collisions */
	uint64_t tx_errors;		/**< Number of TX errors */
	uint64_t tx_jabber_timeout_errors;
					/**< Number of TX jabber timeout errors */
	uint64_t tx_frame_flushed_errors;
					/**< Number of TX frame flushed errors */
	uint64_t tx_loss_of_carrier_errors;
					/**< Number of TX loss of carrier errors */
	uint64_t tx_no_carrier_errors;	/**< Number of TX no carrier errors */
	uint64_t tx_late_collision_errors;
					/**< Number of TX late collision errors */
	uint64_t tx_excessive_collision_errors;
					/**< Number of TX excessive collision errors */
	uint64_t tx_excessive_deferral_errors;
					/**< Number of TX excessive deferral errors */
	uint64_t tx_underflow_errors;	/**< Number of TX underflow errors */
	uint64_t tx_ip_header_errors;	/**< Number of TX IP header errors */
	uint64_t tx_ip_payload_errors;	/**< Number of TX IP payload errors */
	uint64_t tx_dropped;		/**< Number of TX dropped packets */
	uint64_t hw_errs[10];		/**< GMAC DMA error counters */
	uint64_t rx_missed;		/**< Number of RX packets missed by the DMA */
	uint64_t fifo_overflows;	/**< Number of RX FIFO overflows signalled by the DMA */
	uint64_t rx_scatter_errors;	/**< Number of scattered frames received by the DMA */
	uint64_t tx_ts_create_errors;	/**< Number of tx timestamp creation errors */
	uint64_t gmac_total_ticks;	/**< Total clock ticks spent inside the GMAC */
	uint64_t gmac_worst_case_ticks;	/**< Worst case iteration of the GMAC in ticks */
	uint64_t gmac_iterations;	/**< Number of iterations around the GMAC */
	uint64_t tx_pause_frames;	/**< Number of pause frames sent by the GMAC */
	uint64_t mmc_rx_overflow_errors;
					/**< Number of RX overflow errors */
	uint64_t mmc_rx_watchdog_timeout_errors;
					/**< Number of RX watchdog timeout errors */
	uint64_t mmc_rx_crc_errors;	/**< Number of RX CRC errors */
	uint64_t mmc_rx_ip_header_errors;
					/**< Number of RX IP header errors read from MMC counter */
	uint64_t mmc_rx_octets_g;
					/**< Number of good octets received */
	uint64_t mmc_rx_ucast_frames;	/**< Number of Unicast frames received */
	uint64_t mmc_rx_bcast_frames;	/**< Number of Bcast frames received */
	uint64_t mmc_rx_mcast_frames;	/**< Number of Mcast frames received */
	uint64_t mmc_rx_undersize;
					/**< Number of RX undersize frames */
	uint64_t mmc_rx_oversize;
					/**< Number of RX oversize frames */
	uint64_t mmc_rx_jabber;		/**< Number of jabber frames */
	uint64_t mmc_rx_octets_gb;
					/**< Number of good/bad octets */
	uint64_t mmc_rx_frag_frames_g;	/**< Number of good ipv4 frag frames */
	uint64_t mmc_tx_octets_g;	/**< Number of good octets sent */
	uint64_t mmc_tx_ucast_frames;	/**< Number of Unicast frames sent */
	uint64_t mmc_tx_bcast_frames;	/**< Number of Broadcast frames sent */
	uint64_t mmc_tx_mcast_frames;	/**< Number of Multicast frames sent */
	uint64_t mmc_tx_deferred;	/**< Number of Deferred frames sent */
	uint64_t mmc_tx_single_col;	/**< Number of single collisions */
	uint64_t mmc_tx_multiple_col;	/**< Number of multiple collisions */
	uint64_t mmc_tx_octets_gb;	/**< Number of good/bad octets sent */
};

/* Data plane ops for the GMAC data plane, defined in the GMAC HAL. */
extern struct nss_dp_data_plane_ops nss_dp_gmac_ops;

#endif /* __NSS_DP_ARCH_H__ */

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "nss_dp_hal.h"
#include "edma.h"
/*
 * nss_dp_hal_get_data_plane_ops()
 *	Hand back the EDMA data plane operations table.
 */
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
	struct nss_dp_data_plane_ops *ops = &nss_dp_edma_ops;

	return ops;
}
/*
 * nss_dp_hal_init()
 *	Register the MAC HALs and bring up the EDMA engine.
 *
 * Returns false when edma_init() reports failure.
 */
bool nss_dp_hal_init(void)
{
	/* Both the QCOM and Synopsys XGMAC HALs exist on this SoC. */
	nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
	nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);

	/* edma_init() returns non-zero on failure. */
	return !edma_init();
}
/*
 * nss_dp_hal_cleanup()
 *	Drop the registered MAC HALs and tear down the EDMA engine.
 */
void nss_dp_hal_cleanup(void)
{
	/* Unregister in the reverse order of registration. */
	nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);
	nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
	edma_cleanup(false);
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__

/*
 * Platform limits for this (EDMA, 5-port) target.
 */
#define NSS_DP_HAL_MAX_PORTS 5
#define NSS_DP_HAL_CPU_NUM 4
#define NSS_DP_HAL_START_IFNUM 1
#define NSS_DP_HAL_MAX_MTU_SIZE 9216
#define NSS_DP_HAL_MAX_PACKET_LEN 65535
#define NSS_DP_PREHEADER_SIZE 32

/**
 * nss_dp_hal_gmac_stats
 *	The per-GMAC statistics structure.
 *
 * NOTE(review): intentionally empty on this target — presumably the
 * software GMAC counters are unused here; the type exists only to keep
 * the common driver API uniform across platforms. Confirm before adding
 * fields.
 */
struct nss_dp_hal_gmac_stats {
};

#endif /* __NSS_DP_ARCH_H__ */

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "nss_dp_hal.h"
#include "edma.h"
/*
 * nss_dp_hal_get_data_plane_ops()
 *	Expose the EDMA data plane ops used by this platform.
 */
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
	return &nss_dp_edma_ops;
}
/*
 * nss_dp_hal_init()
 *	Register the MAC HALs, then initialize the EDMA engine.
 *
 * Returns true on success, false when EDMA initialization fails.
 */
bool nss_dp_hal_init(void)
{
	nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);
	nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);

	if (edma_init() != 0)
		return false;

	return true;
}
/*
 * nss_dp_hal_cleanup()
 *	Clear the registered MAC HALs and shut the EDMA engine down.
 */
void nss_dp_hal_cleanup(void)
{
	nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
	nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);

	/* false: not a recovery-path cleanup. */
	edma_cleanup(false);
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__

/*
 * Platform limits for this (EDMA, 6-port) target.
 */
#define NSS_DP_HAL_MAX_PORTS 6
#define NSS_DP_HAL_CPU_NUM 4
#define NSS_DP_HAL_START_IFNUM 1
#define NSS_DP_HAL_MAX_MTU_SIZE 9216
#define NSS_DP_HAL_MAX_PACKET_LEN 65535
#define NSS_DP_PREHEADER_SIZE 32

/**
 * nss_dp_hal_gmac_stats
 *	The per-GMAC statistics structure.
 *
 * NOTE(review): intentionally empty on this target — presumably software
 * GMAC counters are not maintained here and the type only keeps the common
 * API uniform. Confirm before adding fields.
 */
struct nss_dp_hal_gmac_stats {
};

#endif /* __NSS_DP_ARCH_H__ */

View File

@@ -0,0 +1,967 @@
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/reset.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
/* Reset control id passed to devm_reset_control_get() in edma_hw_reset(). */
#define EDMA_HW_RESET_ID "edma_rst"
/*
 * edma_cleanup_rxfill_ring_res()
 *	Cleanup resources for one RxFill ring
 *
 * Walks the descriptors between the hardware consumer and producer
 * indices, unmapping and freeing the skb attached to each, then releases
 * the descriptor memory.
 */
static void edma_cleanup_rxfill_ring_res(struct edma_hw *ehw,
				struct edma_rxfill_ring *rxfill_ring)
{
	struct platform_device *pdev = ehw->pdev;
	struct edma_rxfill_desc *desc;
	struct edma_rx_preheader *prehdr;
	struct sk_buff *skb;
	uint16_t prod, cons;
	int slot;

	/*
	 * Snapshot the hardware producer and consumer indices.
	 */
	prod = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id)) &
			EDMA_RXFILL_PROD_IDX_MASK;
	cons = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id)) &
			EDMA_RXFILL_CONS_IDX_MASK;

	while (prod != cons) {
		desc = EDMA_RXFILL_DESC(rxfill_ring, cons);

		/*
		 * The Rx preheader at the head of the buffer records the
		 * skb store slot in its opaque field.
		 */
		prehdr = (struct edma_rx_preheader *)
				phys_to_virt(desc->buffer_addr);
		dma_unmap_single(&pdev->dev, desc->buffer_addr,
				EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE);

		slot = prehdr->opaque;
		skb = ehw->rx_skb_store[slot];
		ehw->rx_skb_store[slot] = NULL;
		dev_kfree_skb_any(skb);

		if (++cons == rxfill_ring->count)
			cons = 0;
	}

	/*
	 * Release the descriptor memory itself.
	 */
	dma_free_coherent(&pdev->dev,
			(sizeof(struct edma_rxfill_desc)
			* rxfill_ring->count),
			rxfill_ring->desc, rxfill_ring->dma);
}
/*
* edma_setup_rxfill_ring_res()
* Setup resources for one RxFill ring
*/
static int edma_setup_rxfill_ring_res(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate RxFill ring descriptors
*/
rxfill_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_rxfill_desc)
* rxfill_ring->count),
&rxfill_ring->dma, GFP_KERNEL);
if (!rxfill_ring->desc) {
pr_warn("Descriptor alloc for RXFILL ring %u failed\n",
rxfill_ring->id);
return -ENOMEM;
}
spin_lock_init(&rxfill_ring->lock);
return 0;
}
/*
* edma_setup_rxdesc_ring_res()
* Setup resources for one RxDesc ring
*/
static int edma_setup_rxdesc_ring_res(struct edma_hw *ehw,
struct edma_rxdesc_ring *rxdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate RxDesc ring descriptors
*/
rxdesc_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_rxdesc_desc)
* rxdesc_ring->count),
&rxdesc_ring->dma, GFP_KERNEL);
if (!rxdesc_ring->desc) {
pr_warn("Descriptor alloc for RXDESC ring %u failed\n",
rxdesc_ring->id);
return -ENOMEM;
}
return 0;
}
/*
 * edma_cleanup_rxdesc_ring_res()
 *	Cleanup resources for RxDesc ring
 *
 * Frees every skb still referenced between the hardware consumer and
 * producer indices, then releases the descriptor memory.
 */
static void edma_cleanup_rxdesc_ring_res(struct edma_hw *ehw,
				struct edma_rxdesc_ring *rxdesc_ring)
{
	struct platform_device *pdev = ehw->pdev;
	struct edma_rxdesc_desc *desc;
	struct edma_rx_preheader *prehdr;
	struct sk_buff *skb;
	uint16_t prod, cons;
	int slot;

	cons = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id))
			& EDMA_RXDESC_CONS_IDX_MASK;
	prod = edma_reg_read(EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id))
			& EDMA_RXDESC_PROD_IDX_MASK;

	/*
	 * Free any buffers assigned to any descriptors
	 */
	while (cons != prod) {
		desc = EDMA_RXDESC_DESC(rxdesc_ring, cons);

		/* The preheader's opaque field indexes the skb store. */
		prehdr = (struct edma_rx_preheader *)
				phys_to_virt(desc->buffer_addr);
		dma_unmap_single(&pdev->dev, desc->buffer_addr,
				EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE);

		slot = prehdr->opaque;
		skb = ehw->rx_skb_store[slot];
		ehw->rx_skb_store[slot] = NULL;
		dev_kfree_skb_any(skb);

		if (++cons == rxdesc_ring->count)
			cons = 0;
	}

	/*
	 * Free RXDESC ring descriptors
	 */
	dma_free_coherent(&pdev->dev,
			(sizeof(struct edma_rxdesc_desc)
			* rxdesc_ring->count),
			rxdesc_ring->desc, rxdesc_ring->dma);
}
/*
 * edma_cleanup_txcmpl_ring_res()
 *	Cleanup resources for one TxCmpl ring
 */
static void edma_cleanup_txcmpl_ring_res(struct edma_hw *ehw,
				struct edma_txcmpl_ring *txcmpl_ring)
{
	struct platform_device *pdev = ehw->pdev;

	/* Reap any completed-but-unprocessed Tx buffers first. */
	edma_clean_tx(ehw, txcmpl_ring);

	/* Then release the descriptor memory. */
	dma_free_coherent(&pdev->dev,
			(sizeof(struct edma_txcmpl_desc)
			* txcmpl_ring->count),
			txcmpl_ring->desc, txcmpl_ring->dma);
}
/*
* edma_setup_txcmpl_ring_res()
* Setup resources for one TxCmpl ring
*/
static int edma_setup_txcmpl_ring_res(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate TxCmpl ring descriptors
*/
txcmpl_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_txcmpl_desc)
* txcmpl_ring->count),
&txcmpl_ring->dma, GFP_KERNEL);
if (!txcmpl_ring->desc) {
pr_warn("Descriptor alloc for TXCMPL ring %u failed\n",
txcmpl_ring->id);
return -ENOMEM;
}
return 0;
}
/*
 * edma_cleanup_txdesc_ring_res()
 *	Cleanup resources for one TxDesc ring
 *
 * Frees every skb still referenced between the hardware consumer and
 * producer indices, then releases the descriptor memory.
 */
static void edma_cleanup_txdesc_ring_res(struct edma_hw *ehw,
				struct edma_txdesc_ring *txdesc_ring)
{
	struct platform_device *pdev = ehw->pdev;
	struct sk_buff *skb = NULL;
	struct edma_txdesc_desc *txdesc = NULL;
	uint16_t prod_idx, cons_idx;
	size_t buf_len;
	uint32_t data;
	int store_idx;

	/*
	 * Free any buffers assigned to any descriptors
	 */
	data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
	prod_idx = data & EDMA_TXDESC_PROD_IDX_MASK;
	data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
	cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK;

	while (cons_idx != prod_idx) {
		txdesc = EDMA_TXDESC_DESC(txdesc_ring, cons_idx);

		/*
		 * buffer_addr holds the skb store slot on this path
		 * (opaque return mode).
		 */
		store_idx = txdesc->buffer_addr;
		skb = ehw->tx_skb_store[store_idx];
		ehw->tx_skb_store[store_idx] = NULL;

		buf_len = (txdesc->word1 & EDMA_TXDESC_DATA_LENGTH_MASK) >>
				EDMA_TXDESC_DATA_LENGTH_SHIFT;

		/*
		 * NOTE(review): unmapping (dma_addr_t)skb->data assumes the
		 * Tx path created the mapping from skb->data — confirm
		 * against the transmit map code.
		 */
		dma_unmap_single(&pdev->dev, (dma_addr_t)skb->data,
				buf_len + EDMA_TX_PREHDR_SIZE, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);

		/*
		 * Fix: advance the consumer index exactly once per
		 * descriptor. The previous code advanced it twice per
		 * iteration (a masked "+1" followed by another
		 * increment-and-wrap), skipping every other descriptor
		 * and leaking the skipped skbs.
		 */
		if (++cons_idx == txdesc_ring->count)
			cons_idx = 0;
	}

	/*
	 * Free Tx ring descriptors
	 */
	dma_free_coherent(&pdev->dev,
			(sizeof(struct edma_txdesc_desc)
			* txdesc_ring->count),
			txdesc_ring->desc, txdesc_ring->dma);
}
/*
* edma_setup_txdesc_ring_res()
* Setup resources for one TxDesc ring
*/
static int edma_setup_txdesc_ring_res(struct edma_hw *ehw,
struct edma_txdesc_ring *txdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate Tx ring descriptors
*/
txdesc_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_txdesc_desc)
* txdesc_ring->count),
&txdesc_ring->dma, GFP_KERNEL);
if (!txdesc_ring->desc) {
pr_warn("Descriptor alloc for TXDESC ring %u failed\n",
txdesc_ring->id);
return -ENOMEM;
}
spin_lock_init(&txdesc_ring->tx_lock);
return 0;
}
/*
 * edma_setup_ring_resources()
 *	Allocate/setup resources for EDMA rings
 *
 * On any failure, every ring resource that was successfully set up is
 * torn down again before returning -ENOMEM.
 */
static int edma_setup_ring_resources(struct edma_hw *ehw)
{
	struct edma_txcmpl_ring *txcmpl_ring = NULL;
	struct edma_txdesc_ring *txdesc_ring = NULL;
	struct edma_rxfill_ring *rxfill_ring = NULL;
	struct edma_rxdesc_ring *rxdesc_ring = NULL;
	int i;
	int ret;
	int index;

	/*
	 * Allocate TxDesc ring descriptors
	 */
	for (i = 0; i < ehw->txdesc_rings; i++) {
		txdesc_ring = &ehw->txdesc_ring[i];
		txdesc_ring->count = EDMA_RING_SIZE;
		txdesc_ring->id = ehw->txdesc_ring_start + i;
		ret = edma_setup_txdesc_ring_res(ehw, txdesc_ring);
		if (ret != 0) {
			/*
			 * Fix: unwind rings [0, i) with "--i" — the former
			 * "while (i-- >= 0)" form also touched index -1
			 * (out-of-bounds) once i reached 0.
			 */
			while (--i >= 0)
				edma_cleanup_txdesc_ring_res(ehw,
						&ehw->txdesc_ring[i]);
			return -ENOMEM;
		}
	}

	/*
	 * Allocate TxCmpl ring descriptors
	 */
	for (i = 0; i < ehw->txcmpl_rings; i++) {
		txcmpl_ring = &ehw->txcmpl_ring[i];
		txcmpl_ring->count = EDMA_RING_SIZE;
		txcmpl_ring->id = ehw->txcmpl_ring_start + i;
		ret = edma_setup_txcmpl_ring_res(ehw, txcmpl_ring);
		if (ret != 0) {
			/* Same out-of-bounds fix as the TxDesc unwind. */
			while (--i >= 0)
				edma_cleanup_txcmpl_ring_res(ehw,
						&ehw->txcmpl_ring[i]);
			goto txcmpl_mem_alloc_fail;
		}
	}

	/*
	 * Allocate Rx fill ring descriptors
	 */
	for (i = 0; i < ehw->rxfill_rings; i++) {
		rxfill_ring = &ehw->rxfill_ring[i];
		rxfill_ring->count = EDMA_RING_SIZE;
		rxfill_ring->id = ehw->rxfill_ring_start + i;
		ret = edma_setup_rxfill_ring_res(ehw, rxfill_ring);
		if (ret != 0) {
			while (--i >= 0)
				edma_cleanup_rxfill_ring_res(ehw,
						&ehw->rxfill_ring[i]);
			goto rxfill_mem_alloc_fail;
		}
	}

	/*
	 * Allocate RxDesc ring descriptors
	 */
	for (i = 0; i < ehw->rxdesc_rings; i++) {
		rxdesc_ring = &ehw->rxdesc_ring[i];
		rxdesc_ring->count = EDMA_RING_SIZE;
		rxdesc_ring->id = ehw->rxdesc_ring_start + i;

		/*
		 * Create a mapping between RX Desc ring and Rx fill ring.
		 * Number of fill rings are lesser than the descriptor rings
		 * Share the fill rings across descriptor rings.
		 */
		index = ehw->rxfill_ring_start + (i % ehw->rxfill_rings);
		rxdesc_ring->rxfill =
			&ehw->rxfill_ring[index - ehw->rxfill_ring_start];

		ret = edma_setup_rxdesc_ring_res(ehw, rxdesc_ring);
		if (ret != 0) {
			while (--i >= 0)
				edma_cleanup_rxdesc_ring_res(ehw,
						&ehw->rxdesc_ring[i]);
			goto rxdesc_mem_alloc_fail;
		}
	}

	return 0;

rxdesc_mem_alloc_fail:
	for (i = 0; i < ehw->rxfill_rings; i++)
		edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[i]);
rxfill_mem_alloc_fail:
	for (i = 0; i < ehw->txcmpl_rings; i++)
		edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[i]);
txcmpl_mem_alloc_fail:
	for (i = 0; i < ehw->txdesc_rings; i++)
		edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[i]);
	return -ENOMEM;
}
/*
 * edma_free_rings()
 *	Free EDMA software rings
 *
 * Releases the ring bookkeeping arrays allocated by edma_alloc_rings().
 */
static void edma_free_rings(struct edma_hw *ehw)
{
	kfree(ehw->txcmpl_ring);
	kfree(ehw->txdesc_ring);
	kfree(ehw->rxdesc_ring);
	kfree(ehw->rxfill_ring);
}
/*
 * edma_alloc_rings()
 *	Allocate EDMA software rings
 *
 * Allocates the zeroed per-ring bookkeeping arrays. Uses kcalloc() so the
 * count * size multiplication is overflow-checked (the original open-coded
 * multiply inside kzalloc() was not). Returns 0 on success, -ENOMEM on
 * failure with all partial allocations released.
 */
static int edma_alloc_rings(struct edma_hw *ehw)
{
	ehw->rxfill_ring = kcalloc(ehw->rxfill_rings,
				sizeof(struct edma_rxfill_ring), GFP_KERNEL);
	if (!ehw->rxfill_ring)
		return -ENOMEM;

	ehw->rxdesc_ring = kcalloc(ehw->rxdesc_rings,
				sizeof(struct edma_rxdesc_ring), GFP_KERNEL);
	if (!ehw->rxdesc_ring)
		goto rxdesc_ring_alloc_fail;

	ehw->txdesc_ring = kcalloc(ehw->txdesc_rings,
				sizeof(struct edma_txdesc_ring), GFP_KERNEL);
	if (!ehw->txdesc_ring)
		goto txdesc_ring_alloc_fail;

	ehw->txcmpl_ring = kcalloc(ehw->txcmpl_rings,
				sizeof(struct edma_txcmpl_ring), GFP_KERNEL);
	if (!ehw->txcmpl_ring)
		goto txcmpl_ring_alloc_fail;

	pr_info("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
		ehw->txdesc_rings, ehw->txdesc_ring_start,
		(ehw->txdesc_ring_start + ehw->txdesc_rings - 1),
		ehw->txcmpl_rings, ehw->txcmpl_ring_start,
		(ehw->txcmpl_ring_start + ehw->txcmpl_rings - 1));

	pr_info("RxDesc:%u (%u-%u) RxFill:%u (%u-%u)\n",
		ehw->rxdesc_rings, ehw->rxdesc_ring_start,
		(ehw->rxdesc_ring_start + ehw->rxdesc_rings - 1),
		ehw->rxfill_rings, ehw->rxfill_ring_start,
		(ehw->rxfill_ring_start + ehw->rxfill_rings - 1));

	return 0;

txcmpl_ring_alloc_fail:
	kfree(ehw->txdesc_ring);
txdesc_ring_alloc_fail:
	kfree(ehw->rxdesc_ring);
rxdesc_ring_alloc_fail:
	kfree(ehw->rxfill_ring);
	return -ENOMEM;
}
/*
 * edma_cleanup_rings()
 *	Cleanup EDMA rings
 *
 * Releases per-ring descriptor memory and buffers for every ring class,
 * then frees the software ring arrays themselves.
 */
void edma_cleanup_rings(struct edma_hw *ehw)
{
	int idx;

	/* Tx descriptor rings (including any outstanding buffers). */
	for (idx = 0; idx < ehw->txdesc_rings; idx++)
		edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[idx]);

	/* Tx completion rings. */
	for (idx = 0; idx < ehw->txcmpl_rings; idx++)
		edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[idx]);

	/* Rx fill rings. */
	for (idx = 0; idx < ehw->rxfill_rings; idx++)
		edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[idx]);

	/* Rx descriptor (completion) rings. */
	for (idx = 0; idx < ehw->rxdesc_rings; idx++)
		edma_cleanup_rxdesc_ring_res(ehw, &ehw->rxdesc_ring[idx]);

	edma_free_rings(ehw);
}
/*
 * edma_init_rings()
 *	Initialize EDMA rings
 *
 * Allocates the software ring arrays, then the per-ring descriptor
 * memory. Returns 0 on success or a negative errno.
 */
static int edma_init_rings(struct edma_hw *ehw)
{
	int ret;

	ret = edma_alloc_rings(ehw);
	if (ret)
		return ret;

	ret = edma_setup_ring_resources(ehw);
	if (ret) {
		/*
		 * Fix: the ring arrays allocated above were previously
		 * leaked when descriptor setup failed; release them
		 * before propagating the error.
		 */
		edma_free_rings(ehw);
		return ret;
	}

	return 0;
}
/*
 * edma_configure_txdesc_ring()
 *	Configure one TxDesc ring
 *
 * Programs the ring base address and size, then seeds the producer index
 * register from the consumer index register so the ring starts out empty.
 */
static void edma_configure_txdesc_ring(struct edma_hw *ehw,
				struct edma_txdesc_ring *txdesc_ring)
{
	uint32_t data = 0;
	uint16_t hw_cons_idx = 0;

	/*
	 * Configure TXDESC ring
	 */
	edma_reg_write(EDMA_REG_TXDESC_BA(txdesc_ring->id),
			(uint32_t)(txdesc_ring->dma &
			EDMA_RING_DMA_MASK));

	edma_reg_write(EDMA_REG_TXDESC_RING_SIZE(txdesc_ring->id),
			(uint32_t)(txdesc_ring->count &
			EDMA_TXDESC_RING_SIZE_MASK));

	/*
	 * NOTE(review): hw_cons_idx is taken from the consumer register
	 * AFTER the CONS_IDX field has been masked *out* (data &= ~MASK),
	 * so what reaches the producer register below is the register's
	 * non-index bits rather than the consumer index. If the intent is
	 * prod = cons, this should likely be
	 * "hw_cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK" — confirm
	 * against the EDMA register specification before changing.
	 */
	data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
	data &= ~(EDMA_TXDESC_CONS_IDX_MASK);
	hw_cons_idx = data;

	data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
	data &= ~(EDMA_TXDESC_PROD_IDX_MASK);
	data |= hw_cons_idx & EDMA_TXDESC_PROD_IDX_MASK;
	edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id), data);
}
/*
 * edma_configure_txcmpl_ring()
 *	Configure one TxCmpl ring
 *
 * Programs base address, size, return mode and the interrupt moderation
 * timer for the completion ring.
 */
static void edma_configure_txcmpl_ring(struct edma_hw *ehw,
				struct edma_txcmpl_ring *txcmpl_ring)
{
	uint32_t tx_mod_timer;

	/*
	 * Configure TxCmpl ring base address
	 */
	edma_reg_write(EDMA_REG_TXCMPL_BA(txcmpl_ring->id),
			(uint32_t)(txcmpl_ring->dma & EDMA_RING_DMA_MASK));

	/*
	 * NOTE(review): ring size is masked with EDMA_TXDESC_RING_SIZE_MASK
	 * (the TxDesc mask) — presumably the fields are identical; confirm.
	 */
	edma_reg_write(EDMA_REG_TXCMPL_RING_SIZE(txcmpl_ring->id),
			(uint32_t)(txcmpl_ring->count
			& EDMA_TXDESC_RING_SIZE_MASK));

	/*
	 * Set TxCmpl ret mode to opaque
	 */
	edma_reg_write(EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id),
			EDMA_TXCMPL_RETMODE_OPAQUE);

	/*
	 * Program the Tx interrupt moderation timer.
	 */
	tx_mod_timer = (EDMA_TX_MOD_TIMER & EDMA_TX_MOD_TIMER_INIT_MASK)
			<< EDMA_TX_MOD_TIMER_INIT_SHIFT;
	edma_reg_write(EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id),
			tx_mod_timer);

	/* NOTE(review): magic 0x2 — meaning not visible here; confirm
	 * against the EDMA TX_INT_CTRL register definition. */
	edma_reg_write(EDMA_REG_TX_INT_CTRL(txcmpl_ring->id), 0x2);
}
/*
 * edma_configure_rxdesc_ring()
 *	Configure one RxDesc ring
 *
 * Programs base address, size plus payload offset, and the Rx interrupt
 * moderation timer, then enables the ring.
 */
static void edma_configure_rxdesc_ring(struct edma_hw *ehw,
				struct edma_rxdesc_ring *rxdesc_ring)
{
	uint32_t data;

	/*
	 * NOTE(review): sibling ring setup uses EDMA_RING_DMA_MASK here
	 * instead of the literal 0xffffffff — presumably equivalent;
	 * confirm and unify.
	 */
	edma_reg_write(EDMA_REG_RXDESC_BA(rxdesc_ring->id),
			(uint32_t)(rxdesc_ring->dma & 0xffffffff));

	/*
	 * Ring size register also carries the Rx payload offset
	 * (space reserved for the Rx preheader).
	 */
	data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
	data |= (ehw->rx_payload_offset & EDMA_RXDESC_PL_OFFSET_MASK)
			<< EDMA_RXDESC_PL_OFFSET_SHIFT;
	edma_reg_write(EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->id), data);

	/*
	 * Program the Rx interrupt moderation timer.
	 */
	data = (EDMA_RX_MOD_TIMER_INIT & EDMA_RX_MOD_TIMER_INIT_MASK)
			<< EDMA_RX_MOD_TIMER_INIT_SHIFT;
	edma_reg_write(EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id), data);

	/*
	 * Enable ring. Set ret mode to 'opaque'.
	 */
	edma_reg_write(EDMA_REG_RX_INT_CTRL(rxdesc_ring->id), 0x2);
}
/*
 * edma_configure_rxfill_ring()
 *	Configure one RxFill ring
 *
 * Programs base address and size, then primes the ring with Rx buffers.
 */
static void edma_configure_rxfill_ring(struct edma_hw *ehw,
				struct edma_rxfill_ring *rxfill_ring)
{
	uint32_t ring_sz;

	edma_reg_write(EDMA_REG_RXFILL_BA(rxfill_ring->id),
			(uint32_t)(rxfill_ring->dma & EDMA_RING_DMA_MASK));

	ring_sz = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
	edma_reg_write(EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->id), ring_sz);

	/*
	 * Alloc Rx buffers
	 */
	edma_alloc_rx_buffer(ehw, rxfill_ring);
}
/*
 * edma_configure_rings()
 *	Configure EDMA rings
 *
 * Clears the skb bookkeeping stores and programs every ring class into
 * the hardware.
 */
static void edma_configure_rings(struct edma_hw *ehw)
{
	int idx;

	/*
	 * Start with empty Tx/Rx skb stores.
	 */
	for (idx = 0; idx < EDMA_RING_SIZE; idx++) {
		ehw->tx_skb_store[idx] = NULL;
		ehw->rx_skb_store[idx] = NULL;
	}

	/* Tx descriptor rings. */
	for (idx = 0; idx < ehw->txdesc_rings; idx++)
		edma_configure_txdesc_ring(ehw, &ehw->txdesc_ring[idx]);

	/* Tx completion rings. */
	for (idx = 0; idx < ehw->txcmpl_rings; idx++)
		edma_configure_txcmpl_ring(ehw, &ehw->txcmpl_ring[idx]);

	/* Rx fill rings (also primes them with buffers). */
	for (idx = 0; idx < ehw->rxfill_rings; idx++)
		edma_configure_rxfill_ring(ehw, &ehw->rxfill_ring[idx]);

	/* Rx descriptor rings. */
	for (idx = 0; idx < ehw->rxdesc_rings; idx++)
		edma_configure_rxdesc_ring(ehw, &ehw->rxdesc_ring[idx]);
}
/*
 * edma_hw_reset()
 *	Reset EDMA Hardware during initialization
 *
 * Asserts and de-asserts the "edma_rst" reset line with a short settle
 * delay on either side.
 *
 * Returns 0 on success, or -EINVAL if the reset control is not described
 * in the device tree.
 */
int edma_hw_reset(struct edma_hw *ehw)
{
	struct reset_control *rst;
	struct platform_device *pdev = ehw->pdev;

	rst = devm_reset_control_get(&pdev->dev, EDMA_HW_RESET_ID);
	if (IS_ERR(rst)) {
		pr_warn("DTS Node: %s does not exist\n", EDMA_HW_RESET_ID);
		return -EINVAL;
	}

	reset_control_assert(rst);
	udelay(100);

	reset_control_deassert(rst);
	udelay(100);

	/* Log message typo fixed: "succesfully" -> "successfully". */
	pr_info("EDMA HW Reset completed successfully\n");

	return 0;
}
/*
 * edma_hw_init()
 *	EDMA hw init
 *
 * Resets the EDMA block, disables all rings and interrupts, programs the
 * TXDESC->TXCMPL and RXDESC->RXFILL ring mappings, the PPE QID->RID table,
 * DMA arbitration parameters and the misc error mask, then enables the
 * rings used by this driver.
 *
 * Returns 0 on success or a negative errno from reset/ring setup.
 */
int edma_hw_init(struct edma_hw *ehw)
{
	int ret = 0;
	int desc_index;
	uint32_t i, data, reg = 0;
	struct edma_rxdesc_ring *rxdesc_ring = NULL;

	data = edma_reg_read(EDMA_REG_MAS_CTRL);
	pr_info("EDMA ver %d hw init\n", data);

	/*
	 * Setup private data structure
	 */
	ehw->misc_intr_mask = 0x0;
	ehw->rxfill_intr_mask = EDMA_RXFILL_INT_MASK;
	ehw->rxdesc_intr_mask = EDMA_RXDESC_INT_MASK_PKT_INT;
	ehw->txcmpl_intr_mask = EDMA_TX_INT_MASK_PKT_INT |
				EDMA_TX_INT_MASK_UGT_INT;
	ehw->rx_payload_offset = EDMA_RX_PREHDR_SIZE;
	ehw->active = 0;
	ehw->edma_initialized = false;

	/* Reset EDMA */
	ret = edma_hw_reset(ehw);
	if (ret)
		return ret;

	/*
	 * Disable interrupts
	 */
	for (i = 0; i < EDMA_MAX_TXCMPL_RINGS; i++)
		edma_reg_write(EDMA_REG_TX_INT_MASK(i), 0);

	for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++)
		edma_reg_write(EDMA_REG_RXFILL_INT_MASK(i), 0);

	for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++)
		edma_reg_write(EDMA_REG_RX_INT_CTRL(i), 0);

	/*
	 * Disable Rx rings
	 */
	for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++) {
		data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i));
		data &= ~EDMA_RXDESC_RX_EN;
		edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data);
	}

	/*
	 * Disable RxFill Rings
	 */
	for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++) {
		data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i));
		data &= ~EDMA_RXFILL_RING_EN;
		edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data);
	}

	/*
	 * Disable Tx rings
	 */
	for (desc_index = 0; desc_index < EDMA_MAX_TXDESC_RINGS; desc_index++) {
		data = edma_reg_read(EDMA_REG_TXDESC_CTRL(desc_index));
		data &= ~EDMA_TXDESC_TX_EN;
		edma_reg_write(EDMA_REG_TXDESC_CTRL(desc_index), data);
	}

#if defined(NSS_DP_IPQ807X)
	/*
	 * Clear the TXDESC2CMPL_MAP_xx reg before setting up
	 * the mapping. This register holds TXDESC to TXFILL ring
	 * mapping.
	 */
	edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_0, 0);
	edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_1, 0);
	edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_2, 0);
	desc_index = ehw->txcmpl_ring_start;

	/*
	 * 3 registers to hold the completion mapping for total 24
	 * TX desc rings (0-9,10-19 and rest). In each entry 3 bits hold
	 * the mapping for a particular TX desc ring.
	 */
	for (i = ehw->txdesc_ring_start;
			i < ehw->txdesc_ring_end; i++) {
		/*
		 * Fix: dropped the redundant "i >= 0" test; i is unsigned
		 * so the comparison was always true (compiler warning).
		 */
		if (i <= 9)
			reg = EDMA_REG_TXDESC2CMPL_MAP_0;
		else if (i >= 10 && i <= 19)
			reg = EDMA_REG_TXDESC2CMPL_MAP_1;
		else
			reg = EDMA_REG_TXDESC2CMPL_MAP_2;

		pr_debug("Configure TXDESC:%u to use TXCMPL:%u\n",
			 i, desc_index);

		/* TXCMPL ring id is 3 bits per TXDESC ring, 10 per register */
		data = edma_reg_read(reg);
		data |= (desc_index & 0x7) << ((i % 10) * 3);
		edma_reg_write(reg, data);

		desc_index++;
		if (desc_index == ehw->txcmpl_ring_end)
			desc_index = ehw->txcmpl_ring_start;
	}
#endif

	/*
	 * Set PPE QID to EDMA Rx ring mapping.
	 * When coming up use only queue 0.
	 * HOST EDMA rings. FW EDMA comes up and overwrites as required.
	 * Each entry can hold mapping for 8 PPE queues and entry size is
	 * 4 bytes
	 */
	desc_index = ehw->rxdesc_ring_start;
	data = 0;
	data |= (desc_index & 0xF);
	edma_reg_write(EDMA_QID2RID_TABLE_MEM(0), data);

	/*
	 * Fix: the original printed the stale local 'reg' here, which at
	 * this point holds 0 or a leftover TXDESC2CMPL register offset,
	 * not the register actually written above.
	 */
	pr_debug("Configure QID2RID reg:0x%x to 0x%x\n",
		 EDMA_QID2RID_TABLE_MEM(0), data);

	ret = edma_init_rings(ehw);
	if (ret)
		return ret;

	edma_configure_rings(ehw);

	/*
	 * Set RXDESC2FILL_MAP_xx reg.
	 * There are two registers RXDESC2FILL_0 and RXDESC2FILL_1
	 * 3 bits holds the rx fill ring mapping for each of the
	 * rx descriptor ring.
	 */
	edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_0, 0);
	edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_1, 0);
	for (i = ehw->rxdesc_ring_start;
			i < ehw->rxdesc_ring_end; i++) {
		/* "i >= 0" dropped here too; i is unsigned */
		if (i <= 9)
			reg = EDMA_REG_RXDESC2FILL_MAP_0;
		else
			reg = EDMA_REG_RXDESC2FILL_MAP_1;

		rxdesc_ring = &ehw->rxdesc_ring[i - ehw->rxdesc_ring_start];

		pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
			 rxdesc_ring->id, rxdesc_ring->rxfill->id);

		data = edma_reg_read(reg);
		data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
		edma_reg_write(reg, data);
	}

	reg = EDMA_REG_RXDESC2FILL_MAP_0;
	pr_debug("EDMA_REG_RXDESC2FILL_MAP_0: 0x%x\n", edma_reg_read(reg));
	reg = EDMA_REG_RXDESC2FILL_MAP_1;
	pr_debug("EDMA_REG_RXDESC2FILL_MAP_1: 0x%x\n", edma_reg_read(reg));

#if defined(NSS_DP_IPQ807X)
	reg = EDMA_REG_TXDESC2CMPL_MAP_0;
	pr_debug("EDMA_REG_TXDESC2CMPL_MAP_0: 0x%x\n", edma_reg_read(reg));
	reg = EDMA_REG_TXDESC2CMPL_MAP_1;
	pr_debug("EDMA_REG_TXDESC2CMPL_MAP_1: 0x%x\n", edma_reg_read(reg));
	reg = EDMA_REG_TXDESC2CMPL_MAP_2;
	pr_debug("EDMA_REG_TXDESC2CMPL_MAP_2: 0x%x\n", edma_reg_read(reg));
#endif

	/*
	 * Configure DMA request priority, DMA read burst length,
	 * and AXI write size.
	 */
	data = EDMA_DMAR_BURST_LEN_SET(EDMA_BURST_LEN_ENABLE)
		| EDMA_DMAR_REQ_PRI_SET(0)
		| EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(31)
		| EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(7)
		| EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(7);
	edma_reg_write(EDMA_REG_DMAR_CTRL, data);

#if defined(NSS_DP_IPQ60XX)
	data = edma_reg_read(EDMA_REG_AXIW_CTRL);
	data |= EDMA_AXIW_MAX_WR_SIZE_EN;
	edma_reg_write(EDMA_REG_AXIW_CTRL, data);
#endif

	/*
	 * Misc error mask
	 */
	data = EDMA_MISC_AXI_RD_ERR_MASK_EN |
		EDMA_MISC_AXI_WR_ERR_MASK_EN |
		EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN |
		EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN |
		EDMA_MISC_TX_SRAM_FULL_MASK_EN |
		EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN |
		EDMA_MISC_DATA_LEN_ERR_MASK_EN;
#if defined(NSS_DP_IPQ807X)
	data |= EDMA_MISC_PKT_LEN_LA_64K_MASK_EN |
		EDMA_MISC_PKT_LEN_LE_40_MASK_EN;
#else
	data |= EDMA_MISC_TX_TIMEOUT_MASK_EN;
#endif
	edma_reg_write(EDMA_REG_MISC_INT_MASK, data);

	/*
	 * Global EDMA enable and padding enable
	 */
	data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
	edma_reg_write(EDMA_REG_PORT_CTRL, data);

	/*
	 * Enable Rx rings
	 */
	for (i = ehw->rxdesc_ring_start; i < ehw->rxdesc_ring_end; i++) {
		data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i));
		data |= EDMA_RXDESC_RX_EN;
		edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data);
	}

	for (i = ehw->rxfill_ring_start; i < ehw->rxfill_ring_end; i++) {
		data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i));
		data |= EDMA_RXFILL_RING_EN;
		edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data);
	}

	/*
	 * Enable Tx rings
	 */
	for (i = ehw->txdesc_ring_start; i < ehw->txdesc_ring_end; i++) {
		data = edma_reg_read(EDMA_REG_TXDESC_CTRL(i));
		data |= EDMA_TXDESC_TX_EN;
		edma_reg_write(EDMA_REG_TXDESC_CTRL(i), data);
	}

	ehw->edma_initialized = true;

	return 0;
}

View File

@@ -0,0 +1,906 @@
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <fal/fal_vsi.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
/*
 * EDMA hardware instance
 *
 * Single global state object shared by every function in this file;
 * the driver operates on one EDMA block.
 */
struct edma_hw edma_hw;
/*
 * edma_get_port_num_from_netdev()
 *	Map a net_device back to its EDMA port number.
 *
 * Port-ids start at 1 while the lookup table is 0-based, hence the +1.
 * Returns -1 when the netdev is not registered with EDMA.
 */
static int edma_get_port_num_from_netdev(struct net_device *netdev)
{
	int idx;

	for (idx = 0; idx < EDMA_MAX_GMACS; idx++)
		if (edma_hw.netdev_arr[idx] == netdev)
			return idx + 1;

	return -1;
}
/*
 * edma_reg_read()
 *	Read a 32-bit EDMA register at the given offset from the mapped base.
 */
uint32_t edma_reg_read(uint32_t reg_off)
{
	void __iomem *addr = edma_hw.reg_base + reg_off;

	return (uint32_t)readl(addr);
}
/*
 * edma_reg_write()
 *	Write a 32-bit value to an EDMA register at the given offset.
 */
void edma_reg_write(uint32_t reg_off, uint32_t val)
{
	void __iomem *addr = edma_hw.reg_base + reg_off;

	writel(val, addr);
}
/*
 * edma_if_open()
 *	Slow path data plane open.
 *
 * Reference counted: NAPI is enabled only on the first open; subsequent
 * opens just increment the active count.
 */
static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
			uint32_t tx_desc_ring, uint32_t rx_desc_ring,
			uint32_t mode)
{
	if (!dpc->dev)
		return NSS_DP_FAILURE;

	/* Enable NAPI on the 0 -> 1 transition only */
	if (edma_hw.active++ == 0)
		napi_enable(&edma_hw.napi);

	return NSS_DP_SUCCESS;
}
/*
 * edma_if_close()
 *	Slow path data plane close.
 *
 * Counterpart of edma_if_open(): NAPI is disabled only when the last
 * active user closes.
 */
static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
{
	/* Disable NAPI on the 1 -> 0 transition only */
	if (--edma_hw.active == 0)
		napi_disable(&edma_hw.napi);

	return NSS_DP_SUCCESS;
}
/*
 * edma_if_link_state()
 *	Link state change callback; intentionally a no-op for the EDMA
 *	slow path — always reports success.
 */
static int edma_if_link_state(struct nss_dp_data_plane_ctx *dpc,
			      uint32_t link_state)
{
	return NSS_DP_SUCCESS;
}
/*
 * edma_if_mac_addr()
 *	MAC address change callback; intentionally a no-op for the EDMA
 *	slow path — always reports success.
 */
static int edma_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr)
{
	return NSS_DP_SUCCESS;
}
/*
 * edma_if_change_mtu()
 *	MTU change callback; intentionally a no-op for the EDMA slow
 *	path — always reports success.
 */
static int edma_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu)
{
	return NSS_DP_SUCCESS;
}
/*
 * edma_if_xmit()
 *	Transmit a packet using EDMA
 *
 * Drops runt and non-linear skbs, expands/unclones the skb when the
 * netdev head/tailroom requirements are not met, then queues it on a
 * Tx descriptor ring. Returns NETDEV_TX_BUSY (queue stopped) when the
 * ring is out of descriptors.
 */
static netdev_tx_t edma_if_xmit(struct nss_dp_data_plane_ctx *dpc,
				struct sk_buff *skb)
{
	struct net_device *netdev = dpc->dev;
	int ret;
	uint32_t tx_ring, skbq, nhead, ntail;
	bool expand_skb = false;

	if (skb->len < ETH_HLEN) {
		netdev_dbg(netdev, "skb->len < ETH_HLEN\n");
		goto drop;
	}

	/*
	 * Select a Tx ring from the skb queue mapping.
	 *
	 * Fix: the original computed "txdesc_rings % skbq", which is an
	 * arbitrary mapping and can yield an index >= txdesc_rings
	 * (e.g. 4 % 5 == 4 with only rings 0..3), overrunning the
	 * txdesc_ring array. "skbq % txdesc_rings" is the bounded,
	 * intended mapping.
	 */
	skbq = skb_get_queue_mapping(skb);
	tx_ring = 0;
	if ((edma_hw.txdesc_rings > 1) && (skbq > 0))
		tx_ring = skbq % edma_hw.txdesc_rings;

	/*
	 * Check for non-linear skb
	 */
	if (skb_is_nonlinear(skb)) {
		netdev_dbg(netdev, "cannot Tx non-linear skb:%px\n", skb);
		goto drop;
	}

	/*
	 * Check for headroom/tailroom and clone.
	 *
	 * Fix: the tailroom requirement was compared against
	 * skb_headroom(); it must be checked against skb_tailroom().
	 */
	nhead = netdev->needed_headroom;
	ntail = netdev->needed_tailroom;
	if (skb_cloned(skb) ||
	    (skb_headroom(skb) < nhead) ||
	    (skb_tailroom(skb) < ntail)) {
		expand_skb = true;
	}

	/*
	 * Expand the skb. This also unclones a cloned skb.
	 */
	if (expand_skb && pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC)) {
		netdev_dbg(netdev, "cannot expand skb:%px\n", skb);
		goto drop;
	}

	/*
	 * Transmit the packet
	 */
	ret = edma_ring_xmit(&edma_hw, netdev, skb,
			     &edma_hw.txdesc_ring[tx_ring]);
	if (ret == EDMA_TX_OK)
		return NETDEV_TX_OK;

	/*
	 * Not enough descriptors. Stop netdev Tx queue.
	 */
	if (ret == EDMA_TX_DESC) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

drop:
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
/*
 * edma_if_set_features()
 *	Set the supported net_device features.
 *
 * Currently a no-op.
 */
static void edma_if_set_features(struct nss_dp_data_plane_ctx *dpc)
{
	/*
	 * TODO - add flags to support HIGHMEM/cksum offload VLAN
	 * the features are enabled.
	 */
}
/* TODO - check if this is needed */
/*
 * edma_if_pause_on_off()
 *	Set pause frames on or off.
 *
 * No need to send a message if we defaulted to slow path; intentionally
 * a no-op that always reports success.
 */
static int edma_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc,
				uint32_t pause_on)
{
	return NSS_DP_SUCCESS;
}
/*
 * edma_if_vsi_assign()
 *	Assign a VSI to the data plane port backing this netdev.
 *
 * Resolves the EDMA port number for the netdev and programs the VSI via
 * the fal API. Fails if the netdev is unknown or the fal call errors.
 */
static int edma_if_vsi_assign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi)
{
	int32_t port_num = edma_get_port_num_from_netdev(dpc->dev);

	if (port_num < 0)
		return NSS_DP_FAILURE;

	return (fal_port_vsi_set(0, port_num, vsi) < 0) ?
		NSS_DP_FAILURE : NSS_DP_SUCCESS;
}
/*
 * edma_if_vsi_unassign()
 *	Unassign the VSI of the data plane port (programs 0xffff).
 *
 * Fix: port_num was declared uint32_t, so the "port_num < 0" failure
 * check was always false and an unknown netdev (lookup returns -1)
 * silently fell through into fal_port_vsi_set() with a huge port id.
 * Declared int32_t to match edma_if_vsi_assign().
 */
static int edma_if_vsi_unassign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi)
{
	struct net_device *netdev = dpc->dev;
	int32_t port_num;

	port_num = edma_get_port_num_from_netdev(netdev);
	if (port_num < 0)
		return NSS_DP_FAILURE;

	if (fal_port_vsi_set(0, port_num, 0xffff) < 0)
		return NSS_DP_FAILURE;

	return NSS_DP_SUCCESS;
}
#ifdef CONFIG_RFS_ACCEL
/*
 * edma_if_rx_flow_steer()
 *	Receive flow steering callback for data plane operation.
 *
 * Stub: currently a no-op that always reports success.
 */
static int edma_if_rx_flow_steer(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb,
				 uint32_t cpu, bool is_add)
{
	return NSS_DP_SUCCESS;
}
#endif
/*
 * edma_if_deinit()
 *	Free EDMA resources.
 *
 * EDMA is torn down only once every interface has been overridden;
 * earlier calls simply record one more override.
 */
static int edma_if_deinit(struct nss_dp_data_plane_ctx *dpc)
{
	if (edma_hw.dp_override_cnt == EDMA_MAX_GMACS - 1)
		edma_cleanup(true);
	else
		edma_hw.dp_override_cnt++;

	return NSS_DP_SUCCESS;
}
/*
 * edma_irq_init()
 *	Initialize interrupt handlers for the driver.
 *
 * Reads the IRQ numbers for all TXCMPL, RXFILL and RXDESC rings plus the
 * misc interrupt from the platform device (in that fixed order), requests
 * handlers for each, then programs the per-ring interrupt masks.
 *
 * Fixes vs. original:
 *  - the intr fields are uint32_t (see struct edma_hw), so the
 *    "< 0" failure checks on platform_get_irq() results were always
 *    false; cast to int before comparing.
 *  - the misc IRQ number was never validated before request_irq().
 *  - error-path free_irq() used &pdev->dev as dev_id while request_irq()
 *    registered with pdev, so the frees could never match.
 *
 * NOTE(review): on a failure partway through a ring category, the IRQs
 * already requested in that same category are not freed (pre-existing
 * behaviour) — worth a follow-up.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int edma_irq_init(void)
{
	struct edma_rxdesc_ring *rxdesc_ring = NULL;
	struct edma_rxfill_ring *rxfill_ring = NULL;
	struct edma_txcmpl_ring *txcmpl_ring = NULL;
	int err;
	uint32_t entry_num, i;

	/*
	 * Get TXCMPL rings IRQ numbers
	 */
	entry_num = 0;
	for (i = 0; i < edma_hw.txcmpl_rings; i++, entry_num++) {
		edma_hw.txcmpl_intr[i] =
			platform_get_irq(edma_hw.pdev, entry_num);
		if ((int)edma_hw.txcmpl_intr[i] < 0) {
			pr_warn("%s: txcmpl_intr[%u] irq get failed\n",
				(edma_hw.device_node)->name, i);
			return -1;
		}

		pr_debug("%s: txcmpl_intr[%u] = %u\n",
			 (edma_hw.device_node)->name,
			 i, edma_hw.txcmpl_intr[i]);
	}

	/*
	 * Get RXFILL rings IRQ numbers
	 */
	for (i = 0; i < edma_hw.rxfill_rings; i++, entry_num++) {
		edma_hw.rxfill_intr[i] =
			platform_get_irq(edma_hw.pdev, entry_num);
		if ((int)edma_hw.rxfill_intr[i] < 0) {
			pr_warn("%s: rxfill_intr[%u] irq get failed\n",
				(edma_hw.device_node)->name, i);
			return -1;
		}

		pr_debug("%s: rxfill_intr[%u] = %u\n",
			 (edma_hw.device_node)->name,
			 i, edma_hw.rxfill_intr[i]);
	}

	/*
	 * Get RXDESC rings IRQ numbers
	 */
	for (i = 0; i < edma_hw.rxdesc_rings; i++, entry_num++) {
		edma_hw.rxdesc_intr[i] =
			platform_get_irq(edma_hw.pdev, entry_num);
		if ((int)edma_hw.rxdesc_intr[i] < 0) {
			pr_warn("%s: rxdesc_intr[%u] irq get failed\n",
				(edma_hw.device_node)->name, i);
			return -1;
		}

		pr_debug("%s: rxdesc_intr[%u] = %u\n",
			 (edma_hw.device_node)->name,
			 i, edma_hw.rxdesc_intr[i]);
	}

	/*
	 * Get misc IRQ number (validate it — the original did not)
	 */
	edma_hw.misc_intr = platform_get_irq(edma_hw.pdev, entry_num);
	if ((int)edma_hw.misc_intr < 0) {
		pr_warn("%s: misc irq get failed\n",
			(edma_hw.device_node)->name);
		return -1;
	}

	pr_debug("%s: misc IRQ:%u\n",
		 (edma_hw.device_node)->name,
		 edma_hw.misc_intr);

	/*
	 * Request IRQ for TXCMPL rings
	 */
	for (i = 0; i < edma_hw.txcmpl_rings; i++) {
		err = request_irq(edma_hw.txcmpl_intr[i],
				  edma_handle_irq, IRQF_SHARED,
				  "edma_txcmpl", (void *)edma_hw.pdev);
		if (err) {
			pr_debug("TXCMPL ring IRQ:%d request failed\n",
				 edma_hw.txcmpl_intr[i]);
			return -1;
		}
	}

	/*
	 * Request IRQ for RXFILL rings
	 */
	for (i = 0; i < edma_hw.rxfill_rings; i++) {
		err = request_irq(edma_hw.rxfill_intr[i],
				  edma_handle_irq, IRQF_SHARED,
				  "edma_rxfill", (void *)edma_hw.pdev);
		if (err) {
			pr_debug("RXFILL ring IRQ:%d request failed\n",
				 edma_hw.rxfill_intr[i]);
			goto rx_fill_ring_intr_req_fail;
		}
	}

	/*
	 * Request IRQ for RXDESC rings
	 */
	for (i = 0; i < edma_hw.rxdesc_rings; i++) {
		err = request_irq(edma_hw.rxdesc_intr[i],
				  edma_handle_irq, IRQF_SHARED,
				  "edma_rxdesc", (void *)edma_hw.pdev);
		if (err) {
			pr_debug("RXDESC ring IRQ:%d request failed\n",
				 edma_hw.rxdesc_intr[i]);
			goto rx_desc_ring_intr_req_fail;
		}
	}

	/*
	 * Request Misc IRQ
	 */
	err = request_irq(edma_hw.misc_intr, edma_handle_misc_irq,
			  IRQF_SHARED, "edma_misc",
			  (void *)edma_hw.pdev);
	if (err) {
		pr_debug("MISC IRQ:%d request failed\n",
			 edma_hw.misc_intr);
		goto misc_intr_req_fail;
	}

	/*
	 * Set interrupt mask
	 */
	for (i = 0; i < edma_hw.rxfill_rings; i++) {
		rxfill_ring = &edma_hw.rxfill_ring[i];
		edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
			       edma_hw.rxfill_intr_mask);
	}

	for (i = 0; i < edma_hw.txcmpl_rings; i++) {
		txcmpl_ring = &edma_hw.txcmpl_ring[i];
		edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
			       edma_hw.txcmpl_intr_mask);
	}

	for (i = 0; i < edma_hw.rxdesc_rings; i++) {
		rxdesc_ring = &edma_hw.rxdesc_ring[i];
		edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
			       edma_hw.rxdesc_intr_mask);
	}

	edma_reg_write(EDMA_REG_MISC_INT_MASK, edma_hw.misc_intr_mask);

	return 0;

misc_intr_req_fail:
	/*
	 * Free IRQ for RXDESC rings.
	 * Fix: dev_id must match the request_irq() cookie (pdev), not
	 * &pdev->dev — the original frees could never succeed.
	 */
	for (i = 0; i < edma_hw.rxdesc_rings; i++) {
		synchronize_irq(edma_hw.rxdesc_intr[i]);
		free_irq(edma_hw.rxdesc_intr[i],
			 (void *)edma_hw.pdev);
	}

rx_desc_ring_intr_req_fail:
	/*
	 * Free IRQ for RXFILL rings
	 */
	for (i = 0; i < edma_hw.rxfill_rings; i++) {
		synchronize_irq(edma_hw.rxfill_intr[i]);
		free_irq(edma_hw.rxfill_intr[i],
			 (void *)edma_hw.pdev);
	}

rx_fill_ring_intr_req_fail:
	/*
	 * Free IRQ for TXCMPL rings
	 */
	for (i = 0; i < edma_hw.txcmpl_rings; i++) {
		synchronize_irq(edma_hw.txcmpl_intr[i]);
		free_irq(edma_hw.txcmpl_intr[i],
			 (void *)edma_hw.pdev);
	}

	return -1;
}
/*
 * edma_register_netdevice()
 *	Register a netdevice with EDMA.
 *
 * Records the netdev in the port-id table (macid is 1-based, the table
 * 0-based), and on the very first registration adds the shared NAPI
 * context and installs the interrupt handlers.
 *
 * Returns 0 on success, -EINVAL on invalid arguments or IRQ setup failure.
 */
static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
{
	if (!netdev) {
		pr_info("nss_dp_edma: Invalid netdev pointer %px\n", netdev);
		return -EINVAL;
	}

	if ((macid < EDMA_START_GMACS) || (macid > EDMA_MAX_GMACS)) {
		netdev_dbg(netdev, "nss_dp_edma: Invalid macid(%d) for %s\n",
			   macid, netdev->name);
		return -EINVAL;
	}

	netdev_info(netdev, "nss_dp_edma: Registering netdev %s(qcom-id:%d) with EDMA\n",
		    netdev->name, macid);

	/*
	 * We expect 'macid' to correspond to ports numbers on
	 * IPQ807x. These begin from '1' and hence we subtract
	 * one when using it as an array index.
	 */
	edma_hw.netdev_arr[macid - 1] = netdev;

	/*
	 * NAPI add + interrupt setup, first registration only
	 */
	if (!edma_hw.napi_added) {
		netif_napi_add(netdev, &edma_hw.napi, edma_napi,
			       EDMA_NAPI_WORK);

		/*
		 * Register the interrupt handlers and enable interrupts.
		 * Fix: unwind on failure — the original left the NAPI
		 * context registered and the netdev pointer in
		 * netdev_arr, which would dangle if the caller frees
		 * the netdev after the failed init.
		 */
		if (edma_irq_init() < 0) {
			netif_napi_del(&edma_hw.napi);
			edma_hw.netdev_arr[macid - 1] = NULL;
			return -EINVAL;
		}
		edma_hw.napi_added = 1;
	}

	return 0;
}
/*
 * edma_if_init()
 *	Data plane init: register the netdev with EDMA and reserve
 *	headroom for the Tx preheader.
 */
static int edma_if_init(struct nss_dp_data_plane_ctx *dpc)
{
	struct net_device *netdev = dpc->dev;
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);

	/*
	 * Register the netdev
	 */
	if (edma_register_netdevice(netdev, dp_dev->macid)) {
		netdev_dbg(netdev,
			   "Error registering netdevice with EDMA %s\n",
			   netdev->name);
		return NSS_DP_FAILURE;
	}

	/*
	 * Headroom needed for Tx preheader
	 */
	netdev->needed_headroom += EDMA_TX_PREHDR_SIZE;

	return NSS_DP_SUCCESS;
}
/*
 * nss_dp_edma_ops
 *	Data plane operations table exported to the NSS DP core; wires the
 *	edma_if_* callbacks defined above into the driver framework.
 */
struct nss_dp_data_plane_ops nss_dp_edma_ops = {
	.init		= edma_if_init,
	.open		= edma_if_open,
	.close		= edma_if_close,
	.link_state	= edma_if_link_state,
	.mac_addr	= edma_if_mac_addr,
	.change_mtu	= edma_if_change_mtu,
	.xmit		= edma_if_xmit,
	.set_features	= edma_if_set_features,
	.pause_on_off	= edma_if_pause_on_off,
	.vsi_assign	= edma_if_vsi_assign,
	.vsi_unassign	= edma_if_vsi_unassign,
#ifdef CONFIG_RFS_ACCEL
	.rx_flow_steer	= edma_if_rx_flow_steer,
#endif
	.deinit		= edma_if_deinit,
};
/*
 * edma_of_get_pdata()
 *	Read the device tree details for EDMA.
 *
 * Locates the EDMA DT node and its platform device, fetches the register
 * resource, and reads the start-id/count pairs for the TXDESC, TXCMPL,
 * RXFILL and RXDESC ring ranges, deriving the exclusive end-ids.
 *
 * Returns 0 on success, -EINVAL if any node/property is missing.
 *
 * NOTE(review): of_find_node_by_name()/of_find_device_by_node() take
 * references that are apparently never released — confirm against the
 * driver's teardown path.
 */
static int edma_of_get_pdata(struct resource *edma_res)
{
	/*
	 * Find EDMA node in device tree
	 */
	edma_hw.device_node = of_find_node_by_name(NULL,
						   EDMA_DEVICE_NODE_NAME);
	if (!edma_hw.device_node) {
		pr_warn("EDMA device tree node (%s) not found\n",
			EDMA_DEVICE_NODE_NAME);
		return -EINVAL;
	}

	/*
	 * Get EDMA device node
	 */
	edma_hw.pdev = of_find_device_by_node(edma_hw.device_node);
	if (!edma_hw.pdev) {
		pr_warn("Platform device for node %px(%s) not found\n",
			edma_hw.device_node,
			(edma_hw.device_node)->name);
		return -EINVAL;
	}

	/*
	 * Get EDMA register resource
	 */
	if (of_address_to_resource(edma_hw.device_node, 0, edma_res) != 0) {
		pr_warn("Unable to get register address for edma device: "
			EDMA_DEVICE_NODE_NAME"\n");
		return -EINVAL;
	}

	/*
	 * Get id of first TXDESC ring
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-ring-start",
				 &edma_hw.txdesc_ring_start) != 0) {
		pr_warn("Read error 1st TXDESC ring (txdesc_ring_start)\n");
		return -EINVAL;
	}

	/*
	 * Get number of TXDESC rings
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-rings",
				 &edma_hw.txdesc_rings) != 0) {
		pr_warn("Unable to read number of txdesc rings.\n");
		return -EINVAL;
	}
	/* End id is exclusive: start + count */
	edma_hw.txdesc_ring_end = edma_hw.txdesc_ring_start +
				  edma_hw.txdesc_rings;

	/*
	 * Get id of first TXCMPL ring
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-ring-start",
				 &edma_hw.txcmpl_ring_start) != 0) {
		pr_warn("Read error 1st TXCMPL ring (txcmpl_ring_start)\n");
		return -EINVAL;
	}

	/*
	 * Get number of TXCMPL rings
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-rings",
				 &edma_hw.txcmpl_rings) != 0) {
		pr_warn("Unable to read number of txcmpl rings.\n");
		return -EINVAL;
	}
	edma_hw.txcmpl_ring_end = edma_hw.txcmpl_ring_start +
				  edma_hw.txcmpl_rings;

	/*
	 * Get id of first RXFILL ring
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-ring-start",
				 &edma_hw.rxfill_ring_start) != 0) {
		pr_warn("Read error 1st RXFILL ring (rxfill-ring-start)\n");
		return -EINVAL;
	}

	/*
	 * Get number of RXFILL rings
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-rings",
				 &edma_hw.rxfill_rings) != 0) {
		pr_warn("Unable to read number of rxfill rings.\n");
		return -EINVAL;
	}
	edma_hw.rxfill_ring_end = edma_hw.rxfill_ring_start +
				  edma_hw.rxfill_rings;

	/*
	 * Get id of first RXDESC ring
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-ring-start",
				 &edma_hw.rxdesc_ring_start) != 0) {
		pr_warn("Read error 1st RXDESC ring (rxdesc-ring-start)\n");
		return -EINVAL;
	}

	/*
	 * Get number of RXDESC rings
	 */
	if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-rings",
				 &edma_hw.rxdesc_rings) != 0) {
		pr_warn("Unable to read number of rxdesc rings.\n");
		return -EINVAL;
	}
	edma_hw.rxdesc_ring_end = edma_hw.rxdesc_ring_start +
				  edma_hw.rxdesc_rings;

	return 0;
}
/*
 * edma_init()
 *	EDMA init.
 *
 * Reads the DT configuration, claims and maps the EDMA register region,
 * runs the hardware init, and attaches the driver state to the platform
 * device. On failure the mapping/region are unwound via gotos.
 *
 * Returns 0 on success, -EINVAL for DT errors, -EFAULT for mapping or
 * hardware-init errors.
 *
 * NOTE(review): ioremap_nocache() was removed in kernel 5.6 (plain
 * ioremap() is non-cached on these platforms) — fine for this 21.02
 * tree, needs changing on newer kernels.
 */
int edma_init(void)
{
	int ret = 0;
	struct resource res_edma;

	/*
	 * Get all the DTS data needed
	 */
	if (edma_of_get_pdata(&res_edma) < 0) {
		pr_warn("Unable to get EDMA DTS data.\n");
		return -EINVAL;
	}

	/*
	 * Request memory region for EDMA registers
	 */
	edma_hw.reg_resource = request_mem_region(res_edma.start,
						  resource_size(&res_edma),
						  EDMA_DEVICE_NODE_NAME);
	if (!edma_hw.reg_resource) {
		pr_warn("Unable to request EDMA register memory.\n");
		return -EFAULT;
	}

	/*
	 * Remap register resource
	 */
	edma_hw.reg_base = ioremap_nocache((edma_hw.reg_resource)->start,
					   resource_size(edma_hw.reg_resource));
	if (!edma_hw.reg_base) {
		pr_warn("Unable to remap EDMA register memory.\n");
		ret = -EFAULT;
		goto edma_init_remap_fail;
	}

	if (edma_hw_init(&edma_hw) != 0) {
		ret = -EFAULT;
		goto edma_init_hw_init_fail;
	}

	platform_set_drvdata(edma_hw.pdev, (void *)&edma_hw);
	edma_hw.napi_added = 0;

	return 0;

edma_init_hw_init_fail:
	iounmap(edma_hw.reg_base);

edma_init_remap_fail:
	release_mem_region((edma_hw.reg_resource)->start,
			   resource_size(edma_hw.reg_resource));

	return ret;
}
/*
 * edma_disable_port()
 *	Globally disable the EDMA port by writing EDMA_DISABLE to the
 *	port control register (clears the enable set in edma_hw_init()).
 */
static void edma_disable_port(void)
{
	edma_reg_write(EDMA_REG_PORT_CTRL, EDMA_DISABLE);
}
/*
 * edma_cleanup()
 *	EDMA cleanup.
 *
 * Can be called from data-plane override or module_exit; the
 * edma_initialized flag guarantees the teardown runs only once. Disables
 * the rings this driver owns, clears interrupt masks, frees IRQs/NAPI,
 * releases rings and the register mapping. The global EDMA port is only
 * disabled at module exit (!is_dp_override) since NSS firmware depends
 * on the setting.
 */
void edma_cleanup(bool is_dp_override)
{
	int i;
	struct edma_txcmpl_ring *txcmpl_ring = NULL;
	struct edma_rxdesc_ring *rxdesc_ring = NULL;

	/*
	 * The cleanup can happen from data plane override
	 * or from module_exit, we want to cleanup only once
	 */
	if (!edma_hw.edma_initialized) {
		/*
		 * Disable EDMA only at module exit time, since NSS firmware
		 * depends on this setting.
		 */
		if (!is_dp_override) {
			edma_disable_port();
		}
		return;
	}

	/*
	 * Disable Rx rings used by this driver
	 */
	for (i = edma_hw.rxdesc_ring_start; i < edma_hw.rxdesc_ring_end; i++)
		edma_reg_write(EDMA_REG_RXDESC_CTRL(i), EDMA_RING_DISABLE);

	/*
	 * Disable Tx rings used by this driver.
	 *
	 * Fix: removed a dead "txcmpl_ring = &edma_hw.txcmpl_ring[i]"
	 * assignment here — it was never used, and indexed the local
	 * txcmpl_ring array with a global TXDESC ring id, potentially
	 * forming an out-of-bounds pointer.
	 */
	for (i = edma_hw.txdesc_ring_start; i < edma_hw.txdesc_ring_end; i++)
		edma_reg_write(EDMA_REG_TXDESC_CTRL(i),
			       EDMA_RING_DISABLE);

	/*
	 * Disable RxFill Rings used by this driver
	 */
	for (i = edma_hw.rxfill_ring_start; i < edma_hw.rxfill_ring_end; i++)
		edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), EDMA_RING_DISABLE);

	/*
	 * Clear interrupt mask
	 */
	for (i = 0; i < edma_hw.rxdesc_rings; i++) {
		rxdesc_ring = &edma_hw.rxdesc_ring[i];
		edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
			       EDMA_MASK_INT_CLEAR);
	}

	for (i = 0; i < edma_hw.txcmpl_rings; i++) {
		txcmpl_ring = &edma_hw.txcmpl_ring[i];
		edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
			       EDMA_MASK_INT_CLEAR);
	}

	edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_CLEAR);

	/*
	 * Remove interrupt handlers and NAPI
	 */
	if (edma_hw.napi_added) {
		/*
		 * Free IRQ for TXCMPL rings
		 */
		for (i = 0; i < edma_hw.txcmpl_rings; i++) {
			synchronize_irq(edma_hw.txcmpl_intr[i]);
			free_irq(edma_hw.txcmpl_intr[i],
				 (void *)(edma_hw.pdev));
		}

		/*
		 * Free IRQ for RXFILL rings
		 */
		for (i = 0; i < edma_hw.rxfill_rings; i++) {
			synchronize_irq(edma_hw.rxfill_intr[i]);
			free_irq(edma_hw.rxfill_intr[i],
				 (void *)(edma_hw.pdev));
		}

		/*
		 * Free IRQ for RXDESC rings
		 */
		for (i = 0; i < edma_hw.rxdesc_rings; i++) {
			synchronize_irq(edma_hw.rxdesc_intr[i]);
			free_irq(edma_hw.rxdesc_intr[i],
				 (void *)(edma_hw.pdev));
		}

		/*
		 * Free Misc IRQ
		 */
		synchronize_irq(edma_hw.misc_intr);
		free_irq(edma_hw.misc_intr, (void *)(edma_hw.pdev));

		netif_napi_del(&edma_hw.napi);
		edma_hw.napi_added = 0;
	}

	/*
	 * Disable EDMA only at module exit time, since NSS firmware
	 * depends on this setting.
	 */
	if (!is_dp_override) {
		edma_disable_port();
	}

	/*
	 * cleanup rings and free
	 */
	edma_cleanup_rings(&edma_hw);
	iounmap(edma_hw.reg_base);
	release_mem_region((edma_hw.reg_resource)->start,
			   resource_size(edma_hw.reg_resource));

	/*
	 * Mark initialize false, so that we do not
	 * try to cleanup again
	 */
	edma_hw.edma_initialized = false;
}

View File

@@ -0,0 +1,287 @@
/*
**************************************************************************
* Copyright (c) 2016, 2018-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include "nss_dp_dev.h"
#ifndef __NSS_DP_EDMA_DATAPLANE__
#define __NSS_DP_EDMA_DATAPLANE__

/* Buffer sizing: data buffer plus the EDMA Rx preheader prepended by HW */
#define EDMA_BUF_SIZE		2000
#define EDMA_DEVICE_NODE_NAME	"edma"
#define EDMA_RX_BUFF_SIZE	(EDMA_BUF_SIZE + EDMA_RX_PREHDR_SIZE)
#define EDMA_RX_PREHDR_SIZE	(sizeof(struct edma_rx_preheader))
#define EDMA_TX_PREHDR_SIZE	(sizeof(struct edma_tx_preheader))
#define EDMA_RING_SIZE		128
#define EDMA_NAPI_WORK		100
#define EDMA_START_GMACS	NSS_DP_START_IFNUM
#define EDMA_MAX_GMACS		NSS_DP_HAL_MAX_PORTS
#define EDMA_TX_PKT_MIN_SIZE	33

/* Per-SoC ring-count limits */
#if defined(NSS_DP_IPQ60XX)
#define EDMA_MAX_TXCMPL_RINGS	24	/* Max TxCmpl rings */
#else
#define EDMA_MAX_TXCMPL_RINGS	8	/* Max TxCmpl rings */
#endif
#define EDMA_MAX_RXDESC_RINGS	16	/* Max RxDesc rings */
#define EDMA_MAX_RXFILL_RINGS	8	/* Max RxFill rings */
#define EDMA_MAX_TXDESC_RINGS	24	/* Max TxDesc rings */

/* Typed access to the i-th descriptor of a ring's descriptor array */
#define EDMA_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
#define EDMA_RXFILL_DESC(R, i)		EDMA_GET_DESC(R, i, struct edma_rxfill_desc)
#define EDMA_RXDESC_DESC(R, i)		EDMA_GET_DESC(R, i, struct edma_rxdesc_desc)
#define EDMA_TXDESC_DESC(R, i)		EDMA_GET_DESC(R, i, struct edma_txdesc_desc)

/* Field extractors for the Rx preheader written by hardware */
#define EDMA_RXPH_SRC_INFO_TYPE_GET(rxph)	(((rxph)->src_info >> 8) & 0xf0)
#define EDMA_RXPH_SERVICE_CODE_GET(rxph)	(((rxph)->rx_pre4) & 0xff)
/*
 * Tx descriptor
 */
struct edma_txdesc_desc {
	uint32_t buffer_addr;
	/* buffer address */
	uint32_t word1;
	/* more bit, TSO, preheader, pool, offset and length */
};

/*
 * TxCmpl descriptor
 */
struct edma_txcmpl_desc {
	uint32_t buffer_addr;	/* buffer address/opaque */
	uint32_t status;	/* status */
};

/*
 * Rx descriptor
 */
struct edma_rxdesc_desc {
	uint32_t buffer_addr;	/* buffer address */
	uint32_t status;	/* status */
};

/*
 * RxFill descriptor
 */
struct edma_rxfill_desc {
	uint32_t buffer_addr;	/* Buffer address */
	uint32_t word1;		/* opaque_ind and buffer size */
};

/*
 * Tx descriptor ring
 */
struct edma_txdesc_ring {
	uint32_t id;		/* TXDESC ring number */
	void *desc;		/* descriptor ring virtual address */
	dma_addr_t dma;		/* descriptor ring physical address */
	spinlock_t tx_lock;	/* Tx ring lock */
	uint16_t count;		/* number of descriptors */
};

/*
 * TxCmpl ring
 */
struct edma_txcmpl_ring {
	uint32_t id;		/* TXCMPL ring number */
	void *desc;		/* descriptor ring virtual address */
	dma_addr_t dma;		/* descriptor ring physical address */
	uint16_t count;		/* number of descriptors in the ring */
};

/*
 * RxFill ring
 */
struct edma_rxfill_ring {
	uint32_t id;		/* RXFILL ring number */
	void *desc;		/* descriptor ring virtual address */
	dma_addr_t dma;		/* descriptor ring physical address */
	spinlock_t lock;	/* Rx ring lock */
	uint16_t count;		/* number of descriptors in the ring */
};

/*
 * RxDesc ring
 */
struct edma_rxdesc_ring {
	uint32_t id;			/* RXDESC ring number */
	struct edma_rxfill_ring *rxfill;	/* RXFILL ring used */
	void *desc;			/* descriptor ring virtual address */
	dma_addr_t dma;			/* descriptor ring physical address */
	uint16_t count;			/* number of descriptors in the ring */
};
/*
 * EDMA Tx Preheader
 *	Software-built header prepended to every Tx frame
 *	(EDMA_TX_PREHDR_SIZE bytes of reserved headroom).
 */
struct edma_tx_preheader {
	uint32_t opaque;	/* Opaque, contains skb pointer */
	uint16_t src_info;	/* Src information */
	uint16_t dst_info;	/* Dest information */
	uint32_t tx_pre2;	/* SVLAN & CVLAN flag, drop prec, hash value */
	uint32_t tx_pre3;	/* STAG, CTAG */
	uint32_t tx_pre4;	/* CPU code, L3 & L4 offset, service code */
	uint32_t tx_pre5;	/* IP addr index, ACL index */
	uint32_t tx_pre6;	/* IP payload checksum, copy2cpu, timestamp, dscp */
	uint32_t tx_pre7;	/* Timestamp, QoS TAG */
};

/*
 * EDMA Rx Preheader
 *	Hardware-written header found in front of every Rx frame; see the
 *	EDMA_RXPH_* extractor macros above.
 */
struct edma_rx_preheader {
	uint32_t opaque;
	/* Opaque, contains skb pointer*/
	uint16_t src_info;
	/* Src information */
	uint16_t dst_info;
	/* Dest information */
	uint32_t rx_pre2;
	/* SVLAN & CVLAN flag, drop prec, hash value */
	uint32_t rx_pre3;
	/* STAG, CTAG */
	uint32_t rx_pre4;
	/* CPU code, L3 & L4 offset, service code */
	uint32_t rx_pre5;
	/* IP addr index, ACL index */
	uint32_t rx_pre6;
	/* IP payload checksum, copy2cpu, timestamp, dscp */
	uint32_t rx_pre7;
	/* Timestamp, QoS TAG */
};

/*
 * Return codes of edma_ring_xmit()
 */
enum edma_tx {
	EDMA_TX_OK = 0,		/* Tx success */
	EDMA_TX_DESC = 1,	/* Not enough descriptors */
	EDMA_TX_FAIL = 2,	/* Tx failure */
};
/*
 * EDMA private data structure
 *	Single-instance driver state (see the global 'edma_hw').
 */
struct edma_hw {
	struct napi_struct napi;
	/* napi structure */
	struct net_device *netdev_arr[EDMA_MAX_GMACS];
	/* netdev for each gmac port */
	struct device_node *device_node;
	/* Device tree node */
	struct platform_device *pdev;
	/* Platform device */
	void __iomem *reg_base;
	/* Base register address */
	struct resource *reg_resource;
	/* Memory resource */
	uint16_t rx_payload_offset;
	/* start of the payload offset */
	uint32_t flags;
	/* internal flags */
	int active;
	/* open refcount; NAPI enabled while non-zero */
	int napi_added;
	/* flag to indicate napi add status */
	/*
	 * Debugfs entries
	 */
	struct dentry *edma_dentry;
	struct dentry *txdesc_dentry;
	struct dentry *txcmpl_dentry;
	struct dentry *rxdesc_dentry;
	/*
	 * Store for tx and rx skbs
	 */
	struct sk_buff *rx_skb_store[EDMA_RING_SIZE];
	struct sk_buff *tx_skb_store[EDMA_RING_SIZE];
	struct edma_rxfill_ring *rxfill_ring;
	/* Rx Fill Ring, SW is producer */
	struct edma_rxdesc_ring *rxdesc_ring;
	/* Rx Descriptor Ring, SW is consumer */
	struct edma_txdesc_ring *txdesc_ring;
	/* Tx Descriptor Ring, SW is producer */
	struct edma_txcmpl_ring *txcmpl_ring;
	/* Tx Completion Ring, SW is consumer */
	uint32_t txdesc_rings;
	/* Number of TxDesc rings */
	uint32_t txdesc_ring_start;
	/* Id of first TXDESC ring */
	uint32_t txdesc_ring_end;
	/* Id of the last TXDESC ring (exclusive: start + count) */
	uint32_t txcmpl_rings;
	/* Number of TxCmpl rings */
	uint32_t txcmpl_ring_start;
	/* Id of first TXCMPL ring */
	uint32_t txcmpl_ring_end;
	/* Id of last TXCMPL ring (exclusive) */
	uint32_t rxfill_rings;
	/* Number of RxFill rings */
	uint32_t rxfill_ring_start;
	/* Id of first RxFill ring */
	uint32_t rxfill_ring_end;
	/* Id of last RxFill ring (exclusive) */
	uint32_t rxdesc_rings;
	/* Number of RxDesc rings */
	uint32_t rxdesc_ring_start;
	/* Id of first RxDesc ring */
	uint32_t rxdesc_ring_end;
	/* Id of last RxDesc ring (exclusive) */
	/*
	 * NOTE(review): the IRQ-number fields below are unsigned but are
	 * assigned from platform_get_irq(), which returns a negative int
	 * on failure — any "< 0" check on them needs an (int) cast.
	 */
	uint32_t txcmpl_intr[EDMA_MAX_TXCMPL_RINGS];
	/* TxCmpl ring IRQ numbers */
	uint32_t rxfill_intr[EDMA_MAX_RXFILL_RINGS];
	/* Rx fill ring IRQ numbers */
	uint32_t rxdesc_intr[EDMA_MAX_RXDESC_RINGS];
	/* Rx desc ring IRQ numbers */
	uint32_t misc_intr;
	/* Misc IRQ number */
	uint32_t tx_intr_mask;
	/* Tx interrupt mask */
	uint32_t rxfill_intr_mask;
	/* Rx fill ring interrupt mask */
	uint32_t rxdesc_intr_mask;
	/* Rx Desc ring interrupt mask */
	uint32_t txcmpl_intr_mask;
	/* Tx Cmpl ring interrupt mask */
	uint32_t misc_intr_mask;
	/* misc interrupt interrupt mask */
	uint32_t dp_override_cnt;
	/* number of interfaces overriden */
	bool edma_initialized;
	/* flag to check initialization status */
};
/* Single global EDMA instance, defined in edma_data_plane.c */
extern struct edma_hw edma_hw;

/* Register accessors */
uint32_t edma_reg_read(uint32_t reg_off);
void edma_reg_write(uint32_t reg_off, uint32_t val);

/* Ring management and datapath entry points */
int edma_alloc_rx_buffer(struct edma_hw *ehw,
			 struct edma_rxfill_ring *rxfill_ring);
enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
			    struct net_device *netdev,
			    struct sk_buff *skb,
			    struct edma_txdesc_ring *txdesc_ring);
uint32_t edma_clean_tx(struct edma_hw *ehw,
		       struct edma_txcmpl_ring *txcmpl_ring);

/* Interrupt and NAPI handlers */
irqreturn_t edma_handle_irq(int irq, void *ctx);
irqreturn_t edma_handle_misc_irq(int irq, void *ctx);
int edma_napi(struct napi_struct *napi, int budget);

/* Lifecycle */
void edma_cleanup_rings(struct edma_hw *ehw);
void edma_cleanup(bool is_dp_override);
int edma_hw_init(struct edma_hw *ehw);
#endif	/* __NSS_DP_EDMA_DATAPLANE__ */

View File

@@ -0,0 +1,454 @@
/*
**************************************************************************
* Copyright (c) 2016,2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __EDMA_REGS__
#define __EDMA_REGS__
/*
* IPQ807x EDMA register offsets
*/
#define EDMA_REG_MAS_CTRL 0x0
#define EDMA_REG_PORT_CTRL 0x4
#define EDMA_REG_VLAN_CTRL 0x8
#define EDMA_REG_RXDESC2FILL_MAP_0 0x18
#define EDMA_REG_RXDESC2FILL_MAP_1 0x1c
#define EDMA_REG_TXQ_CTRL 0x20
#define EDMA_REG_TXQ_CTRL_2 0x24
#define EDMA_REG_TXQ_FC_0 0x28
#define EDMA_REG_TXQ_FC_1 0x30
#define EDMA_REG_TXQ_FC_2 0x34
#define EDMA_REG_TXQ_FC_3 0x38
#define EDMA_REG_RXQ_CTRL 0x3c
#define EDMA_REG_RX_TX_FULL_QID 0x40
#define EDMA_REG_RXQ_FC_THRE 0x44
#define EDMA_REG_DMAR_CTRL 0x48
#define EDMA_REG_AXIR_CTRL 0x4c
#define EDMA_REG_AXIW_CTRL 0x50
#define EDMA_REG_MIN_MSS 0x54
#define EDMA_REG_LOOPBACK_CTRL 0x58
#define EDMA_REG_MISC_INT_STAT 0x5c
#define EDMA_REG_MISC_INT_MASK 0x60
#define EDMA_REG_DBG_CTRL 0x64
#define EDMA_REG_DBG_DATA 0x68
#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * n))
#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * n))
#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * n))
#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * n))
#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * n))
#if defined(NSS_DP_IPQ807X)
#define EDMA_REG_TXDESC2CMPL_MAP_0 0xc
#define EDMA_REG_TXDESC2CMPL_MAP_1 0x10
#define EDMA_REG_TXDESC2CMPL_MAP_2 0x14
#define EDMA_REG_TXCMPL_BASE 0x19000
#define EDMA_REG_TX_BASE 0x21000
#else
#define EDMA_REG_TXCMPL_BASE 0x79000
#define EDMA_REG_TX_BASE 0x91000
#endif
#define EDMA_REG_TXCMPL_BA_OFFSET 0x00000
#define EDMA_REG_TXCMPL_PROD_IDX_OFFSET 0x00004
#define EDMA_REG_TXCMPL_CONS_IDX_OFFSET 0x00008
#define EDMA_REG_TXCMPL_RING_SIZE_OFFSET 0x0000c
#define EDMA_REG_TXCMPL_UGT_THRE_OFFSET 0x00010
#define EDMA_REG_TXCMPL_CTRL_OFFSET 0x00014
#define EDMA_REG_TXCMPL_BPC_OFFSET 0x00018
#define EDMA_REG_TX_INT_STAT_OFFSET 0x00000
#define EDMA_REG_TX_INT_MASK_OFFSET 0x00004
#define EDMA_REG_TX_MOD_TIMER_OFFSET 0x00008
#define EDMA_REG_TX_INT_CTRL_OFFSET 0x0000c
#define EDMA_REG_TXCMPL_BA(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BA_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_PROD_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_PROD_IDX_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_CONS_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CONS_IDX_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_RING_SIZE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_RING_SIZE_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_UGT_THRE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_UGT_THRE_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_CTRL(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CTRL_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_BPC(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BPC_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_STAT(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_STAT_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_MASK(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_MASK_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_MOD_TIMER(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_MOD_TIMER_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_CTRL(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_CTRL_OFFSET + (0x1000 * n))
#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * n))
#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * n))
#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * n))
#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * n))
#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * n))
#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * n))
#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * n))
#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * n))
#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * n))
#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * n))
#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * n))
#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * n))
#define EDMA_REG_RXDESC_BA(n) (0x39000 + (0x1000 * n))
#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * n))
#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * n))
#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * n))
#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * n))
#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * n))
#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * n))
#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * n))
#define EDMA_REG_RXDESC_INT_STAT(n) (0x49000 + (0x1000 * n))
#define EDMA_REG_RXDESC_INT_MASK(n) (0x49004 + (0x1000 * n))
#define EDMA_REG_RX_MOD_TIMER(n) (0x49008 + (0x1000 * n))
#define EDMA_REG_RX_INT_CTRL(n) (0x4900c + (0x1000 * n))
#define EDMA_QID2RID_TABLE_MEM(q) (0x5a000 + (0x4 * q))
#define EDMA_REG_RXRING_PC(n) (0x5A200 + (0x10 * n))
#define EDMA_REG_RXRING_BC_0(n) (0x5A204 + (0x10 * n))
#define EDMA_REG_RXRING_BC_1(n) (0x5A208 + (0x10 * n))
#define EDMA_REG_TXRING_PC(n) (0x74000 + (0x10 * n))
#define EDMA_REG_TXRING_BC_0(n) (0x74004 + (0x10 * n))
#define EDMA_REG_TXRING_BC_1(n) (0x74008 + (0x10 * n))
/*
* EDMA_REG_PORT_CTRL register
*/
#define EDMA_PORT_PAD_EN 0x1
#define EDMA_PORT_EDMA_EN 0x2
/*
* EDMA_REG_TXQ_CTRL register
*/
#define EDMA_TXDESC_PF_THRE_MASK 0xf
#define EDMA_TXDESC_PF_THRE_SHIFT 0
#define EDMA_TXCMPL_WB_THRE_MASK 0xf
#define EDMA_TXCMPL_WB_THRE_SHIFT 4
#define EDMA_TXDESC_PKT_SRAM_THRE_MASK 0xff
#define EDMA_TXDESC_PKT_SRAM_THRE_SHIFT 8
#define EDMA_TXCMPL_WB_TIMER_MASK 0xffff
#define EDMA_TXCMPL_WB_TIMER_SHIFT 16
/*
* EDMA_REG_RXQ_CTRL register
*/
#define EDMA_RXFILL_PF_THRE_MASK 0xf
#define EDMA_RXFILL_PF_THRE_SHIFT 0
#define EDMA_RXDESC_WB_THRE_MASK 0xf
#define EDMA_RXDESC_WB_THRE_SHIFT 4
#define EDMA_RXDESC_WB_TIMER_MASK 0xffff
#define EDMA_RXDESC_WB_TIMER_SHIFT 16
/*
* EDMA_REG_RX_TX_FULL_QID register
*/
#define EDMA_RX_DESC_FULL_QID_MASK 0xff
#define EDMA_RX_DESC_FULL_QID_SHIFT 0
#define EDMA_TX_CMPL_BUF_FULL_QID_MASK 0xff
#define EDMA_TX_CMPL_BUF_FULL_QID_SHIFT 8
#define EDMA_TX_SRAM_FULL_QID_MASK 0x1f
#define EDMA_TX_SRAM_FULL_QID_SHIFT 16
/*
 * EDMA_REG_RXQ_FC_THRE register
*/
#define EDMA_RXFILL_FIFO_XOFF_THRE_MASK 0x1f
#define EDMA_RXFILL_FIFO_XOFF_THRE_SHIFT 0
#define EDMA_DESC_FIFO_XOFF_THRE_MASK 0x3f
#define EDMA_DESC_FIFO_XOFF_THRE_SHIFT 16
/*
* EDMA_REG_DMAR_CTRL register
*/
#define EDMA_DMAR_REQ_PRI_MASK 0x7
#define EDMA_DMAR_REQ_PRI_SHIFT 0
#define EDMA_DMAR_BURST_LEN_MASK 0x1
#define EDMA_DMAR_BURST_LEN_SHIFT 3
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK 0x1f
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT 4
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK 0x7
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT 9
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK 0x7
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT 12
#define EDMA_DMAR_REQ_PRI_SET(x) (((x) & EDMA_DMAR_REQ_PRI_MASK) << EDMA_DMAR_REQ_PRI_SHIFT)
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK) << EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_BURST_LEN_SET(x) (((x) & EDMA_DMAR_BURST_LEN_MASK) << EDMA_DMAR_BURST_LEN_SHIFT)
/*
 * Enable 128 byte EDMA bursts for IPQ60xx
*/
#if defined(NSS_DP_IPQ60XX)
#define EDMA_BURST_LEN_ENABLE 1
#else
#define EDMA_BURST_LEN_ENABLE 0
#endif
/*
* EDMA_REG_AXIW_CTRL_REG
*/
#define EDMA_AXIW_MAX_WR_SIZE_EN 0x400
/*
* EDMA DISABLE
*/
#define EDMA_DISABLE 0
/*
* EDMA_REG_TXDESC_PROD_IDX register
*/
#define EDMA_TXDESC_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_TXDESC_CONS_IDX register
*/
#define EDMA_TXDESC_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_TXDESC_RING_SIZE register
*/
#define EDMA_TXDESC_RING_SIZE_MASK 0xffff
/*
* EDMA_REG_TXDESC_CTRL register
*/
#define EDMA_TXDESC_ARB_GRP_ID_MASK 0x3
#define EDMA_TXDESC_ARB_GRP_ID_SHIFT 4
#define EDMA_TXDESC_FC_GRP_ID_MASK 0x7
#define EDMA_TXDESC_FC_GRP_ID_SHIFT 1
#define EDMA_TXDESC_TX_EN 0x1
/*
* EDMA_REG_TXCMPL_PROD_IDX register
*/
#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_TXCMPL_CONS_IDX register
*/
#define EDMA_TXCMPL_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_TXCMPL_RING_SIZE register
*/
#define EDMA_TXCMPL_RING_SIZE_MASK 0xffff
/*
* EDMA_REG_TXCMPL_UGT_THRE register
*/
#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff
#define EDMA_TXCMPL_LOW_THRE_SHIFT 0
#define EDMA_TXCMPL_FC_THRE_MASK 0x3f
#define EDMA_TXCMPL_FC_THRE_SHIFT 16
/*
* EDMA_REG_TXCMPL_CTRL register
*/
#define EDMA_TXCMPL_RET_MODE_BUFF_ADDR 0x0
#define EDMA_TXCMPL_RET_MODE_OPAQUE 0x1
/*
* EDMA_REG_TX_MOD_TIMER register
*/
#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA_REG_TX_INT_CTRL register
*/
#define EDMA_TX_INT_MASK 0x3
/*
* EDMA_REG_RXFILL_PROD_IDX register
*/
#define EDMA_RXFILL_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_RXFILL_CONS_IDX register
*/
#define EDMA_RXFILL_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_RXFILL_RING_SIZE register
*/
#define EDMA_RXFILL_RING_SIZE_MASK 0xffff
#define EDMA_RXFILL_BUF_SIZE_MASK 0x3fff
#define EDMA_RXFILL_BUF_SIZE_SHIFT 16
/*
* EDMA_REG_RXFILL_FC_THRE register
*/
#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff
#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12
#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff
#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0
/*
* EDMA_REG_RXFILL_UGT_THRE register
*/
#define EDMA_RXFILL_LOW_THRE_MASK 0xffff
#define EDMA_RXFILL_LOW_THRE_SHIFT 0
/*
* EDMA_REG_RXFILL_RING_EN register
*/
#define EDMA_RXFILL_RING_EN 0x1
/*
* EDMA_REG_RXFILL_INT_MASK register
*/
#define EDMA_RXFILL_INT_MASK 0x1
/*
* EDMA_REG_RXDESC_PROD_IDX register
*/
#define EDMA_RXDESC_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_RXDESC_CONS_IDX register
*/
#define EDMA_RXDESC_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_RXDESC_RING_SIZE register
*/
#define EDMA_RXDESC_RING_SIZE_MASK 0xffff
#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff
#define EDMA_RXDESC_PL_OFFSET_SHIFT 16
/*
* EDMA_REG_RXDESC_FC_THRE register
*/
#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff
#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12
#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff
#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0
/*
* EDMA_REG_RXDESC_UGT_THRE register
*/
#define EDMA_RXDESC_LOW_THRE_MASK 0xffff
#define EDMA_RXDESC_LOW_THRE_SHIFT 0
/*
* EDMA_REG_RXDESC_CTRL register
*/
#define EDMA_RXDESC_STAG_REMOVE_EN 0x8
#define EDMA_RXDESC_CTAG_REMOVE_EN 0x4
#define EDMA_RXDESC_QDISC_EN 0x2
#define EDMA_RXDESC_RX_EN 0x1
/*
* EDMA_REG_TX_INT_MASK register
*/
#define EDMA_TX_INT_MASK_PKT_INT 0x1
#define EDMA_TX_INT_MASK_UGT_INT 0x2
/*
* EDMA_REG_RXDESC_INT_STAT register
*/
#define EDMA_RXDESC_INT_STAT_PKT_INT 0x1
#define EDMA_RXDESC_INT_STAT_UGT_INT 0x2
/*
* EDMA_REG_RXDESC_INT_MASK register
*/
#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1
#define EDMA_RXDESC_INT_MASK_TIMER_INT_DIS 0x2
#define EDMA_MASK_INT_DISABLE 0x0
#define EDMA_MASK_INT_CLEAR 0x0
/*
* EDMA_REG_RX_MOD_TIMER register
*/
#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA QID2RID register sizes
*/
#define EDMA_QID2RID_DEPTH 0x40
#define EDMA_QID2RID_QUEUES_PER_ENTRY 8
/*
* TXDESC shift values
*/
#define EDMA_TXDESC_MORE_SHIFT 31
#define EDMA_TXDESC_TSO_EN_SHIFT 30
#define EDMA_TXDESC_PREHEADER_SHIFT 29
#define EDMA_TXDESC_POOL_ID_SHIFT 24
#define EDMA_TXDESC_POOL_ID_MASK 0x1f
#define EDMA_TXDESC_DATA_OFFSET_SHIFT 16
#define EDMA_TXDESC_DATA_OFFSET_MASK 0xff
#define EDMA_TXDESC_DATA_LENGTH_SHIFT 0
#define EDMA_TXDESC_DATA_LENGTH_MASK 0xffff
#define EDMA_PREHDR_DSTINFO_PORTID_IND 0x20
#define EDMA_PREHDR_PORTNUM_BITS 0x0fff
#define EDMA_RING_DMA_MASK 0xffffffff
/*
* RXDESC shift values
*/
#define EDMA_RXDESC_RX_RXFILL_CNT_MASK 0x000f
#define EDMA_RXDESC_RX_RXFILL_CNT_SHIFT 16
#define EDMA_RXDESC_PKT_SIZE_MASK 0x3fff
#define EDMA_RXDESC_PKT_SIZE_SHIFT 0
#define EDMA_RXDESC_RXD_VALID_MASK 0x1
#define EDMA_RXDESC_RXD_VALID_SHIFT 31
#define EDMA_RXDESC_PACKET_LEN_MASK 0x3fff
#define EDMA_RXDESC_RING_INT_STATUS_MASK 0x3
#define EDMA_RING_DISABLE 0
#define EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define EDMA_RXFILL_RING_INT_STATUS_MASK 0x1
/*
* TODO tune the timer and threshold values
*/
#define EDMA_RXFILL_FIFO_XOFF_THRE 0x3
#define EDMA_RXFILL_PF_THRE 0x3
#define EDMA_RXDESC_WB_THRE 0x0
#define EDMA_RXDESC_WB_TIMER 0x2
#define EDMA_RXDESC_XON_THRE 50
#define EDMA_RXDESC_XOFF_THRE 30
#define EDMA_RXDESC_LOW_THRE 0
#define EDMA_RX_MOD_TIMER_INIT 1000
#define EDMA_TXDESC_PF_THRE 0x3
#define EDMA_TXCMPL_WB_THRE 0X0
#define EDMA_TXDESC_PKT_SRAM_THRE 0x20
#define EDMA_TXCMPL_WB_TIMER 0x2
#define EDMA_TX_MOD_TIMER 150
/*
* EDMA misc error mask
*/
#define EDMA_MISC_AXI_RD_ERR_MASK_EN 0x1
#define EDMA_MISC_AXI_WR_ERR_MASK_EN 0x2
#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN 0x4
#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN 0x8
#define EDMA_MISC_TX_SRAM_FULL_MASK_EN 0x10
#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN 0x20
#if defined(NSS_DP_IPQ807X)
#define EDMA_MISC_PKT_LEN_LA_64K_MASK_EN 0x40
#define EDMA_MISC_PKT_LEN_LE_40_MASK_EN 0x80
#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x100
#else
#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x40
#define EDMA_MISC_TX_TIMEOUT_MASK_EN 0x80
#endif
#endif /* __EDMA_REGS__ */

View File

@@ -0,0 +1,773 @@
/*
* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
/*
 * edma_alloc_rx_buffer()
 *	Alloc Rx buffers for one RxFill ring
 *
 * Refills @rxfill_ring with newly allocated skbs, starting at the hardware
 * producer index and stopping one slot short of the consumer index (ring
 * full), on skb allocation failure, on an unexpectedly occupied
 * rx_skb_store slot, or on DMA mapping failure.
 *
 * Returns the number of buffers successfully posted to hardware.
 */
int edma_alloc_rx_buffer(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
struct platform_device *pdev = ehw->pdev;
struct sk_buff *skb;
uint16_t num_alloc = 0;
uint16_t cons, next, counter;
struct edma_rxfill_desc *rxfill_desc;
uint32_t reg_data = 0;
uint32_t store_index = 0;
struct edma_rx_preheader *rxph = NULL;
/*
 * Read RXFILL ring producer index
 * (also masked by count - 1; count is presumably a power of two --
 * NOTE(review): confirm against ring setup code)
 */
reg_data = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id));
next = reg_data & EDMA_RXFILL_PROD_IDX_MASK & (rxfill_ring->count - 1);
/*
 * Read RXFILL ring consumer index
 */
reg_data = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id));
cons = reg_data & EDMA_RXFILL_CONS_IDX_MASK;
while (1) {
counter = next;
if (++counter == rxfill_ring->count)
counter = 0;
/* Stop one slot short of the consumer index: ring is full */
if (counter == cons)
break;
/*
 * Allocate buffer
 */
skb = dev_alloc_skb(EDMA_RX_BUFF_SIZE);
if (unlikely(!skb))
break;
/*
 * Get RXFILL descriptor
 */
rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, next);
/*
 * Make room for Rx preheader
 */
rxph = (struct edma_rx_preheader *)
skb_push(skb, EDMA_RX_PREHDR_SIZE);
/*
 * Store the skb in the rx store; the slot index is carried to the
 * Rx path through the preheader opaque field
 */
store_index = next;
if (ehw->rx_skb_store[store_index] != NULL) {
dev_kfree_skb_any(skb);
break;
}
ehw->rx_skb_store[store_index] = skb;
memcpy((uint8_t *)&rxph->opaque, (uint8_t *)&store_index, 4);
/*
 * Save buffer size in RXFILL descriptor
 */
rxfill_desc->word1 = cpu_to_le32(EDMA_RX_BUFF_SIZE
& EDMA_RXFILL_BUF_SIZE_MASK);
/*
 * Map Rx buffer for DMA
 */
rxfill_desc->buffer_addr = cpu_to_le32(dma_map_single(
&pdev->dev,
skb->data,
EDMA_RX_BUFF_SIZE,
DMA_FROM_DEVICE));
if (!rxfill_desc->buffer_addr) {
/* Mapping failed: free skb and release the store slot */
dev_kfree_skb_any(skb);
ehw->rx_skb_store[store_index] = NULL;
break;
}
num_alloc++;
next = counter;
}
if (num_alloc) {
/*
 * Update RXFILL ring producer index
 */
reg_data = next & EDMA_RXFILL_PROD_IDX_MASK;
/*
 * make sure the producer index updated before
 * updating the hardware
 */
wmb();
edma_reg_write(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id),
reg_data);
}
return num_alloc;
}
/*
 * edma_clean_tx()
 *	Reap Tx descriptors
 *
 * Walks @txcmpl_ring from its consumer to its producer index, looks up each
 * completed skb via the store index the Tx path placed in the descriptor's
 * buffer_addr (opaque) field, unmaps its DMA buffer and frees it, then
 * writes the new consumer index back to hardware.
 *
 * Returns the number of completion descriptors consumed.
 */
uint32_t edma_clean_tx(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
struct platform_device *pdev = ehw->pdev;
struct edma_txcmpl_desc *txcmpl = NULL;
uint16_t prod_idx = 0;
uint16_t cons_idx = 0;
uint32_t data = 0;
uint32_t txcmpl_consumed = 0;
struct sk_buff *skb;
uint32_t len;
int store_index;
dma_addr_t daddr;
/*
 * Get TXCMPL ring producer index
 */
data = edma_reg_read(EDMA_REG_TXCMPL_PROD_IDX(txcmpl_ring->id));
prod_idx = data & EDMA_TXCMPL_PROD_IDX_MASK;
/*
 * Get TXCMPL ring consumer index
 */
data = edma_reg_read(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id));
cons_idx = data & EDMA_TXCMPL_CONS_IDX_MASK;
while (cons_idx != prod_idx) {
txcmpl = &(((struct edma_txcmpl_desc *)
(txcmpl_ring->desc))[cons_idx]);
/*
 * skb for this is stored in tx store and
 * tx header contains the index in the field
 * buffer address (opaque) of txcmpl
 */
store_index = txcmpl->buffer_addr;
skb = ehw->tx_skb_store[store_index];
ehw->tx_skb_store[store_index] = NULL;
if (unlikely(!skb)) {
pr_warn("Invalid skb: cons_idx:%u prod_idx:%u status %x\n",
cons_idx, prod_idx, txcmpl->status);
goto next_txcmpl_desc;
}
len = skb_headlen(skb);
/*
 * NOTE(review): address recomputed via virt_to_phys rather than
 * saved from dma_map_single at Tx time -- assumes a 1:1 mapping;
 * confirm this is valid on all supported targets.
 */
daddr = (dma_addr_t)virt_to_phys(skb->data);
pr_debug("skb:%px cons_idx:%d prod_idx:%d word1:0x%x\n",
skb, cons_idx, prod_idx, txcmpl->status);
dma_unmap_single(&pdev->dev, daddr,
len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
next_txcmpl_desc:
if (++cons_idx == txcmpl_ring->count)
cons_idx = 0;
txcmpl_consumed++;
}
if (txcmpl_consumed == 0)
return 0;
pr_debug("TXCMPL:%u txcmpl_consumed:%u prod_idx:%u cons_idx:%u\n",
txcmpl_ring->id, txcmpl_consumed, prod_idx, cons_idx);
/*
 * Update TXCMPL ring consumer index
 */
wmb();
edma_reg_write(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id), cons_idx);
return txcmpl_consumed;
}
/*
* nss_phy_tstamp_rx_buf()
* Receive timestamp packet
*/
void nss_phy_tstamp_rx_buf(__attribute__((unused))void *app_data, struct sk_buff *skb)
{
struct net_device *ndev = skb->dev;
/*
* The PTP_CLASS_ value 0 is passed to phy driver, which will be
* set to the correct PTP class value by calling ptp_classify_raw
* in drv->rxtstamp function.
*/
if (ndev && ndev->phydev && ndev->phydev->drv &&
ndev->phydev->drv->rxtstamp)
if(ndev->phydev->drv->rxtstamp(ndev->phydev, skb, 0))
return;
netif_receive_skb(skb);
}
EXPORT_SYMBOL(nss_phy_tstamp_rx_buf);
/*
* nss_phy_tstamp_tx_buf()
* Transmit timestamp packet
*/
void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb)
{
/*
* Function drv->txtstamp will create a clone of skb if necessary,
* the PTP_CLASS_ value 0 is passed to phy driver, which will be
* set to the correct PTP class value by calling ptp_classify_raw
* in the drv->txtstamp function.
*/
if (ndev && ndev->phydev && ndev->phydev->drv &&
ndev->phydev->drv->txtstamp)
ndev->phydev->drv->txtstamp(ndev->phydev, skb, 0);
}
EXPORT_SYMBOL(nss_phy_tstamp_tx_buf);
/*
 * edma_clean_rx()
 *	Reap Rx descriptors
 *
 * Walks @rxdesc_ring from consumer to producer index (bounded by
 * @work_to_do), unmaps each buffer, resolves the skb via the store index
 * carried in the Rx preheader opaque field, validates the source port,
 * and delivers valid packets to the stack (or to the PHY driver for PTP
 * event packets). Finally refills the paired RxFill ring and writes the
 * new consumer index back to hardware.
 *
 * Returns the number of descriptors processed.
 */
static uint32_t edma_clean_rx(struct edma_hw *ehw,
int work_to_do,
struct edma_rxdesc_ring *rxdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
struct net_device *ndev;
struct sk_buff *skb = NULL;
struct edma_rxdesc_desc *rxdesc_desc;
struct edma_rx_preheader *rxph = NULL;
uint16_t prod_idx = 0;
int src_port_num = 0;
int pkt_length = 0;
uint16_t cons_idx = 0;
uint32_t work_done = 0;
int store_index;
/*
 * Read Rx ring consumer index
 */
cons_idx = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id))
& EDMA_RXDESC_CONS_IDX_MASK;
while (1) {
/*
 * Read Rx ring producer index
 */
prod_idx = edma_reg_read(
EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id))
& EDMA_RXDESC_PROD_IDX_MASK;
if (cons_idx == prod_idx)
break;
if (work_done >= work_to_do)
break;
rxdesc_desc = EDMA_RXDESC_DESC(rxdesc_ring, cons_idx);
/*
 * Get Rx preheader
 */
rxph = (struct edma_rx_preheader *)
phys_to_virt(rxdesc_desc->buffer_addr);
/*
 * DMA unmap Rx buffer
 */
dma_unmap_single(&pdev->dev,
rxdesc_desc->buffer_addr,
EDMA_RX_BUFF_SIZE,
DMA_FROM_DEVICE);
/* Recover the skb stashed by edma_alloc_rx_buffer() */
store_index = rxph->opaque;
skb = ehw->rx_skb_store[store_index];
ehw->rx_skb_store[store_index] = NULL;
if (unlikely(!skb)) {
pr_warn("WARN: empty skb reference in rx_store:%d\n",
cons_idx);
goto next_rx_desc;
}
/*
 * Check src_info from Rx preheader
 */
if (EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) ==
EDMA_PREHDR_DSTINFO_PORTID_IND) {
src_port_num = rxph->src_info &
EDMA_PREHDR_PORTNUM_BITS;
} else {
pr_warn("WARN: src_info_type:0x%x. Drop skb:%px\n",
EDMA_RXPH_SRC_INFO_TYPE_GET(rxph), skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
 * Get packet length
 */
pkt_length = rxdesc_desc->status & EDMA_RXDESC_PACKET_LEN_MASK;
/* Drop packets from out-of-range source ports */
if (unlikely((src_port_num < NSS_DP_START_IFNUM) ||
(src_port_num > NSS_DP_HAL_MAX_PORTS))) {
pr_warn("WARN: Port number error :%d. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
 * Get netdev for this port using the source port
 * number as index into the netdev array. We need to
 * subtract one since the indices start form '0' and
 * port numbers start from '1'.
 */
ndev = ehw->netdev_arr[src_port_num - 1];
if (unlikely(!ndev)) {
pr_warn("WARN: netdev Null src_info_type:0x%x. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/* Drop silently if the interface is administratively down */
if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
 * Remove Rx preheader
 */
skb_pull(skb, EDMA_RX_PREHDR_SIZE);
/*
 * Update skb fields and indicate packet to stack
 */
skb->dev = ndev;
skb->skb_iif = ndev->ifindex;
skb_put(skb, pkt_length);
skb->protocol = eth_type_trans(skb, skb->dev);
#ifdef CONFIG_NET_SWITCHDEV
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
skb->offload_fwd_mark = ndev->offload_fwd_mark;
#else
/*
 * TODO: Implement ndo_get_devlink_port()
 */
skb->offload_fwd_mark = 0;
#endif
pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x mark:%u\n",
skb, cons_idx, pkt_length, skb->protocol,
skb->offload_fwd_mark);
#else
pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x\n",
skb, cons_idx, pkt_length, skb->protocol);
#endif
/*
 * Deliver the ptp packet to phy driver for RX timestamping
 */
if (unlikely(EDMA_RXPH_SERVICE_CODE_GET(rxph) ==
NSS_PTP_EVENT_SERVICE_CODE))
nss_phy_tstamp_rx_buf(ndev, skb);
else
netif_receive_skb(skb);
next_rx_desc:
/*
 * Update consumer index
 */
if (++cons_idx == rxdesc_ring->count)
cons_idx = 0;
/*
 * Update work done
 */
work_done++;
}
/* Replenish the RxFill ring paired with this RxDesc ring */
edma_alloc_rx_buffer(ehw, rxdesc_ring->rxfill);
/*
 * make sure the consumer index is updated
 * before updating the hardware
 */
wmb();
edma_reg_write(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id), cons_idx);
return work_done;
}
/*
 * edma_napi()
 *	EDMA NAPI handler
 *
 * Polls every RxDesc ring (packet reception), TxCmpl ring (transmit
 * completion reaping) and RxFill ring (buffer replenishment), restarts any
 * stopped netdev Tx queues, and - when the budget was not exhausted -
 * completes NAPI and re-enables the per-ring interrupt masks that
 * edma_handle_irq() disabled.
 *
 * Returns the combined amount of work done across all rings.
 */
int edma_napi(struct napi_struct *napi, int budget)
{
	struct edma_hw *ehw = container_of(napi, struct edma_hw, napi);
	struct edma_txcmpl_ring *txcmpl_ring = NULL;
	struct edma_rxdesc_ring *rxdesc_ring = NULL;
	struct edma_rxfill_ring *rxfill_ring = NULL;
	struct net_device *ndev;
	int work_done = 0;
	int i;

	/* Reap received packets */
	for (i = 0; i < ehw->rxdesc_rings; i++) {
		rxdesc_ring = &ehw->rxdesc_ring[i];
		work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
	}

	/* Reap transmit completions */
	for (i = 0; i < ehw->txcmpl_rings; i++) {
		txcmpl_ring = &ehw->txcmpl_ring[i];
		work_done += edma_clean_tx(ehw, txcmpl_ring);
	}

	/* Replenish Rx buffers */
	for (i = 0; i < ehw->rxfill_rings; i++) {
		rxfill_ring = &ehw->rxfill_ring[i];
		work_done += edma_alloc_rx_buffer(ehw, rxfill_ring);
	}

	/*
	 * Resume netdev Tx queue
	 */
	/*
	 * TODO works currently since we have a single queue.
	 * Need to make sure we have support in place when there is
	 * support for multiple queues
	 */
	for (i = 0; i < EDMA_MAX_GMACS; i++) {
		ndev = ehw->netdev_arr[i];
		if (!ndev)
			continue;
		if (netif_queue_stopped(ndev) && netif_carrier_ok(ndev))
			netif_start_queue(ndev);
	}

	/*
	 * TODO - rework and fix the budget control
	 */
	if (work_done < budget) {
		/*
		 * TODO per core NAPI
		 */
		napi_complete(napi);

		/*
		 * Set RXDESC ring interrupt mask
		 */
		for (i = 0; i < ehw->rxdesc_rings; i++) {
			rxdesc_ring = &ehw->rxdesc_ring[i];
			edma_reg_write(
				EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
				ehw->rxdesc_intr_mask);
		}

		/*
		 * Set TXCMPL ring interrupt mask
		 */
		for (i = 0; i < ehw->txcmpl_rings; i++) {
			txcmpl_ring = &ehw->txcmpl_ring[i];
			edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
					ehw->txcmpl_intr_mask);
		}

		/*
		 * Set RXFILL ring interrupt mask.
		 * Use ehw (same object as the global edma_hw, since napi is
		 * embedded in it) for consistency with the other ring loops,
		 * instead of reaching through the global.
		 */
		for (i = 0; i < ehw->rxfill_rings; i++) {
			rxfill_ring = &ehw->rxfill_ring[i];
			edma_reg_write(EDMA_REG_RXFILL_INT_MASK(
					rxfill_ring->id),
					ehw->rxfill_intr_mask);
		}
	}

	return work_done;
}
/*
 * edma_ring_xmit()
 *	Transmit a packet using an EDMA ring
 *
 * Reserves one descriptor in @txdesc_ring (failing with EDMA_TX_DESC when
 * the ring is full or the skb store slot is occupied), prepends the EDMA
 * Tx preheader carrying the destination port and the skb store index,
 * DMA-maps the buffer, fills the descriptor and advances the hardware
 * producer index. PTP-marked skbs are first handed to the PHY driver for
 * Tx timestamping.
 *
 * Returns EDMA_TX_OK on success (including the DMA-map-failure path, where
 * the skb is dropped), or EDMA_TX_DESC when no descriptor was available.
 */
enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
struct net_device *netdev,
struct sk_buff *skb,
struct edma_txdesc_ring *txdesc_ring)
{
struct nss_dp_dev *dp_dev = netdev_priv(netdev);
struct edma_txdesc_desc *txdesc = NULL;
uint16_t buf_len = skb_headlen(skb);
uint16_t hw_next_to_use, hw_next_to_clean, chk_idx;
uint32_t data;
uint32_t store_index = 0;
struct edma_tx_preheader *txph = NULL;
/*
 * TODO - revisit locking
 */
spin_lock_bh(&txdesc_ring->tx_lock);
/*
 * Read TXDESC ring producer index
 */
data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
hw_next_to_use = data & EDMA_TXDESC_PROD_IDX_MASK;
/*
 * Read TXDESC ring consumer index
 */
/*
 * TODO - read to local variable to optimize uncached access
 */
data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
hw_next_to_clean = data & EDMA_TXDESC_CONS_IDX_MASK;
/*
 * Check for available Tx descriptor
 * (count is presumably a power of two -- the masks below rely on it)
 */
chk_idx = (hw_next_to_use + 1) & (txdesc_ring->count-1);
if (chk_idx == hw_next_to_clean) {
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_DESC;
}
/*
 * Deliver the ptp packet to phy driver for TX timestamping
 */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
nss_phy_tstamp_tx_buf(netdev, skb);
/*
 * Make room for Tx preheader
 */
txph = (struct edma_tx_preheader *)skb_push(skb,
EDMA_TX_PREHDR_SIZE);
memset((void *)txph, 0, EDMA_TX_PREHDR_SIZE);
/*
 * Populate Tx preheader dst info, port id is macid in dp_dev
 */
txph->dst_info = (EDMA_PREHDR_DSTINFO_PORTID_IND << 8) |
(dp_dev->macid & 0x0fff);
/*
 * Store the skb in tx_store; edma_clean_tx() retrieves it through
 * this index when the completion arrives
 */
store_index = hw_next_to_use & (txdesc_ring->count - 1);
if (unlikely(ehw->tx_skb_store[store_index] != NULL)) {
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_DESC;
}
ehw->tx_skb_store[store_index] = skb;
/* Embed the store index in the preheader (opaque) area */
memcpy(skb->data, &store_index, 4);
/*
 * Get Tx descriptor
 */
txdesc = EDMA_TXDESC_DESC(txdesc_ring, hw_next_to_use);
memset(txdesc, 0, sizeof(struct edma_txdesc_desc));
/*
 * Map buffer to DMA address
 */
txdesc->buffer_addr = cpu_to_le32(dma_map_single(&(ehw->pdev)->dev,
skb->data,
buf_len + EDMA_TX_PREHDR_SIZE,
DMA_TO_DEVICE));
if (!txdesc->buffer_addr) {
/*
 * DMA map failed for this address. Drop it
 * and make sure does not got to stack again
 */
dev_kfree_skb_any(skb);
ehw->tx_skb_store[store_index] = NULL;
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_OK;
}
/*
 * Populate Tx descriptor: preheader flag, data offset and length
 */
txdesc->word1 |= (1 << EDMA_TXDESC_PREHEADER_SHIFT)
| ((EDMA_TX_PREHDR_SIZE & EDMA_TXDESC_DATA_OFFSET_MASK)
<< EDMA_TXDESC_DATA_OFFSET_SHIFT);
txdesc->word1 |= ((buf_len & EDMA_TXDESC_DATA_LENGTH_MASK)
<< EDMA_TXDESC_DATA_LENGTH_SHIFT);
netdev_dbg(netdev, "skb:%px tx_ring:%u proto:0x%x\n",
skb, txdesc_ring->id, ntohs(skb->protocol));
netdev_dbg(netdev, "port:%u prod_idx:%u cons_idx:%u\n",
dp_dev->macid, hw_next_to_use, hw_next_to_clean);
/*
 * Update producer index
 */
hw_next_to_use = (hw_next_to_use + 1) & (txdesc_ring->count - 1);
/*
 * make sure the hw_next_to_use is updated before the
 * write to hardware
 */
wmb();
edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id),
hw_next_to_use & EDMA_TXDESC_PROD_IDX_MASK);
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_OK;
}
/*
 * edma_handle_misc_irq()
 *	Process miscellaneous EDMA error interrupts
 *
 * Reads the misc interrupt status, filters it through the configured misc
 * interrupt mask, and disables misc interrupts when any masked bit is set.
 * Returns IRQ_NONE when no masked bit was asserted, IRQ_HANDLED otherwise.
 * The interrupt is left disabled; no per-error handling is done yet.
 */
irqreturn_t edma_handle_misc_irq(int irq, void *ctx)
{
uint32_t misc_intr_status = 0;
uint32_t reg_data = 0;
struct edma_hw *ehw = NULL;
struct platform_device *pdev = (struct platform_device *)ctx;
ehw = platform_get_drvdata(pdev);
/*
 * Read Misc intr status
 */
reg_data = edma_reg_read(EDMA_REG_MISC_INT_STAT);
misc_intr_status = reg_data & ehw->misc_intr_mask;
/*
 * TODO - error logging
 */
if (misc_intr_status == 0)
return IRQ_NONE;
else
edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_DISABLE);
return IRQ_HANDLED;
}
/*
 * edma_handle_irq()
 *	Process ring IRQs and schedule NAPI
 *
 * Reads and disables the interrupt mask of every RxDesc, TxCmpl and RxFill
 * ring, accumulating their status bits. If any ring asserted an interrupt,
 * NAPI is scheduled to do the actual work; edma_napi() restores the masks
 * when the poll completes under budget.
 *
 * Returns IRQ_NONE when no ring status bit was set, IRQ_HANDLED otherwise.
 */
irqreturn_t edma_handle_irq(int irq, void *ctx)
{
	uint32_t reg_data = 0;
	uint32_t rxdesc_intr_status = 0;
	uint32_t txcmpl_intr_status = 0;
	uint32_t rxfill_intr_status = 0;
	int i;
	struct edma_txcmpl_ring *txcmpl_ring = NULL;
	struct edma_rxdesc_ring *rxdesc_ring = NULL;
	struct edma_rxfill_ring *rxfill_ring = NULL;
	struct edma_hw *ehw = NULL;
	struct platform_device *pdev = (struct platform_device *)ctx;

	ehw = platform_get_drvdata(pdev);
	if (!ehw) {
		/* Fixed typo ("platrofm") and added newline terminator */
		pr_info("Unable to retrieve platform data\n");
		return IRQ_HANDLED;
	}

	/*
	 * Read RxDesc intr status
	 */
	for (i = 0; i < ehw->rxdesc_rings; i++) {
		rxdesc_ring = &ehw->rxdesc_ring[i];
		reg_data = edma_reg_read(
				EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
		rxdesc_intr_status |= reg_data &
				EDMA_RXDESC_RING_INT_STATUS_MASK;

		/*
		 * Disable RxDesc intr
		 */
		edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
				EDMA_MASK_INT_DISABLE);
	}

	/*
	 * Read TxCmpl intr status
	 */
	for (i = 0; i < ehw->txcmpl_rings; i++) {
		txcmpl_ring = &ehw->txcmpl_ring[i];
		reg_data = edma_reg_read(
				EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
		txcmpl_intr_status |= reg_data &
				EDMA_TXCMPL_RING_INT_STATUS_MASK;

		/*
		 * Disable TxCmpl intr
		 */
		edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
				EDMA_MASK_INT_DISABLE);
	}

	/*
	 * Read RxFill intr status
	 */
	for (i = 0; i < ehw->rxfill_rings; i++) {
		rxfill_ring = &ehw->rxfill_ring[i];
		reg_data = edma_reg_read(
				EDMA_REG_RXFILL_INT_STAT(rxfill_ring->id));
		rxfill_intr_status |= reg_data &
				EDMA_RXFILL_RING_INT_STATUS_MASK;

		/*
		 * Disable RxFill intr
		 */
		edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
				EDMA_MASK_INT_DISABLE);
	}

	if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0) &&
			(rxfill_intr_status == 0))
		return IRQ_NONE;

	/*
	 * NOTE(review): the RxDesc masks were already cleared in the first
	 * loop above; this second pass is redundant but harmless, and is
	 * kept to preserve the original register write sequence.
	 */
	for (i = 0; i < ehw->rxdesc_rings; i++) {
		rxdesc_ring = &ehw->rxdesc_ring[i];
		edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
				EDMA_MASK_INT_DISABLE);
	}

	/*
	 * TODO - per core NAPI
	 */
	if (rxdesc_intr_status || txcmpl_intr_status || rxfill_intr_status)
		if (likely(napi_schedule_prep(&ehw->napi)))
			__napi_schedule(&ehw->napi);

	return IRQ_HANDLED;
}

View File

@@ -0,0 +1,697 @@
/*
**************************************************************************
* Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __QCOM_DEV_H__
#define __QCOM_DEV_H__
#include <nss_dp_hal_if.h>
#include "qcom_reg.h"
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
/*
 * Subclass for base nss_gmac_hal_dev
 *
 * Extends the generic GMAC HAL context with a cached copy of the SSDK
 * MIB counters; the base struct must stay first so pointers can be cast
 * between the two types (see qcom_get_stats()).
 */
struct qcom_hal_dev {
	struct nss_gmac_hal_dev nghd;	/* Base class; must be first member */
	fal_mib_counter_t stats;	/* Stats structure (SSDK MIB counters) */
};
/*
 * qcom_set_rx_flow_ctrl()
 *	Enable RX flow control in the QCOM_MAC_ENABLE register.
 */
static inline void qcom_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE);
}

/*
 * qcom_clear_rx_flow_ctrl()
 *	Disable RX flow control in the QCOM_MAC_ENABLE register.
 */
static inline void qcom_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE);
}

/*
 * qcom_set_tx_flow_ctrl()
 *	Enable TX flow control in the QCOM_MAC_ENABLE register.
 */
static inline void qcom_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE);
}

/*
 * qcom_clear_tx_flow_ctrl()
 *	Disable TX flow control in the QCOM_MAC_ENABLE register.
 */
static inline void qcom_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE);
}

/*
 * qcom_clear_mac_ctrl0()
 *	Write zero to the whole MAC_CTRL0 register.
 */
static inline void qcom_clear_mac_ctrl0(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, 0);
}

/*
 * qcom_rx_enable()
 *	Enable frame reception (sets QCOM_RX_MAC_ENABLE).
 */
static inline void qcom_rx_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE);
}

/*
 * qcom_rx_disable()
 *	Disable the reception of frames on GMII/MII.
 *	GMAC receive state machine is disabled after completion of reception of
 *	current frame.
 */
static inline void qcom_rx_disable(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE);
}

/*
 * qcom_tx_enable()
 *	Enable frame transmission (sets QCOM_TX_MAC_ENABLE).
 */
static inline void qcom_tx_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE);
}

/*
 * qcom_tx_disable()
 *	Disable the transmission of frames on GMII/MII.
 *	GMAC transmit state machine is disabled after completion of
 *	transmission of current frame.
 */
static inline void qcom_tx_disable(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE);
}

/*
 * qcom_set_full_duplex()
 *	Select full-duplex operation (sets QCOM_DUPLEX).
 */
static inline void qcom_set_full_duplex(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX);
}

/*
 * qcom_set_half_duplex()
 *	Select half-duplex operation (clears QCOM_DUPLEX).
 */
static inline void qcom_set_half_duplex(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX);
}
/*
 * qcom_set_ipgt()
 *	Program the back-to-back inter-packet gap (IPGT) field of MAC_CTRL0.
 *
 * The shifted value is masked to the field width (QCOM_IPGT_POS) so an
 * out-of-range argument cannot corrupt neighbouring bits of the register;
 * the original code OR-ed the unmasked value in.
 */
static inline void qcom_set_ipgt(struct nss_gmac_hal_dev *nghd, uint32_t ipgt)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0);
	data &= ~QCOM_IPGT_POS;
	data |= (ipgt << QCOM_IPGT_LSB) & QCOM_IPGT_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data);
}
/*
 * qcom_set_ipgr()
 *	Program the IPGR2 inter-packet gap field of MAC_CTRL0.
 *
 * The shifted value is masked to the field width (QCOM_IPGR2_POS) so an
 * out-of-range argument cannot corrupt neighbouring bits of the register.
 */
static inline void qcom_set_ipgr(struct nss_gmac_hal_dev *nghd, uint32_t ipgr)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0);
	data &= ~QCOM_IPGR2_POS;
	data |= (ipgr << QCOM_IPGR2_LSB) & QCOM_IPGR2_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data);
}
/*
 * qcom_set_half_thdf_ctrl()
 *	Set the QCOM_HALF_THDF_CTRL bit in MAC_CTRL0.
 */
static inline void qcom_set_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL);
}

/*
 * qcom_reset_half_thdf_ctrl()
 *	Clear the QCOM_HALF_THDF_CTRL bit in MAC_CTRL0.
 */
static inline void qcom_reset_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL);
}

/*
 * qcom_set_frame_len_chk()
 *	Enable frame length checking (sets QCOM_FLCHK in MAC_CTRL0).
 */
static inline void qcom_set_frame_len_chk(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK);
}

/*
 * qcom_reset_frame_len_chk()
 *	Disable frame length checking (clears QCOM_FLCHK in MAC_CTRL0).
 */
static inline void qcom_reset_frame_len_chk(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK);
}

/*
 * qcom_set_abebe()
 *	Set the QCOM_ABEBE bit in MAC_CTRL0.
 */
static inline void qcom_set_abebe(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE);
}

/*
 * qcom_reset_abebe()
 *	Clear the QCOM_ABEBE bit in MAC_CTRL0.
 */
static inline void qcom_reset_abebe(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE);
}

/*
 * qcom_set_amaxe()
 *	Set the QCOM_AMAXE bit in MAC_CTRL0.
 */
static inline void qcom_set_amaxe(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE);
}

/*
 * qcom_reset_amaxe()
 *	Clear the QCOM_AMAXE bit in MAC_CTRL0.
 */
static inline void qcom_reset_amaxe(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE);
}

/*
 * qcom_set_bpnb()
 *	Set the QCOM_BPNB bit in MAC_CTRL0.
 */
static inline void qcom_set_bpnb(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB);
}

/*
 * qcom_reset_bpnb()
 *	Clear the QCOM_BPNB bit in MAC_CTRL0.
 */
static inline void qcom_reset_bpnb(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB);
}

/*
 * qcom_set_nobo()
 *	Set the QCOM_NOBO bit in MAC_CTRL0.
 */
static inline void qcom_set_nobo(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO);
}

/*
 * qcom_reset_nobo()
 *	Clear the QCOM_NOBO bit in MAC_CTRL0.
 */
static inline void qcom_reset_nobo(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO);
}

/*
 * qcom_set_drbnib_rxok()
 *	Set the QCOM_DRBNIB_RXOK bit in MAC_CTRL0.
 */
static inline void qcom_set_drbnib_rxok(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK);
}

/*
 * qcom_reset_drbnib_rxok()
 *	Clear the QCOM_DRBNIB_RXOK bit in MAC_CTRL0.
 */
static inline void qcom_reset_drbnib_rxok(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK);
}
/*
 * qcom_set_jam_ipg()
 *	Program the JAM IPG field of MAC_CTRL1.
 *
 * The shifted value is masked to the field width (QCOM_JAM_IPG_POS) so an
 * out-of-range argument cannot corrupt neighbouring bits of the register.
 */
static inline void qcom_set_jam_ipg(struct nss_gmac_hal_dev *nghd,
				    uint32_t jam_ipg)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
	data &= ~QCOM_JAM_IPG_POS;
	data |= (jam_ipg << QCOM_JAM_IPG_LSB) & QCOM_JAM_IPG_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
 * qcom_set_ctrl1_test_pause()
 *	Set the QCOM_TPAUSE (test pause) bit in MAC_CTRL1.
 */
static inline void qcom_set_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE);
}

/*
 * qcom_reset_ctrl1_test_pause()
 *	Clear the QCOM_TPAUSE (test pause) bit in MAC_CTRL1.
 */
static inline void qcom_reset_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE);
}

/*
 * qcom_set_tctl()
 *	Set the QCOM_TCTL bit in MAC_CTRL1.
 *	(Original header comment mislabelled this as
 *	qcom_reset_ctrl1_test_pause(); corrected.)
 */
static inline void qcom_set_tctl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL);
}

/*
 * qcom_reset_tctl()
 *	Clear the QCOM_TCTL bit in MAC_CTRL1.
 */
static inline void qcom_reset_tctl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL);
}

/*
 * qcom_set_sstct()
 *	Set the QCOM_SSTCT bit in MAC_CTRL1.
 */
static inline void qcom_set_sstct(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT);
}

/*
 * qcom_reset_sstct()
 *	Clear the QCOM_SSTCT bit in MAC_CTRL1.
 */
static inline void qcom_reset_sstct(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT);
}

/*
 * qcom_set_simr()
 *	Set the QCOM_SIMR bit in MAC_CTRL1.
 */
static inline void qcom_set_simr(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR);
}

/*
 * qcom_reset_simr()
 *	Clear the QCOM_SIMR bit in MAC_CTRL1.
 */
static inline void qcom_reset_simr(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR);
}
/*
 * qcom_set_retry()
 *	Program the collision-retry count field of MAC_CTRL1.
 *
 * The shifted value is masked to the field width (QCOM_RETRY_POS) so an
 * out-of-range argument cannot corrupt neighbouring bits of the register.
 */
static inline void qcom_set_retry(struct nss_gmac_hal_dev *nghd, uint32_t retry)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
	data &= ~QCOM_RETRY_POS;
	data |= (retry << QCOM_RETRY_LSB) & QCOM_RETRY_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
 * qcom_set_prlen()
 *	Program the preamble length field of MAC_CTRL1.
 *
 * NOTE(review): QCOM_PRLEN_POS (0x0000f000, bits 15:12) and
 * QCOM_PRLEN_LSB (8) are inconsistent — the value is shifted into bits
 * 11:8 while bits 15:12 are cleared. One of the two defines looks wrong;
 * confirm against the MAC_CTRL1 register description before changing.
 */
static inline void qcom_set_prlen(struct nss_gmac_hal_dev *nghd, uint32_t prlen)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
	data &= ~QCOM_PRLEN_POS;
	prlen = prlen << QCOM_PRLEN_LSB;
	data |= prlen;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
 * qcom_set_ppad()
 *	Set the QCOM_PPAD bit in MAC_CTRL1.
 */
static inline void qcom_set_ppad(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD);
}

/*
 * qcom_reset_ppad()
 *	Clear the QCOM_PPAD bit in MAC_CTRL1.
 */
static inline void qcom_reset_ppad(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD);
}

/*
 * qcom_set_povr()
 *	Set the QCOM_POVR bit in MAC_CTRL1.
 */
static inline void qcom_set_povr(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR);
}

/*
 * qcom_reset_povr()
 *	Clear the QCOM_POVR bit in MAC_CTRL1.
 */
static inline void qcom_reset_povr(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR);
}

/*
 * qcom_set_phug()
 *	Set the QCOM_PHUG bit in MAC_CTRL1.
 */
static inline void qcom_set_phug(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG);
}

/*
 * qcom_reset_phug()
 *	Clear the QCOM_PHUG bit in MAC_CTRL1.
 */
static inline void qcom_reset_phug(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG);
}

/*
 * qcom_set_mbof()
 *	Set the QCOM_MBOF bit in MAC_CTRL1.
 */
static inline void qcom_set_mbof(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF);
}

/*
 * qcom_reset_mbof()
 *	Clear the QCOM_MBOF bit in MAC_CTRL1.
 */
static inline void qcom_reset_mbof(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF);
}
/*
 * qcom_set_lcol()
 *	Program the late-collision window field of MAC_CTRL1.
 *
 * The shifted value is masked to the field width (QCOM_LCOL_POS) so an
 * out-of-range argument cannot corrupt neighbouring bits of the register.
 */
static inline void qcom_set_lcol(struct nss_gmac_hal_dev *nghd, uint32_t lcol)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
	data &= ~QCOM_LCOL_POS;
	data |= (lcol << QCOM_LCOL_LSB) & QCOM_LCOL_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
 * qcom_set_long_jam()
 *	Set the QCOM_LONG_JAM bit in MAC_CTRL1.
 */
static inline void qcom_set_long_jam(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM);
}

/*
 * qcom_reset_long_jam()
 *	Clear the QCOM_LONG_JAM bit in MAC_CTRL1.
 */
static inline void qcom_reset_long_jam(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM);
}
/*
 * qcom_set_ipg_dec_len()
 *	Set the QCOM_IPG_DEC_LEN bit in MAC_CTRL2.
 */
static inline void qcom_set_ipg_dec_len(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN);
}

/*
 * qcom_reset_ipg_dec_len()
 *	Clear the QCOM_IPG_DEC_LEN bit in MAC_CTRL2.
 */
static inline void qcom_reset_ipg_dec_len(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN);
}

/*
 * qcom_set_ctrl2_test_pause()
 *	Set the QCOM_TEST_PAUSE bit in MAC_CTRL2 (used by
 *	qcom_send_pause_frame() in qcom.c to emit a pause frame).
 */
static inline void qcom_set_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE);
}

/*
 * qcom_reset_ctrl2_test_pause()
 *	Clear the QCOM_TEST_PAUSE bit in MAC_CTRL2.
 */
static inline void qcom_reset_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE);
}

/*
 * qcom_set_mac_loopback()
 *	Enable MAC-level loopback (sets QCOM_MAC_LOOPBACK in MAC_CTRL2).
 */
static inline void qcom_set_mac_loopback(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK);
}

/*
 * qcom_reset_mac_loopback()
 *	Disable MAC-level loopback (clears QCOM_MAC_LOOPBACK in MAC_CTRL2).
 */
static inline void qcom_reset_mac_loopback(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK);
}

/*
 * qcom_set_ipg_dec()
 *	Set the QCOM_IPG_DEC bit in MAC_CTRL2.
 */
static inline void qcom_set_ipg_dec(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC);
}

/*
 * qcom_reset_ipg_dec()
 *	Clear the QCOM_IPG_DEC bit in MAC_CTRL2.
 */
static inline void qcom_reset_ipg_dec(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC);
}

/*
 * qcom_set_crs_sel()
 *	Set the QCOM_SRS_SEL bit in MAC_CTRL2.
 *	NOTE(review): the function is named "crs_sel" but the define is
 *	QCOM_SRS_SEL — presumably the same CRS-select bit; confirm.
 */
static inline void qcom_set_crs_sel(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL);
}

/*
 * qcom_reset_crs_sel()
 *	Clear the QCOM_SRS_SEL bit in MAC_CTRL2.
 */
static inline void qcom_reset_crs_sel(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL);
}

/*
 * qcom_set_crc_rsv()
 *	Set the QCOM_CRC_RSV bit in MAC_CTRL2.
 */
static inline void qcom_set_crc_rsv(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV);
}

/*
 * qcom_reset_crc_rsv()
 *	Clear the QCOM_CRC_RSV bit in MAC_CTRL2.
 */
static inline void qcom_reset_crc_rsv(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV);
}
/*
 * qcom_set_ipgr1()
 *	Program the IPGR1 field of MAC_DBG_CTRL.
 *
 * The shifted value is masked to the field width (QCOM_DBG_IPGR1_POS) so
 * an out-of-range argument cannot corrupt neighbouring bits.
 */
static inline void qcom_set_ipgr1(struct nss_gmac_hal_dev *nghd, uint32_t ipgr1)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
	data &= ~QCOM_DBG_IPGR1_POS;
	data |= (ipgr1 << QCOM_DBG_IPGR1_LSB) & QCOM_DBG_IPGR1_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
 * qcom_set_hihg_ipg()
 *	Program the high-IPG field of MAC_DBG_CTRL.
 *	(The "hihg" spelling is kept — it is part of the public name.)
 *
 * The shifted value is masked to the field width (QCOM_DBG_HIHG_IPG_POS)
 * so an out-of-range argument cannot corrupt neighbouring bits.
 */
static inline void qcom_set_hihg_ipg(struct nss_gmac_hal_dev *nghd,
				     uint32_t hihg_ipg)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
	data &= ~QCOM_DBG_HIHG_IPG_POS;
	data |= (hihg_ipg << QCOM_DBG_HIHG_IPG_LSB) & QCOM_DBG_HIHG_IPG_POS;
	hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
 * qcom_set_mac_ipg_ctrl()
 *	Program the MAC IPG control field of MAC_DBG_CTRL.
 *
 * NOTE(review): QCOM_DBG_MAC_IPG_CTRL_POS (0x0000ff00, bits 15:8) and
 * QCOM_DBG_MAC_IPG_CTRL_LSB (20) are inconsistent — the value is shifted
 * into bits 27:20 while bits 15:8 are cleared. One of the defines looks
 * wrong; confirm against the MAC_DBG_CTRL register description.
 */
static inline void qcom_set_mac_ipg_ctrl(struct nss_gmac_hal_dev *nghd,
					 uint32_t mac_ipg_ctrl)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
	data &= ~QCOM_DBG_MAC_IPG_CTRL_POS;
	data |= mac_ipg_ctrl << QCOM_DBG_MAC_IPG_CTRL_LSB;
	hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
 * qcom_set_mac_len_ctrl()
 *	Set the QCOM_DBG_MAC_LEN_CTRL bit in MAC_DBG_CTRL.
 */
static inline void qcom_set_mac_len_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL);
}

/*
 * qcom_reset_mac_len_ctrl()
 *	Clear the QCOM_DBG_MAC_LEN_CTRL bit in MAC_DBG_CTRL.
 */
static inline void qcom_reset_mac_len_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL);
}

/*
 * qcom_set_edxsdfr_transmit()
 *	Set the QCOM_DBG_EDxSDFR_TRANS bit in MAC_DBG_CTRL.
 */
static inline void qcom_set_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS);
}

/*
 * qcom_reset_edxsdfr_transmit()
 *	Clear the QCOM_DBG_EDxSDFR_TRANS bit in MAC_DBG_CTRL.
 */
static inline void qcom_reset_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS);
}

/*
 * qcom_set_mac_dbg_addr()
 *	Write the debug address register (selects which debug word
 *	qcom_set_mac_dbg_data() writes to).
 */
static inline void qcom_set_mac_dbg_addr(struct nss_gmac_hal_dev *nghd,
					 uint8_t mac_dbg_addr)
{
	hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_ADDR, mac_dbg_addr);
}

/*
 * qcom_set_mac_dbg_data()
 *	Write the debug data register.
 */
static inline void qcom_set_mac_dbg_data(struct nss_gmac_hal_dev *nghd,
					 uint32_t mac_dbg_data)
{
	hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_DATA, mac_dbg_data);
}

/*
 * qcom_set_mac_jumbosize()
 *	Program the jumbo frame size register.
 */
static inline void qcom_set_mac_jumbosize(struct nss_gmac_hal_dev *nghd,
					  uint16_t mac_jumbo_size)
{
	hal_write_reg(nghd->mac_base, QCOM_MAC_JMB_SIZE, mac_jumbo_size);
}

/*
 * qcom_clear_mib_ctrl()
 *	Write zero to the MIB control register (disables MIB counting).
 */
static inline void qcom_clear_mib_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, QCOM_MAC_MIB_CTRL, 0);
}

/*
 * qcom_set_mib_ctrl()
 *	OR the given control bits (QCOM_MIB_ENABLE/RESET/RD_CLR) into the
 *	MIB control register.
 */
static inline void qcom_set_mib_ctrl(struct nss_gmac_hal_dev *nghd,
				     int mib_settings)
{
	hal_set_reg_bits(nghd, QCOM_MAC_MIB_CTRL,
			 mib_settings);
}
/*
 * qcom_get_stats()
 *	Refresh the cached MIB counters for this MAC from the SSDK.
 *
 * Returns 0 on success, -1 if fal_mib_counter_get() reports an error.
 */
static int qcom_get_stats(struct nss_gmac_hal_dev *nghd)
{
	struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;

	return (fal_mib_counter_get(0, nghd->mac_id, &qhd->stats) < 0) ? -1 : 0;
}
#endif /* __QCOM_DEV_H__ */

View File

@@ -0,0 +1,479 @@
/*
**************************************************************************
* Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal_if.h>
#include <nss_dp_dev.h>
#include "qcom_dev.h"
/* Byte offset of member 'm' inside the SSDK MIB counter structure */
#define QCOM_STAT(m) offsetof(fal_mib_counter_t, m)

/*
 * Ethtool stats pointer structure
 *
 * Pairs an ethtool stat name with its field offset in fal_mib_counter_t.
 */
struct qcom_ethtool_stats {
	uint8_t stat_string[ETH_GSTRING_LEN];	/* NUL-terminated stat name */
	uint32_t stat_offset;			/* offset from QCOM_STAT() */
};
/*
 * Array of strings describing statistics
 *
 * Maps each exported ethtool stat name to its fal_mib_counter_t field.
 * NOTE(review): the ranged RX/TX buckets map to fields named after the
 * bucket's upper bound + 1 (e.g. "rx_pkt65to127" -> Rx128Byte); this
 * mirrors SSDK naming — confirm against fal_mib.h before reordering.
 */
static const struct qcom_ethtool_stats qcom_gstrings_stats[] = {
	{"rx_broadcast", QCOM_STAT(RxBroad)},
	{"rx_pause", QCOM_STAT(RxPause)},
	{"rx_unicast", QCOM_STAT(RxUniCast)},
	{"rx_multicast", QCOM_STAT(RxMulti)},
	{"rx_fcserr", QCOM_STAT(RxFcsErr)},
	{"rx_alignerr", QCOM_STAT(RxAllignErr)},
	{"rx_runt", QCOM_STAT(RxRunt)},
	{"rx_frag", QCOM_STAT(RxFragment)},
	{"rx_jmbfcserr", QCOM_STAT(RxJumboFcsErr)},
	{"rx_jmbalignerr", QCOM_STAT(RxJumboAligenErr)},
	{"rx_pkt64", QCOM_STAT(Rx64Byte)},
	{"rx_pkt65to127", QCOM_STAT(Rx128Byte)},
	{"rx_pkt128to255", QCOM_STAT(Rx256Byte)},
	{"rx_pkt256to511", QCOM_STAT(Rx512Byte)},
	{"rx_pkt512to1023", QCOM_STAT(Rx1024Byte)},
	{"rx_pkt1024to1518", QCOM_STAT(Rx1518Byte)},
	{"rx_pkt1519tox", QCOM_STAT(RxMaxByte)},
	{"rx_toolong", QCOM_STAT(RxTooLong)},
	{"rx_pktgoodbyte", QCOM_STAT(RxGoodByte)},
	{"rx_pktbadbyte", QCOM_STAT(RxBadByte)},
	{"rx_overflow", QCOM_STAT(RxOverFlow)},
	{"tx_broadcast", QCOM_STAT(TxBroad)},
	{"tx_pause", QCOM_STAT(TxPause)},
	{"tx_multicast", QCOM_STAT(TxMulti)},
	{"tx_underrun", QCOM_STAT(TxUnderRun)},
	{"tx_pkt64", QCOM_STAT(Tx64Byte)},
	{"tx_pkt65to127", QCOM_STAT(Tx128Byte)},
	{"tx_pkt128to255", QCOM_STAT(Tx256Byte)},
	{"tx_pkt256to511", QCOM_STAT(Tx512Byte)},
	{"tx_pkt512to1023", QCOM_STAT(Tx1024Byte)},
	{"tx_pkt1024to1518", QCOM_STAT(Tx1518Byte)},
	{"tx_pkt1519tox", QCOM_STAT(TxMaxByte)},
	{"tx_oversize", QCOM_STAT(TxOverSize)},
	{"tx_pktbyte_h", QCOM_STAT(TxByte)},
	{"tx_collisions", QCOM_STAT(TxCollision)},
	{"tx_abortcol", QCOM_STAT(TxAbortCol)},
	{"tx_multicol", QCOM_STAT(TxMultiCol)},
	{"tx_singlecol", QCOM_STAT(TxSingalCol)},
	{"tx_exesdeffer", QCOM_STAT(TxExcDefer)},
	{"tx_deffer", QCOM_STAT(TxDefer)},
	{"tx_latecol", QCOM_STAT(TxLateCol)},
	{"tx_unicast", QCOM_STAT(TxUniCast)},
};
/*
 * Array of strings describing private flag names
 * (exposed via ethtool's ETH_SS_PRIV_FLAGS string set)
 */
static const char * const qcom_strings_priv_flags[] = {
	"linkpoll",
	"tstamp",
	"tsmode",
};

/* Entry counts for the two string sets above */
#define QCOM_STATS_LEN ARRAY_SIZE(qcom_gstrings_stats)
#define QCOM_PRIV_FLAGS_LEN ARRAY_SIZE(qcom_strings_priv_flags)
/*
 * qcom_set_mac_speed()
 *	Deprecated entry point; only logs a warning and reports success.
 */
static int32_t qcom_set_mac_speed(struct nss_gmac_hal_dev *nghd,
				  uint32_t mac_speed)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}

/*
 * qcom_get_mac_speed()
 *	Deprecated entry point; only logs a warning and returns 0.
 */
static uint32_t qcom_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}

/*
 * qcom_set_duplex_mode()
 *	Deprecated entry point; only logs a warning.
 */
static void qcom_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
				 uint8_t duplex_mode)
{
	netdev_warn(nghd->netdev, "This API deprecated\n");
}

/*
 * qcom_get_duplex_mode()
 *	Deprecated entry point; only logs a warning and returns 0.
 */
static uint8_t qcom_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}
/*
 * qcom_rx_flow_control()
 *	Enable or disable RX flow control according to 'enabled'.
 */
static void qcom_rx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled)
{
	if (!enabled) {
		qcom_clear_rx_flow_ctrl(nghd);
		return;
	}

	qcom_set_rx_flow_ctrl(nghd);
}

/*
 * qcom_tx_flow_control()
 *	Enable or disable TX flow control according to 'enabled'.
 */
static void qcom_tx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled)
{
	if (!enabled) {
		qcom_clear_tx_flow_ctrl(nghd);
		return;
	}

	qcom_set_tx_flow_ctrl(nghd);
}
/*
 * qcom_get_mib_stats()
 *	Refresh the cached MIB counters; 0 on success, -1 on failure.
 */
static int32_t qcom_get_mib_stats(struct nss_gmac_hal_dev *nghd)
{
	return qcom_get_stats(nghd) ? -1 : 0;
}
/*
 * qcom_set_maxframe()
 *	Set the port's maximum frame size via the SSDK FAL API.
 *	Returns the FAL status code (0 on success).
 */
static int32_t qcom_set_maxframe(struct nss_gmac_hal_dev *nghd,
				 uint32_t maxframe)
{
	return fal_port_max_frame_size_set(0, nghd->mac_id, maxframe);
}
/*
 * qcom_get_maxframe()
 *	Read the port's maximum frame size via the SSDK FAL API.
 *	Returns the size on success, or the (non-zero) FAL error code.
 */
static int32_t qcom_get_maxframe(struct nss_gmac_hal_dev *nghd)
{
	uint32_t mtu;
	int err;

	err = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
	if (err)
		return err;

	return mtu;
}
/*
 * qcom_get_netdev_stats()
 *	Translate the cached SSDK MIB counters into rtnl_link_stats64.
 *
 * Refreshes the MIB snapshot first; returns -1 if the refresh fails,
 * 0 otherwise.
 */
static int32_t qcom_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
				     struct rtnl_link_stats64 *stats)
{
	struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;
	fal_mib_counter_t *mib = &qhd->stats;

	if (qcom_get_mib_stats(nghd))
		return -1;

	/* Packet and byte totals */
	stats->rx_packets = mib->RxUniCast + mib->RxBroad + mib->RxMulti;
	stats->tx_packets = mib->TxUniCast + mib->TxBroad + mib->TxMulti;
	stats->rx_bytes = mib->RxGoodByte;
	stats->tx_bytes = mib->TxByte;

	/* RX errors */
	stats->rx_crc_errors = mib->RxFcsErr + mib->RxJumboFcsErr;
	stats->rx_frame_errors = mib->RxAllignErr + mib->RxJumboAligenErr;
	stats->rx_fifo_errors = mib->RxRunt;
	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors +
			   stats->rx_fifo_errors;
	stats->rx_dropped = mib->RxTooLong + stats->rx_errors;

	/* TX errors */
	stats->tx_fifo_errors = mib->TxUnderRun;
	stats->tx_aborted_errors = mib->TxAbortCol;
	stats->tx_errors = stats->tx_fifo_errors + stats->tx_aborted_errors;

	stats->collisions = mib->TxCollision;
	stats->multicast = mib->RxMulti;

	return 0;
}
/*
 * qcom_get_strset_count()
 *	Get string set count for ethtool operations
 *
 * Returns the number of entries in the requested string set, or -EPERM
 * for an unknown set.
 */
int32_t qcom_get_strset_count(struct nss_gmac_hal_dev *nghd, int32_t sset)
{
	if (sset == ETH_SS_STATS)
		return QCOM_STATS_LEN;

	if (sset == ETH_SS_PRIV_FLAGS)
		return QCOM_PRIV_FLAGS_LEN;

	netdev_dbg(nghd->netdev, "%s: Invalid string set\n", __func__);
	return -EPERM;
}
/*
 * qcom_get_strings()
 *	Fill 'data' with the string table for the requested ethtool set.
 *
 * Each entry occupies ETH_GSTRING_LEN bytes. The terminating NUL is now
 * copied too (strlen() + 1): the original memcpy of strlen() bytes left
 * each slot unterminated unless the caller's buffer happened to be
 * pre-zeroed. All table strings are shorter than ETH_GSTRING_LEN, so the
 * extra byte never overruns a slot.
 */
int32_t qcom_get_strings(struct nss_gmac_hal_dev *nghd, int32_t sset,
			 uint8_t *data)
{
	struct net_device *netdev = nghd->netdev;
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < QCOM_STATS_LEN; i++) {
			memcpy(data, qcom_gstrings_stats[i].stat_string,
			       strlen(qcom_gstrings_stats[i].stat_string) + 1);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < QCOM_PRIV_FLAGS_LEN; i++) {
			memcpy(data, qcom_strings_priv_flags[i],
			       strlen(qcom_strings_priv_flags[i]) + 1);
			data += ETH_GSTRING_LEN;
		}
		break;

	default:
		netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
		return -EPERM;
	}

	return 0;
}
/*
 * qcom_get_eth_stats()
 *	Copy the cached MIB counters into the ethtool stats array, in
 *	qcom_gstrings_stats[] order.
 *
 * Returns -1 if the MIB refresh fails, 0 otherwise.
 *
 * NOTE(review): each counter is read as a fixed 32-bit value
 * (*(uint32_t *)p); if any fal_mib_counter_t field is wider than 32 bits
 * this truncates (and depends on endianness) — confirm the field widths
 * in fal_mib.h.
 */
static int32_t qcom_get_eth_stats(struct nss_gmac_hal_dev *nghd, uint64_t *data)
{
	struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;
	fal_mib_counter_t *stats = &(qhd->stats);
	uint8_t *p;
	int i;

	if (qcom_get_mib_stats(nghd))
		return -1;

	for (i = 0; i < QCOM_STATS_LEN; i++) {
		/* Locate the field via its precomputed offset */
		p = (uint8_t *)stats + qcom_gstrings_stats[i].stat_offset;
		data[i] = *(uint32_t *)p;
	}

	return 0;
}
/*
 * qcom_send_pause_frame()
 *	Start pause-frame transmission by setting the CTRL2 test-pause bit.
 */
static void qcom_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	qcom_set_ctrl2_test_pause(nghd);
}

/*
 * qcom_stop_pause_frame()
 *	Stop pause-frame transmission by clearing the CTRL2 test-pause bit.
 */
static void qcom_stop_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	qcom_reset_ctrl2_test_pause(nghd);
}
/*
 * qcom_start()
 *	Bring the MAC up: force full duplex, set speed, enable TX/RX.
 *
 * Returns 0 on success, -1 if the speed setup fails. (As visible above,
 * qcom_set_mac_speed() is deprecated and always returns 0, so the error
 * path is currently unreachable.)
 */
static int32_t qcom_start(struct nss_gmac_hal_dev *nghd)
{
	qcom_set_full_duplex(nghd);

	/* TODO: Read speed from dts */
	if (qcom_set_mac_speed(nghd, SPEED_1000))
		return -1;

	qcom_tx_enable(nghd);
	qcom_rx_enable(nghd);

	netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
		   __func__, nghd->mac_base,
		   hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
	return 0;
}
/*
 * qcom_stop()
 *	Bring the MAC down: disable TX then RX. Always returns 0.
 */
static int32_t qcom_stop(struct nss_gmac_hal_dev *nghd)
{
	qcom_tx_disable(nghd);
	qcom_rx_disable(nghd);

	netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
		   __func__, nghd->mac_base,
		   hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
	return 0;
}
/*
 * qcom_init()
 *	Allocate and initialise the QCOM HAL context for one GMAC.
 *
 * Maps the MAC's register window (first MEM resource of the platform
 * device) and flushes its MIB counters. All allocations are devm-managed,
 * so early-return error paths leak nothing. Returns the new context
 * (cast to the base nss_gmac_hal_dev) or NULL on failure.
 */
static void *qcom_init(struct gmac_hal_platform_data *gmacpdata)
{
	struct qcom_hal_dev *qhd = NULL;
	struct net_device *ndev = NULL;
	struct nss_dp_dev *dp_priv = NULL;
	struct resource *res;

	ndev = gmacpdata->netdev;
	dp_priv = netdev_priv(ndev);

	res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		netdev_dbg(ndev, "Resource get failed.\n");
		return NULL;
	}

	if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start,
				     resource_size(res), ndev->name)) {
		netdev_dbg(ndev, "Request mem region failed. Returning...\n");
		return NULL;
	}

	qhd = (struct qcom_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
						  sizeof(struct qcom_hal_dev),
						  GFP_KERNEL);
	if (!qhd) {
		netdev_dbg(ndev, "kzalloc failed. Returning...\n");
		return NULL;
	}

	/* Save netdev context in QCOM HAL context */
	qhd->nghd.netdev = gmacpdata->netdev;
	qhd->nghd.mac_id = gmacpdata->macid;

	/* Populate the mac base addresses */
	qhd->nghd.mac_base = devm_ioremap_nocache(&dp_priv->pdev->dev,
						  res->start,
						  resource_size(res));
	if (!qhd->nghd.mac_base) {
		netdev_dbg(ndev, "ioremap fail.\n");
		return NULL;
	}

	spin_lock_init(&qhd->nghd.slock);

	netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
		   gmacpdata->reg_len,
		   ndev->base_addr,
		   qhd->nghd.mac_base);

	/* Reset MIB Stats */
	if (fal_mib_port_flush_counters(0, qhd->nghd.mac_id)) {
		netdev_dbg(ndev, "MIB stats Reset fail.\n");
	}

	return (struct nss_gmac_hal_dev *)qhd;
}
/*
 * qcom_get_mac_address()
 *	Read the station MAC address from the ADDR0/ADDR1 registers.
 *
 * ADDR0 holds the two low-order octets (bytes 4-5), ADDR1 the four
 * high-order octets (bytes 0-3).
 */
static void qcom_get_mac_address(struct nss_gmac_hal_dev *nghd,
				 uint8_t *macaddr)
{
	uint32_t lo = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR0);
	uint32_t hi = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR1);

	macaddr[0] = (hi >> 24) & 0xff;
	macaddr[1] = (hi >> 16) & 0xff;
	macaddr[2] = (hi >> 8) & 0xff;
	macaddr[3] = hi & 0xff;
	macaddr[4] = lo & 0xff;
	macaddr[5] = (lo >> 8) & 0xff;
}
/*
 * qcom_set_mac_address()
 *	Program the station MAC address into the ADDR0/ADDR1 registers.
 *
 * ADDR0 takes the two low-order octets (bytes 4-5), ADDR1 the four
 * high-order octets (bytes 0-3).
 */
static void qcom_set_mac_address(struct nss_gmac_hal_dev *nghd,
				 uint8_t *macaddr)
{
	uint32_t lo, hi;

	lo = ((uint32_t)macaddr[5] << 8) | macaddr[4];
	hi = ((uint32_t)macaddr[0] << 24) | ((uint32_t)macaddr[1] << 16) |
	     ((uint32_t)macaddr[2] << 8) | macaddr[3];

	hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR0, lo);
	hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR1, hi);
}
/*
 * MAC hal_ops base structure
 *
 * Dispatch table binding this QCOM MAC HAL implementation to the generic
 * nss_gmac HAL interface.
 */
struct nss_gmac_hal_ops qcom_hal_ops = {
	.init = &qcom_init,
	.start = &qcom_start,
	.stop = &qcom_stop,
	.setmacaddr = &qcom_set_mac_address,
	.getmacaddr = &qcom_get_mac_address,
	.rxflowcontrol = &qcom_rx_flow_control,
	.txflowcontrol = &qcom_tx_flow_control,
	.setspeed = &qcom_set_mac_speed,	/* deprecated stub */
	.getspeed = &qcom_get_mac_speed,	/* deprecated stub */
	.setduplex = &qcom_set_duplex_mode,	/* deprecated stub */
	.getduplex = &qcom_get_duplex_mode,	/* deprecated stub */
	.getstats = &qcom_get_mib_stats,
	.setmaxframe = &qcom_set_maxframe,
	.getmaxframe = &qcom_get_maxframe,
	.getndostats = &qcom_get_netdev_stats,
	.getssetcount = &qcom_get_strset_count,
	.getstrings = &qcom_get_strings,
	.getethtoolstats = &qcom_get_eth_stats,
	.sendpause = &qcom_send_pause_frame,
	.stoppause = &qcom_stop_pause_frame,
};

View File

@@ -0,0 +1,156 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __QCOM_REG_H__
#define __QCOM_REG_H__
/* Register Offsets */
/* Offsets of GMAC config and status registers within NSS_GMAC_QCOM_MAC_BASE */
#define QCOM_MAC_ENABLE 0x0000
#define QCOM_MAC_SPEED 0x0004
#define QCOM_MAC_ADDR0 0x0008
#define QCOM_MAC_ADDR1 0x000c
#define QCOM_MAC_CTRL0 0x0010
#define QCOM_MAC_CTRL1 0x0014
#define QCOM_MAC_CTRL2 0x0018
#define QCOM_MAC_DBG_CTRL 0x001c
#define QCOM_MAC_DBG_ADDR 0x0020
#define QCOM_MAC_DBG_DATA 0x0024
#define QCOM_MAC_JMB_SIZE 0x0030
#define QCOM_MAC_MIB_CTRL 0x0034
/* RX stats */
#define QCOM_RXBROAD 0x0040
#define QCOM_RXPAUSE 0x0044
#define QCOM_RXMULTI 0x0048
#define QCOM_RXFCSERR 0x004c
#define QCOM_RXALIGNERR 0x0050
#define QCOM_RXRUNT 0x0054
#define QCOM_RXFRAG 0x0058
#define QCOM_RXJMBFCSERR 0x005c
#define QCOM_RXJMBALIGNERR 0x0060
#define QCOM_RXPKT64 0x0064
#define QCOM_RXPKT65TO127 0x0068
#define QCOM_RXPKT128TO255 0x006c
#define QCOM_RXPKT256TO511 0x0070
#define QCOM_RXPKT512TO1023 0x0074
#define QCOM_RXPKT1024TO1518 0x0078
#define QCOM_RXPKT1519TOX 0x007c
#define QCOM_RXPKTTOOLONG 0x0080
#define QCOM_RXPKTGOODBYTE_L 0x0084
#define QCOM_RXPKTGOODBYTE_H 0x0088
#define QCOM_RXPKTBADBYTE_L 0x008c
#define QCOM_RXPKTBADBYTE_H 0x0090
#define QCOM_RXUNI 0x0094
/* TX stats */
#define QCOM_TXBROAD 0x00a0
#define QCOM_TXPAUSE 0x00a4
#define QCOM_TXMULTI 0x00a8
#define QCOM_TXUNDERUN 0x00aC
#define QCOM_TXPKT64 0x00b0
#define QCOM_TXPKT65TO127 0x00b4
#define QCOM_TXPKT128TO255 0x00b8
#define QCOM_TXPKT256TO511 0x00bc
#define QCOM_TXPKT512TO1023 0x00c0
#define QCOM_TXPKT1024TO1518 0x00c4
#define QCOM_TXPKT1519TOX 0x00c8
#define QCOM_TXPKTBYTE_L 0x00cc
#define QCOM_TXPKTBYTE_H 0x00d0
#define QCOM_TXCOLLISIONS 0x00d4
#define QCOM_TXABORTCOL 0x00d8
#define QCOM_TXMULTICOL 0x00dc
#define QCOM_TXSINGLECOL 0x00e0
#define QCOM_TXEXCESSIVEDEFER 0x00e4
#define QCOM_TXDEFER 0x00e8
#define QCOM_TXLATECOL 0x00ec
#define QCOM_TXUNI 0x00f0
/* Bit Masks */
/* GMAC BITs */
#define QCOM_RX_MAC_ENABLE 1
#define QCOM_TX_MAC_ENABLE 0x2
#define QCOM_DUPLEX 0x10
#define QCOM_RX_FLOW_ENABLE 0x20
#define QCOM_TX_FLOW_ENABLE 0x40
#define QCOM_MAC_SPEED_10 0
#define QCOM_MAC_SPEED_100 1
#define QCOM_MAC_SPEED_1000 2
/* MAC CTRL0 */
#define QCOM_IPGT_POS 0x0000007f
#define QCOM_IPGT_LSB 0
#define QCOM_IPGR2_POS 0x00007f00
#define QCOM_IPGR2_LSB 8
#define QCOM_HALF_THDF_CTRL 0x8000
#define QCOM_HUGE_RECV 0x10000
#define QCOM_HUGE_TRANS 0x20000
#define QCOM_FLCHK 0x40000
#define QCOM_ABEBE 0x80000
#define QCOM_AMAXE 0x10000000
#define QCOM_BPNB 0x20000000
#define QCOM_NOBO 0x40000000
#define QCOM_DRBNIB_RXOK 0x80000000
/* MAC CTRL1 */
#define QCOM_JAM_IPG_POS 0x0000000f
#define QCOM_JAM_IPG_LSB 0
#define QCOM_TPAUSE 0x10
#define QCOM_TCTL 0x20
#define QCOM_SSTCT 0x40
#define QCOM_SIMR 0x80
#define QCOM_RETRY_POS 0x00000f00
#define QCOM_RETRY_LSB 8
#define QCOM_PRLEN_POS 0x0000f000
/*
 * NOTE(review): QCOM_PRLEN_POS selects bits 15:12 but QCOM_PRLEN_LSB is 8;
 * qcom_set_prlen() therefore shifts into bits 11:8 while clearing bits
 * 15:12. LSB 12 would match the mask — confirm against the MAC_CTRL1
 * hardware description before changing either define.
 */
#define QCOM_PRLEN_LSB 8
#define QCOM_PPAD 0x10000
#define QCOM_POVR 0x20000
#define QCOM_PHUG 0x40000
#define QCOM_MBOF 0x80000
#define QCOM_LCOL_POS 0x0ff00000
#define QCOM_LCOL_LSB 20
#define QCOM_LONG_JAM 0x10000000
/* MAC CTRL2 */
#define QCOM_IPG_DEC_LEN 0x2
#define QCOM_TEST_PAUSE 0x4
#define QCOM_MAC_LPI_TX_IDLE 0x8
#define QCOM_MAC_LOOPBACK 0x10
#define QCOM_IPG_DEC 0x20
#define QCOM_SRS_SEL 0x40
#define QCOM_CRC_RSV 0x80
#define QCOM_MAXFR_POS 0x003fff00
#define QCOM_MAXFR_LSB 8
/* MAC DEBUG_CTRL */
#define QCOM_DBG_IPGR1_POS 0x0000007f
#define QCOM_DBG_IPGR1_LSB 0
#define QCOM_DBG_HIHG_IPG_POS 0x0000ff00
#define QCOM_DBG_HIHG_IPG_LSB 8
#define QCOM_DBG_MAC_IPG_CTRL_POS 0x0000ff00
/*
 * NOTE(review): QCOM_DBG_MAC_IPG_CTRL_POS selects bits 15:8 but the LSB
 * below is 20; qcom_set_mac_ipg_ctrl() therefore shifts into bits 27:20
 * while clearing bits 15:8. The two cannot both be right — confirm
 * against the MAC_DBG_CTRL hardware description.
 */
#define QCOM_DBG_MAC_IPG_CTRL_LSB 20
#define QCOM_DBG_MAC_LEN_CTRL 0x40000000
#define QCOM_DBG_EDxSDFR_TRANS 0x80000000
/* MAC MIB-CTRL*/
#define QCOM_MIB_ENABLE 1
#define QCOM_MIB_RESET 0x2
#define QCOM_MIB_RD_CLR 0x4
#endif /*__QCOM_REG_H__*/

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_DEV_H__
#define __SYN_DEV_H__

#include <nss_dp_dev.h>

/*
 * Subclass for base nss_gmac_hal_dev
 *
 * Extends the generic GMAC HAL context with Synopsys GMAC statistics;
 * the base struct must stay first so pointers can be cast between types.
 */
struct syn_hal_dev {
	struct nss_gmac_hal_dev nghd;	/* Base class; must be first member */
	struct nss_dp_gmac_stats stats;	/* Stats structure */
};

#endif /*__SYN_DEV_H__*/

View File

@@ -0,0 +1,959 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal.h>
#include "syn_dev.h"
#include "syn_reg.h"
/*
 * SYN_STAT()
 *	Byte offset of a counter within struct nss_dp_hal_gmac_stats.
 */
#define SYN_STAT(m) offsetof(struct nss_dp_hal_gmac_stats, m)

/* Stride for indexing the hw_errs[] array of 64-bit counters. */
#define HW_ERR_SIZE sizeof(uint64_t)

/*
 * syn_ethtool_stats
 *	One ethtool statistic: the name shown by "ethtool -S" and the
 *	offset of its counter (as produced by SYN_STAT()).
 */
struct syn_ethtool_stats {
	uint8_t stat_string[ETH_GSTRING_LEN];	/* Name exported to ethtool */
	uint64_t stat_offset;			/* Offset of the counter */
};

/*
 * Table of exported statistics. The table order defines the layout of
 * both the string set (syn_get_strings()) and the data array
 * (syn_get_eth_stats()), so entries must not be reordered.
 *
 * NOTE(review): the "..._interrutps" strings below are misspelled, but
 * they are runtime-visible ethtool names — renaming them would change
 * user-visible output, so they are kept as-is.
 */
static const struct syn_ethtool_stats syn_gstrings_stats[] = {
	{"rx_bytes", SYN_STAT(rx_bytes)},
	{"rx_packets", SYN_STAT(rx_packets)},
	{"rx_errors", SYN_STAT(rx_errors)},
	{"rx_receive_errors", SYN_STAT(rx_receive_errors)},
	{"rx_descriptor_errors", SYN_STAT(rx_descriptor_errors)},
	{"rx_late_collision_errors", SYN_STAT(rx_late_collision_errors)},
	{"rx_dribble_bit_errors", SYN_STAT(rx_dribble_bit_errors)},
	{"rx_length_errors", SYN_STAT(rx_length_errors)},
	{"rx_ip_header_errors", SYN_STAT(rx_ip_header_errors)},
	{"rx_ip_payload_errors", SYN_STAT(rx_ip_payload_errors)},
	{"rx_no_buffer_errors", SYN_STAT(rx_no_buffer_errors)},
	{"rx_transport_csum_bypassed", SYN_STAT(rx_transport_csum_bypassed)},
	{"tx_bytes", SYN_STAT(tx_bytes)},
	{"tx_packets", SYN_STAT(tx_packets)},
	{"tx_collisions", SYN_STAT(tx_collisions)},
	{"tx_errors", SYN_STAT(tx_errors)},
	{"tx_jabber_timeout_errors", SYN_STAT(tx_jabber_timeout_errors)},
	{"tx_frame_flushed_errors", SYN_STAT(tx_frame_flushed_errors)},
	{"tx_loss_of_carrier_errors", SYN_STAT(tx_loss_of_carrier_errors)},
	{"tx_no_carrier_errors", SYN_STAT(tx_no_carrier_errors)},
	{"tx_late_collision_errors", SYN_STAT(tx_late_collision_errors)},
	{"tx_excessive_collision_errors", SYN_STAT(tx_excessive_collision_errors)},
	{"tx_excessive_deferral_errors", SYN_STAT(tx_excessive_deferral_errors)},
	{"tx_underflow_errors", SYN_STAT(tx_underflow_errors)},
	{"tx_ip_header_errors", SYN_STAT(tx_ip_header_errors)},
	{"tx_ip_payload_errors", SYN_STAT(tx_ip_payload_errors)},
	{"tx_dropped", SYN_STAT(tx_dropped)},
	{"rx_missed", SYN_STAT(rx_missed)},
	{"fifo_overflows", SYN_STAT(fifo_overflows)},
	{"rx_scatter_errors", SYN_STAT(rx_scatter_errors)},
	{"tx_ts_create_errors", SYN_STAT(tx_ts_create_errors)},
	/* The next ten entries index successive slots of hw_errs[]. */
	{"pmt_interrupts", SYN_STAT(hw_errs[0])},
	{"mmc_interrupts", SYN_STAT(hw_errs[0]) + (1 * HW_ERR_SIZE)},
	{"line_interface_interrupts", SYN_STAT(hw_errs[0]) + (2 * HW_ERR_SIZE)},
	{"fatal_bus_error_interrupts", SYN_STAT(hw_errs[0]) + (3 * HW_ERR_SIZE)},
	{"rx_buffer_unavailable_interrupts", SYN_STAT(hw_errs[0]) + (4 * HW_ERR_SIZE)},
	{"rx_process_stopped_interrupts", SYN_STAT(hw_errs[0]) + (5 * HW_ERR_SIZE)},
	{"tx_underflow_interrupts", SYN_STAT(hw_errs[0]) + (6 * HW_ERR_SIZE)},
	{"rx_overflow_interrupts", SYN_STAT(hw_errs[0]) + (7 * HW_ERR_SIZE)},
	{"tx_jabber_timeout_interrutps", SYN_STAT(hw_errs[0]) + (8 * HW_ERR_SIZE)},
	{"tx_process_stopped_interrutps", SYN_STAT(hw_errs[0]) + (9 * HW_ERR_SIZE)},
	{"gmac_total_ticks", SYN_STAT(gmac_total_ticks)},
	{"gmac_worst_case_ticks", SYN_STAT(gmac_worst_case_ticks)},
	{"gmac_iterations", SYN_STAT(gmac_iterations)},
	{"tx_pause_frames", SYN_STAT(tx_pause_frames)},
	{"mmc_rx_overflow_errors", SYN_STAT(mmc_rx_overflow_errors)},
	{"mmc_rx_watchdog_timeout_errors", SYN_STAT(mmc_rx_watchdog_timeout_errors)},
	{"mmc_rx_crc_errors", SYN_STAT(mmc_rx_crc_errors)},
	{"mmc_rx_ip_header_errors", SYN_STAT(mmc_rx_ip_header_errors)},
	{"mmc_rx_octets_g", SYN_STAT(mmc_rx_octets_g)},
	{"mmc_rx_ucast_frames", SYN_STAT(mmc_rx_ucast_frames)},
	{"mmc_rx_bcast_frames", SYN_STAT(mmc_rx_bcast_frames)},
	{"mmc_rx_mcast_frames", SYN_STAT(mmc_rx_mcast_frames)},
	{"mmc_rx_undersize", SYN_STAT(mmc_rx_undersize)},
	{"mmc_rx_oversize", SYN_STAT(mmc_rx_oversize)},
	{"mmc_rx_jabber", SYN_STAT(mmc_rx_jabber)},
	{"mmc_rx_octets_gb", SYN_STAT(mmc_rx_octets_gb)},
	{"mmc_rx_frag_frames_g", SYN_STAT(mmc_rx_frag_frames_g)},
	{"mmc_tx_octets_g", SYN_STAT(mmc_tx_octets_g)},
	{"mmc_tx_ucast_frames", SYN_STAT(mmc_tx_ucast_frames)},
	{"mmc_tx_bcast_frames", SYN_STAT(mmc_tx_bcast_frames)},
	{"mmc_tx_mcast_frames", SYN_STAT(mmc_tx_mcast_frames)},
	{"mmc_tx_deferred", SYN_STAT(mmc_tx_deferred)},
	{"mmc_tx_single_col", SYN_STAT(mmc_tx_single_col)},
	{"mmc_tx_multiple_col", SYN_STAT(mmc_tx_multiple_col)},
	{"mmc_tx_octets_gb", SYN_STAT(mmc_tx_octets_gb)},
};

/* Number of statistics exported through ethtool. */
#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
/*
 * syn_set_rx_flow_ctrl()
 *	Set the RFE bit: the MAC honours received PAUSE frames.
 */
static inline void syn_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
			SYN_MAC_FC_RX_FLOW_CONTROL);
}

/*
 * syn_clear_rx_flow_ctrl()
 *	Clear the RFE bit: received PAUSE frames are ignored.
 */
static inline void syn_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
			SYN_MAC_FC_RX_FLOW_CONTROL);
}

/*
 * syn_set_tx_flow_ctrl()
 *	Set the TFE bit: the MAC may transmit PAUSE frames.
 */
static inline void syn_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
			SYN_MAC_FC_TX_FLOW_CONTROL);
}

/*
 * syn_send_tx_pause_frame()
 *	Request transmission of a PAUSE frame.
 *
 * Tx flow control is enabled first, then the FCB/PBA bit is set to
 * trigger the frame.
 */
static inline void syn_send_tx_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	syn_set_tx_flow_ctrl(nghd);
	hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
			SYN_MAC_FC_SEND_PAUSE_FRAME);
}

/*
 * syn_clear_tx_flow_ctrl()
 *	Clear the TFE bit: Tx flow control is disabled.
 */
static inline void syn_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
			SYN_MAC_FC_TX_FLOW_CONTROL);
}

/*
 * syn_rx_enable()
 *	Enable the MAC receiver and open the frame filter.
 *
 * SYN_MAC_FILTER_OFF is the "receive all" (RA) bit, so address
 * filtering is effectively bypassed once Rx is enabled.
 */
static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_RX);
	hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
}

/*
 * syn_tx_enable()
 *	Enable the MAC transmitter.
 */
static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_TX);
}
/************Ip checksum offloading APIs*************/

/*
 * syn_enable_rx_chksum_offload()
 *	Enable IPv4 header and IPv4/IPv6 TCP/UDP checksum calculation by GMAC.
 */
static inline void syn_enable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd,
			SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
}

/*
 * syn_disable_rx_chksum_offload()
 *	Disable the IP checksum offloading in receive path.
 */
static inline void syn_disable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd,
			SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
}

/*
 * syn_rx_tcpip_chksum_drop_enable()
 *	Instruct the DMA to drop the packets that fail TCP/IP checksum.
 *
 * This is done by CLEARING the "disable drop" bit in the DMA operation
 * mode register. Valid only when full checksum offloading is
 * enabled (type-2).
 */
static inline void syn_rx_tcpip_chksum_drop_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd,
			SYN_DMA_OPERATION_MODE, SYN_DMA_DISABLE_DROP_TCP_CS);
}

/*******************Ip checksum offloading APIs**********************/

/*
 * syn_ipc_offload_init()
 *	Initialize IPC checksum offloading from the device's RXCSUM flag.
 *
 * When __NSS_DP_RXCSUM is set, Rx checksum offload is enabled and the
 * DMA is told to drop frames with bad TCP/IP checksums; otherwise the
 * offload engine is disabled.
 */
static inline void syn_ipc_offload_init(struct nss_gmac_hal_dev *nghd)
{
	struct nss_dp_dev *dp_priv;

	dp_priv = netdev_priv(nghd->netdev);

	if (test_bit(__NSS_DP_RXCSUM, &dp_priv->flags)) {
		/*
		 * Enable the offload engine in the receive path
		 */
		syn_enable_rx_chksum_offload(nghd);

		/*
		 * DMA drops the packets if error in encapsulated ethernet
		 * payload.
		 */
		syn_rx_tcpip_chksum_drop_enable(nghd);
		netdev_dbg(nghd->netdev, "%s: enable Rx checksum\n", __func__);
	} else {
		syn_disable_rx_chksum_offload(nghd);
		netdev_dbg(nghd->netdev, "%s: disable Rx checksum\n", __func__);
	}
}
/*
 * syn_disable_mac_interrupt()
 *	Mask out every MAC interrupt source.
 */
static inline void syn_disable_mac_interrupt(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_INTERRUPT_MASK, 0xffffffff);
}

/*
 * syn_disable_mmc_tx_interrupt()
 *	Disable the MMC Tx interrupt.
 *
 * Setting a bit in the mask register masks the corresponding interrupt.
 */
static inline void syn_disable_mmc_tx_interrupt(struct nss_gmac_hal_dev *nghd,
						uint32_t mask)
{
	hal_set_reg_bits(nghd, SYN_MMC_TX_INTERRUPT_MASK, mask);
}

/*
 * syn_disable_mmc_rx_interrupt()
 *	Disable the MMC Rx interrupt.
 *
 * The MMC rx interrupts are masked out as per the mask specified.
 */
static inline void syn_disable_mmc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
						uint32_t mask)
{
	hal_set_reg_bits(nghd, SYN_MMC_RX_INTERRUPT_MASK, mask);
}

/*
 * syn_disable_mmc_ipc_rx_interrupt()
 *	Disable the MMC ipc rx checksum offload interrupt.
 *
 * The MMC ipc rx checksum offload interrupts are masked out as
 * per the mask specified.
 */
static inline void syn_disable_mmc_ipc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
						    uint32_t mask)
{
	hal_set_reg_bits(nghd, SYN_MMC_IPC_RX_INTR_MASK, mask);
}

/*
 * syn_disable_dma_interrupt()
 *	Disables all DMA interrupts.
 */
void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_DISABLE);
}

/*
 * syn_enable_dma_interrupt()
 *	Enables all DMA interrupts.
 */
void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_EN);
}

/*
 * syn_disable_interrupt_all()
 *	Quiesce every interrupt source (MAC, DMA, MMC Tx/Rx/IPC).
 *
 * Called during init before the device is brought up.
 */
static inline void syn_disable_interrupt_all(struct nss_gmac_hal_dev *nghd)
{
	syn_disable_mac_interrupt(nghd);
	syn_disable_dma_interrupt(nghd);
	syn_disable_mmc_tx_interrupt(nghd, 0xFFFFFFFF);
	syn_disable_mmc_rx_interrupt(nghd, 0xFFFFFFFF);
	syn_disable_mmc_ipc_rx_interrupt(nghd, 0xFFFFFFFF);
}
/*
 * syn_dma_bus_mode_init()
 *	Function to program DMA bus mode register.
 */
static inline void syn_dma_bus_mode_init(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_BUS_MODE, SYN_DMA_BUS_MODE_VAL);
}

/*
 * syn_clear_dma_status()
 *	Clear all the pending dma interrupts.
 *
 * The status register is read and the same value written back,
 * clearing the bits that were set.
 */
void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, SYN_DMA_STATUS);
	hal_write_reg(nghd->mac_base, SYN_DMA_STATUS, data);
}

/*
 * syn_enable_dma_rx()
 *	Enable Rx DMA (start the receive engine).
 */
void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
	data |= SYN_DMA_RX_START;
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}

/*
 * syn_disable_dma_rx()
 *	Disable Rx DMA (stop the receive engine).
 */
void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
	data &= ~SYN_DMA_RX_START;
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}

/*
 * syn_enable_dma_tx()
 *	Enable Tx DMA (start the transmit engine).
 *	(Original comment said "Rx"; this sets SYN_DMA_TX_START.)
 */
void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
	data |= SYN_DMA_TX_START;
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}

/*
 * syn_disable_dma_tx()
 *	Disable Tx DMA (stop the transmit engine).
 *	(Original comment said "Rx"; this clears SYN_DMA_TX_START.)
 */
void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd)
{
	uint32_t data;

	data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
	data &= ~SYN_DMA_TX_START;
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}

/*
 * syn_resume_dma_tx
 *	Resumes the DMA Transmission by writing the Tx poll-demand register.
 */
void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_TX_POLL_DEMAND, 0);
}

/*
 * syn_get_rx_missed
 *	Get Rx missed errors: low 16 bits of the missed-frame/overflow
 *	counter register.
 */
uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd)
{
	uint32_t missed_frame_buff_overflow;

	missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
	return missed_frame_buff_overflow & 0xFFFF;
}

/*
 * syn_get_fifo_overflows
 *	Get FIFO overflows: bits [27:17] of the same counter register.
 */
uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd)
{
	uint32_t missed_frame_buff_overflow;

	missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
	return (missed_frame_buff_overflow >> 17) & 0x7ff;
}

/*
 * syn_init_tx_desc_base()
 *	Programs the Dma Tx Base address with the starting address of the descriptor ring or chain.
 */
void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS, tx_desc_dma);
}

/*
 * syn_init_rx_desc_base()
 *	Programs the Dma Rx Base address with the starting address of the descriptor ring or chain.
 */
void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS, rx_desc_dma);
}

/*
 * syn_dma_axi_bus_mode_init()
 *	Function to program DMA AXI bus mode register.
 */
static inline void syn_dma_axi_bus_mode_init(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_AXI_BUS_MODE,
			SYN_DMA_AXI_BUS_MODE_VAL);
}

/*
 * syn_dma_operation_mode_init()
 *	Function to program DMA Operation Mode register.
 */
static inline void syn_dma_operation_mode_init(struct nss_gmac_hal_dev *nghd)
{
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, SYN_DMA_OMR);
}
/*
 * syn_broadcast_enable()
 *	Enables Broadcast frames.
 *
 * Clears the DBF (disable broadcast) bit, so the address filter
 * passes all incoming broadcast frames.
 */
static inline void syn_broadcast_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_clear_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_BROADCAST);
}

/*
 * syn_multicast_enable()
 *	Enables Multicast frames.
 *
 * Sets the PM (pass-all-multicast) bit, so all multicast frames
 * are passed.
 */
static inline void syn_multicast_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_MULTICAST_FILTER);
}

/*
 * syn_promisc_enable()
 *	Enables promiscuous mode.
 *
 * Sets both the RA (receive-all) and PR (promiscuous) bits, so the
 * address filter passes all incoming frames regardless of their
 * destination and source addresses.
 */
static inline void syn_promisc_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
	hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER,
			SYN_MAC_PROMISCUOUS_MODE_ON);
}
/*
 * syn_get_stats()
 *	Refresh the cached statistics block from the registered dataplane.
 *
 * Returns 0 on success, -1 if no dataplane ops are registered yet.
 */
static int syn_get_stats(struct nss_gmac_hal_dev *nghd)
{
	struct nss_dp_dev *dp_priv;
	struct syn_hal_dev *shd;
	struct nss_dp_gmac_stats *stats;

	BUG_ON(nghd == NULL);

	/* nghd is the first member of struct syn_hal_dev; downcast. */
	shd = (struct syn_hal_dev *)nghd;
	stats = &(shd->stats);

	dp_priv = netdev_priv(nghd->netdev);
	if (!dp_priv->data_plane_ops)
		return -1;

	/* Dataplane fills our cached counters in place. */
	dp_priv->data_plane_ops->get_stats(dp_priv->dpc, stats);

	return 0;
}
/*
 * syn_rx_flow_control()
 *	Turn reception-side (Rx) flow control on or off.
 */
static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd,
				bool enabled)
{
	BUG_ON(nghd == NULL);

	if (!enabled) {
		syn_clear_rx_flow_ctrl(nghd);
		return;
	}

	syn_set_rx_flow_ctrl(nghd);
}
/*
 * syn_tx_flow_control()
 *	Turn transmission-side (Tx) flow control on or off.
 */
static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd,
				bool enabled)
{
	BUG_ON(nghd == NULL);

	if (!enabled) {
		syn_clear_tx_flow_ctrl(nghd);
		return;
	}

	syn_set_tx_flow_ctrl(nghd);
}
/*
 * syn_get_max_frame_size()
 *	Query the port's maximum frame size via the SSDK FAL layer.
 *
 * Returns the MTU on success, or the (negative) FAL error code.
 * Device id 0 is hard-coded throughout this HAL.
 */
static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd)
{
	int ret;
	uint32_t mtu;

	BUG_ON(nghd == NULL);

	ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
	if (!ret)
		return mtu;

	return ret;
}

/*
 * syn_set_max_frame_size()
 *	Set the port's maximum frame size via the SSDK FAL layer.
 *
 * Returns the FAL status code (0 on success).
 */
static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd,
				      uint32_t val)
{
	BUG_ON(nghd == NULL);
	return fal_port_max_frame_size_set(0, nghd->mac_id, val);
}
/*
 * syn_set_mac_speed()
 *	Deprecated no-op; speed is managed elsewhere. Logs a warning.
 */
static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd,
				 uint32_t mac_speed)
{
	struct net_device *netdev;

	BUG_ON(nghd == NULL);

	netdev = nghd->netdev;
	netdev_warn(netdev, "API deprecated\n");
	return 0;
}

/*
 * syn_get_mac_speed()
 *	Deprecated no-op; always returns 0 and logs a warning.
 */
static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
	struct net_device *netdev;

	BUG_ON(nghd == NULL);

	netdev = nghd->netdev;
	netdev_warn(netdev, "API deprecated\n");
	return 0;
}

/*
 * syn_set_duplex_mode()
 *	Deprecated no-op; duplex is managed elsewhere. Logs a warning.
 */
static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
				uint8_t duplex_mode)
{
	struct net_device *netdev;

	BUG_ON(nghd == NULL);

	netdev = nghd->netdev;
	netdev_warn(netdev, "API deprecated\n");
}

/*
 * syn_get_duplex_mode()
 *	Deprecated no-op; always returns 0 and logs a warning.
 */
static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
	struct net_device *netdev;

	BUG_ON(nghd == NULL);

	netdev = nghd->netdev;
	netdev_warn(netdev, "API deprecated\n");
	return 0;
}
/*
 * syn_get_netdev_stats()
 *	Fill an rtnl_link_stats64 from the cached GMAC counters.
 *
 * Refreshes the cache from the dataplane first; returns -1 if no
 * dataplane is registered, 0 otherwise.
 */
static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
				struct rtnl_link_stats64 *stats)
{
	struct syn_hal_dev *shd;
	struct nss_dp_hal_gmac_stats *ndo_stats;

	BUG_ON(nghd == NULL);

	shd = (struct syn_hal_dev *)nghd;
	ndo_stats = &(shd->stats.stats);

	/*
	 * Read stats from the registered dataplane.
	 */
	if (syn_get_stats(nghd))
		return -1;

	stats->rx_packets = ndo_stats->rx_packets;
	stats->rx_bytes = ndo_stats->rx_bytes;
	stats->rx_errors = ndo_stats->rx_errors;
	/*
	 * NOTE(review): rx_dropped is reported from rx_errors — there is
	 * no separate rx_dropped counter here; confirm this is intended.
	 */
	stats->rx_dropped = ndo_stats->rx_errors;
	stats->rx_length_errors = ndo_stats->rx_length_errors;
	stats->rx_over_errors = ndo_stats->mmc_rx_overflow_errors;
	stats->rx_crc_errors = ndo_stats->mmc_rx_crc_errors;
	stats->rx_frame_errors = ndo_stats->rx_dribble_bit_errors;
	stats->rx_fifo_errors = ndo_stats->fifo_overflows;
	stats->rx_missed_errors = ndo_stats->rx_missed;
	/* Aggregate both Tx and late-Rx collisions. */
	stats->collisions = ndo_stats->tx_collisions + ndo_stats->rx_late_collision_errors;
	stats->tx_packets = ndo_stats->tx_packets;
	stats->tx_bytes = ndo_stats->tx_bytes;
	stats->tx_errors = ndo_stats->tx_errors;
	stats->tx_dropped = ndo_stats->tx_dropped;
	stats->tx_carrier_errors = ndo_stats->tx_loss_of_carrier_errors + ndo_stats->tx_no_carrier_errors;
	stats->tx_fifo_errors = ndo_stats->tx_underflow_errors;
	stats->tx_window_errors = ndo_stats->tx_late_collision_errors;

	return 0;
}
/*
 * syn_get_eth_stats()
 *	Copy the exported counters into ethtool's flat uint64 array.
 *
 * Refreshes the cached statistics from the registered dataplane, then
 * walks syn_gstrings_stats[] and copies one counter per entry into
 * data[] (sized by syn_get_strset_count()).
 *
 * Returns 0 on success, -1 if no dataplane is registered.
 */
static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd,
				 uint64_t *data)
{
	struct syn_hal_dev *shd;
	struct nss_dp_gmac_stats *stats;
	uint8_t *p = NULL;
	int i;

	BUG_ON(nghd == NULL);

	shd = (struct syn_hal_dev *)nghd;
	stats = &(shd->stats);

	/*
	 * Read stats from the registered dataplane.
	 */
	if (syn_get_stats(nghd))
		return -1;

	/*
	 * stat_offset values come from SYN_STAT(), i.e. they are offsets
	 * into struct nss_dp_hal_gmac_stats — so index from the embedded
	 * ->stats member, not from the outer wrapper struct.
	 */
	for (i = 0; i < SYN_STATS_LEN; i++) {
		p = ((uint8_t *)(&stats->stats) +
				syn_gstrings_stats[i].stat_offset);
		/*
		 * Counters are 64-bit (see HW_ERR_SIZE and the uint64_t
		 * fields referenced by SYN_STAT()); the previous
		 * uint32_t read truncated every counter to its low 32
		 * bits and read the wrong half on big-endian.
		 */
		data[i] = *(uint64_t *)p;
	}

	return 0;
}
/*
 * syn_get_strset_count()
 *	Number of strings in the requested ethtool string set.
 *
 * Only ETH_SS_STATS is supported; any other set logs a debug message
 * and returns -EPERM.
 */
static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd,
				    int32_t sset)
{
	BUG_ON(nghd == NULL);

	if (sset == ETH_SS_STATS)
		return SYN_STATS_LEN;

	netdev_dbg(nghd->netdev, "%s: Invalid string set\n", __func__);
	return -EPERM;
}
/*
 * syn_get_strings()
 *	Copy the statistic names for the requested string set into data.
 *
 * Each name occupies a fixed ETH_GSTRING_LEN slot, in the same order as
 * syn_gstrings_stats[] (and hence syn_get_eth_stats()). Returns 0 on
 * success, -EPERM for an unsupported string set.
 */
static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd,
			       int32_t stringset, uint8_t *data)
{
	struct net_device *netdev;
	int i;

	BUG_ON(nghd == NULL);

	netdev = nghd->netdev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < SYN_STATS_LEN; i++) {
			memcpy(data, syn_gstrings_stats[i].stat_string,
					ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;

	default:
		netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
		return -EPERM;
	}

	return 0;
}
/*
 * syn_send_pause_frame()
 *	HAL-ops wrapper: transmit a single PAUSE frame.
 */
static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	BUG_ON(nghd == NULL);
	syn_send_tx_pause_frame(nghd);
}
/*
 * syn_set_mac_address()
 *	Program the primary (ADDR0) MAC address registers.
 *
 * Bytes 4-5 go into the high register together with the AE
 * (address enable) bit; bytes 0-3 are packed little-endian into the
 * low register. A NULL macaddr is rejected with a warning.
 */
static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd,
				uint8_t *macaddr)
{
	uint32_t data;

	BUG_ON(nghd == NULL);

	if (!macaddr) {
		netdev_warn(nghd->netdev, "macaddr is not valid.\n");
		return;
	}

	data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_HIGH_AE;
	hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data);
	data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8)
		| macaddr[0];
	hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data);
}
/*
 * syn_get_mac_address()
 *	Read the primary (ADDR0) MAC address back from the registers.
 *
 * Inverse of syn_set_mac_address(): bytes 4-5 come from the high
 * register, bytes 0-3 from the low register. A NULL macaddr buffer is
 * rejected with a warning.
 */
static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd,
				uint8_t *macaddr)
{
	uint32_t data;

	BUG_ON(nghd == NULL);

	if (!macaddr) {
		netdev_warn(nghd->netdev, "macaddr is not valid.\n");
		return;
	}

	data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH);
	macaddr[5] = (data >> 8) & 0xff;
	macaddr[4] = (data) & 0xff;

	data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW);
	macaddr[3] = (data >> 24) & 0xff;
	macaddr[2] = (data >> 16) & 0xff;
	macaddr[1] = (data >> 8) & 0xff;
	macaddr[0] = (data) & 0xff;
}
/*
 * syn_dma_init()
 *	Initialize settings for GMAC DMA and AXI bus.
 *
 * Enables the SoC-specific GMAC clocks first, then programs the DMA
 * bus-mode, AXI bus-mode and operation-mode registers with their
 * default values.
 */
static void syn_dma_init(struct nss_gmac_hal_dev *nghd)
{
	struct net_device *ndev = nghd->netdev;
	struct nss_dp_dev *dp_priv = netdev_priv(ndev);

	/*
	 * Enable SoC specific GMAC clocks.
	 */
	nss_dp_hal_clk_enable(dp_priv);

	/*
	 * Configure DMA registers.
	 */
	syn_dma_bus_mode_init(nghd);
	syn_dma_axi_bus_mode_init(nghd);
	syn_dma_operation_mode_init(nghd);
}
/*
 * syn_init()
 *	Allocate and initialize the Synopsys GMAC HAL context.
 *
 * Maps the device's register resource (devm-managed), quiesces all
 * interrupts, programs the DMA, enables Rx checksum offload per the
 * device flags, opens the frame filter (promisc/broadcast/multicast),
 * enables Rx/Tx and flushes the port's MIB counters.
 *
 * Returns the new HAL context (as nss_gmac_hal_dev *), or NULL on any
 * failure; partially acquired devm resources are released on the
 * error paths.
 */
static void *syn_init(struct gmac_hal_platform_data *gmacpdata)
{
	struct syn_hal_dev *shd = NULL;
	struct net_device *ndev = NULL;
	struct nss_dp_dev *dp_priv = NULL;
	struct resource *res;

	ndev = gmacpdata->netdev;
	dp_priv = netdev_priv(ndev);

	res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		netdev_dbg(ndev, "Resource get failed.\n");
		return NULL;
	}

	shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
						sizeof(struct syn_hal_dev),
						GFP_KERNEL);
	if (!shd) {
		netdev_dbg(ndev, "kzalloc failed. Returning...\n");
		return NULL;
	}

	shd->nghd.mac_reg_len = resource_size(res);
	shd->nghd.memres = devm_request_mem_region(&dp_priv->pdev->dev,
						res->start,
						resource_size(res),
						ndev->name);
	if (!shd->nghd.memres) {
		netdev_dbg(ndev, "Request mem region failed. Returning...\n");
		devm_kfree(&dp_priv->pdev->dev, shd);
		return NULL;
	}

	/*
	 * Save netdev context in syn HAL context
	 */
	shd->nghd.netdev = gmacpdata->netdev;
	shd->nghd.mac_id = gmacpdata->macid;
	shd->nghd.duplex_mode = DUPLEX_FULL;

	/* Default to Rx checksum offload on; syn_ipc_offload_init() reads this. */
	set_bit(__NSS_DP_RXCSUM, &dp_priv->flags);

	/*
	 * Populate the mac base addresses
	 * (devm_ioremap_nocache matches this 21.02-era kernel API).
	 */
	shd->nghd.mac_base =
		devm_ioremap_nocache(&dp_priv->pdev->dev, res->start,
				resource_size(res));
	if (!shd->nghd.mac_base) {
		netdev_dbg(ndev, "ioremap fail.\n");
		devm_kfree(&dp_priv->pdev->dev, shd);
		return NULL;
	}

	spin_lock_init(&shd->nghd.slock);

	netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
			gmacpdata->reg_len,
			ndev->base_addr,
			shd->nghd.mac_base);

	/* Quiesce, then bring the MAC up in open-filter Rx/Tx mode. */
	syn_disable_interrupt_all(&shd->nghd);
	syn_dma_init(&shd->nghd);
	syn_ipc_offload_init(&shd->nghd);
	syn_promisc_enable(&shd->nghd);
	syn_broadcast_enable(&shd->nghd);
	syn_multicast_enable(&shd->nghd);
	syn_rx_enable(&shd->nghd);
	syn_tx_enable(&shd->nghd);

	/*
	 * Reset MIB Stats (non-fatal on failure).
	 */
	if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) {
		netdev_dbg(ndev, "MIB stats Reset fail.\n");
	}

	return (struct nss_gmac_hal_dev *)shd;
}
/*
 * syn_exit()
 *	Tear down the HAL context's register mapping.
 *
 * Releases the devm iomap and mem region acquired in syn_init() and
 * clears the pointers. The context memory itself stays devm-managed.
 */
static void syn_exit(struct nss_gmac_hal_dev *nghd)
{
	struct nss_dp_dev *dp_priv = NULL;

	dp_priv = netdev_priv(nghd->netdev);
	devm_iounmap(&dp_priv->pdev->dev,
			(void *)nghd->mac_base);
	devm_release_mem_region(&dp_priv->pdev->dev,
			(nghd->memres)->start,
			nghd->mac_reg_len);

	nghd->memres = NULL;
	nghd->mac_base = NULL;
}
/*
 * syn_hal_ops
 *	Ops table exported to the GMAC HAL layer for the Synopsys MAC.
 *	start/stop are intentionally unimplemented.
 */
struct nss_gmac_hal_ops syn_hal_ops = {
	.init = &syn_init,
	.start = NULL,
	.stop = NULL,
	.exit = &syn_exit,
	.setmacaddr = &syn_set_mac_address,
	.getmacaddr = &syn_get_mac_address,
	.rxflowcontrol = &syn_rx_flow_control,
	.txflowcontrol = &syn_tx_flow_control,
	.setspeed = &syn_set_mac_speed,		/* deprecated no-op */
	.getspeed = &syn_get_mac_speed,		/* deprecated no-op */
	.setduplex = &syn_set_duplex_mode,	/* deprecated no-op */
	.getduplex = &syn_get_duplex_mode,	/* deprecated no-op */
	.setmaxframe = &syn_set_max_frame_size,
	.getmaxframe = &syn_get_max_frame_size,
	.getndostats = &syn_get_netdev_stats,
	.getssetcount = &syn_get_strset_count,
	.getstrings = &syn_get_strings,
	.getethtoolstats = &syn_get_eth_stats,
	.sendpause = &syn_send_pause_frame,
};

View File

@@ -0,0 +1,531 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_REG_H__
#define __SYN_REG_H__

/*
 * Synopsys GMAC register map: byte offsets from the mapped MAC base
 * (nghd->mac_base). MAC registers sit at 0x0000+, MMC counters at
 * 0x0100+, DMA registers at 0x1000+.
 */

/*
 * MAC register offset
 */
#define SYN_MAC_CONFIGURATION 0x0000
#define SYN_MAC_FRAME_FILTER 0x0004
#define SYN_MAC_FLOW_CONTROL 0x0018
#define SYN_VLAN_TAG 0x001C
#define SYN_VERSION 0x0020
#define SYN_DEBUG 0x0024
#define SYN_REMOTE_WAKE_UP_FRAME_FILTER 0x0028
#define SYN_PMT_CONTROL_STATUS 0x002C
#define SYN_LPI_CONTROL_STATUS 0x0030
#define SYN_LPI_TIMERS_CONTROL 0x0034
#define SYN_INTERRUPT_STATUS 0x0038
#define SYN_INTERRUPT_MASK 0x003C

/*
 * MAC address register offset (high/low pairs for ADDR0..ADDR4)
 */
#define SYN_MAC_ADDR0_HIGH 0x0040
#define SYN_MAC_ADDR0_LOW 0x0044
#define SYN_MAC_ADDR1_HIGH 0x0048
#define SYN_MAC_ADDR1_LOW 0x004C
#define SYN_MAC_ADDR2_HIGH 0x0050
#define SYN_MAC_ADDR2_LOW 0x0054
#define SYN_MAC_ADDR3_HIGH 0x0058
#define SYN_MAC_ADDR3_LOW 0x005C
#define SYN_MAC_ADDR4_HIGH 0x0060
#define SYN_MAC_ADDR4_LOW 0x0064

/*
 * Watchdog timeout register
 */
#define SYN_WDOG_TIMEOUT 0x00DC

/*
 * Mac Management Counters (MMC) register offset
 */
#define SYN_MMC_CONTROL 0x0100
#define SYN_MMC_RX_INTERRUPT 0x0104
#define SYN_MMC_TX_INTERRUPT 0x0108
#define SYN_MMC_RX_INTERRUPT_MASK 0x010C
#define SYN_MMC_TX_INTERRUPT_MASK 0x0110
#define SYN_MMC_IPC_RX_INTR_MASK 0x0200

/*
 * DMA Register offset
 */
#define SYN_DMA_BUS_MODE 0x1000
#define SYN_DMA_TX_POLL_DEMAND 0x1004
#define SYN_DMA_RX_POLL_DEMAND 0x1008
#define SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS 0x100C
#define SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS 0x1010
#define SYN_DMA_STATUS 0x1014
#define SYN_DMA_OPERATION_MODE 0x1018
#define SYN_DMA_INT_ENABLE 0x101C
#define SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER 0x1020
#define SYN_DMA_RX_INTERRUPT_WATCHDOG_TIMER 0x1024
#define SYN_DMA_AXI_BUS_MODE 0x1028
#define SYN_DMA_AHB_OR_AXI_STATUS 0x102C
#define SYN_DMA_CURRENT_HOST_TX_DESCRIPTOR 0x1048
#define SYN_DMA_CURRENT_HOST_RX_DESCRIPTOR 0x104C
#define SYN_DMA_CURRENT_HOST_TX_BUFFER_ADDRESS 0x1050
#define SYN_DMA_CURRENT_HOST_RX_BUFFER_ADDRESS 0x1054

/*
 * Optional HW feature register
 */
#define SYN_HW_FEATURE 0x1058
/*
 * Register Bit Definitions
 */

/*
 * SYN_MAC_CONFIGURATION = 0x0000, MAC config Register Layout
 */
enum syn_mac_config_reg {
	SYN_MAC_TWOKPE = 0x08000000,	/* Support for 2K packets */
	SYN_MAC_TWOKPE_ENABLE = 0x08000000,
	SYN_MAC_TWOKPE_DISABLE = 0x00000000,
	SYN_MAC_CST = 0x02000000,	/* (CST) CRC Stripping for Type Frames */
	SYN_MAC_CST_ENABLE = 0x02000000,
	/*
	 * NOTE(review): CST_DISABLE has the same value as CST_ENABLE —
	 * by the pattern of every other pair here it should likely be
	 * 0x00000000. Value kept as-is; confirm before relying on it.
	 */
	SYN_MAC_CST_DISABLE = 0x02000000,
	SYN_MAC_TC = 0x01000000,	/* (TC) Transmit configuration */
	SYN_MAC_WATCHDOG = 0x00800000,
	SYN_MAC_WATCHDOG_ENABLE = 0x00000000,	/* Enable watchdog timer */
	SYN_MAC_WATCHDOG_DISABLE = 0x00800000,	/* (WD)Disable watchdog timer on Rx */
	SYN_MAC_JABBER = 0x00400000,
	SYN_MAC_JABBER_ENABLE = 0x00000000,	/* Enable jabber timer */
	SYN_MAC_JABBER_DISABLE = 0x00400000,	/* (JD)Disable jabber timer on Tx */
	SYN_MAC_FRAME_BURST = 0x00200000,
	SYN_MAC_FRAME_BURST_ENABLE = 0x00200000,	/* (BE)Enable frame bursting
							during Tx */
	SYN_MAC_FRAME_BURST_DISABLE = 0x00000000,	/* Disable frame bursting */
	SYN_MAC_JUMBO_FRAME = 0x00100000,
	SYN_MAC_JUMBO_FRAME_ENABLE = 0x00100000,	/* (JE)Enable jumbo frame for Rx */
	SYN_MAC_JUMBO_FRAME_DISABLE = 0x00000000,	/* Disable jumbo frame */
	SYN_MAC_INTER_FRAME_GAP7 = 0x000E0000,	/* (IFG) Config7 - 40bit times */
	SYN_MAC_INTER_FRAME_GAP6 = 0x000C0000,	/* (IFG) Config6 - 48bit times */
	SYN_MAC_INTER_FRAME_GAP5 = 0x000A0000,	/* (IFG) Config5 - 56bit times */
	SYN_MAC_INTER_FRAME_GAP4 = 0x00080000,	/* (IFG) Config4 - 64bit times */
	SYN_MAC_INTER_FRAME_GAP3 = 0x00060000,	/* (IFG) Config3 - 72bit times */
	SYN_MAC_INTER_FRAME_GAP2 = 0x00040000,	/* (IFG) Config2 - 80bit times */
	SYN_MAC_INTER_FRAME_GAP1 = 0x00020000,	/* (IFG) Config1 - 88bit times */
	SYN_MAC_INTER_FRAME_GAP0 = 0x00000000,	/* (IFG) Config0 - 96bit times */
	SYN_MAC_DISABLE_CRS = 0x00010000,	/* (DCRS) Disable Carrier Sense During Transmission */
	SYN_MAC_MII_GMII = 0x00008000,
	SYN_MAC_SELECT_MII = 0x00008000,	/* (PS)Port Select-MII mode */
	SYN_MAC_SELECT_GMII = 0x00000000,	/* GMII mode */
	SYN_MAC_FE_SPEED100 = 0x00004000,	/* (FES)Fast Ethernet speed 100Mbps */
	SYN_MAC_FE_SPEED = 0x00004000,		/* (FES)Fast Ethernet speed 100Mbps */
	SYN_MAC_FE_SPEED10 = 0x00000000,	/* (FES)Fast Ethernet speed 10Mbps */
	SYN_MAC_RX_OWN = 0x00002000,
	SYN_MAC_DISABLE_RX_OWN = 0x00002000,	/* (DO)Disable receive own packets */
	SYN_MAC_ENABLE_RX_OWN = 0x00000000,	/* Enable receive own packets */
	SYN_MAC_LOOPBACK = 0x00001000,
	SYN_MAC_LOOPBACK_ON = 0x00001000,	/* (LM)Loopback mode for GMII/MII */
	SYN_MAC_LOOPBACK_OFF = 0x00000000,	/* Normal mode */
	SYN_MAC_DUPLEX = 0x00000800,
	SYN_MAC_FULL_DUPLEX = 0x00000800,	/* (DM)Full duplex mode */
	SYN_MAC_HALF_DUPLEX = 0x00000000,	/* Half duplex mode */
	SYN_MAC_RX_IPC_OFFLOAD = 0x00000400,	/* IPC checksum offload */
	SYN_MAC_RX_IPC_OFFLOAD_ENABLE = 0x00000400,
	SYN_MAC_RX_IPC_OFFLOAD_DISABLE = 0x00000000,
	SYN_MAC_RETRY = 0x00000200,
	SYN_MAC_RETRY_DISABLE = 0x00000200,	/* (DR)Disable Retry */
	SYN_MAC_RETRY_ENABLE = 0x00000000,	/* Enable retransmission as per BL */
	SYN_MAC_LINK_UP = 0x00000100,		/* (LUD)Link UP */
	/*
	 * NOTE(review): LINK_DOWN shares LINK_UP's value — likely meant
	 * to be 0x00000000. Value kept as-is; confirm against databook.
	 */
	SYN_MAC_LINK_DOWN = 0x00000100,		/* Link Down */
	SYN_MAC_PAD_CRC_STRIP = 0x00000080,
	SYN_MAC_PAD_CRC_STRIP_ENABLE = 0x00000080,	/* (ACS) Automatic Pad/Crc strip enable */
	SYN_MAC_PAD_CRC_STRIP_DISABLE = 0x00000000,	/* Automatic Pad/Crc stripping disable */
	SYN_MAC_BACKOFF_LIMIT = 0x00000060,
	SYN_MAC_BACKOFF_LIMIT3 = 0x00000060,	/* (BL)Back-off limit in HD mode */
	SYN_MAC_BACKOFF_LIMIT2 = 0x00000040,
	SYN_MAC_BACKOFF_LIMIT1 = 0x00000020,
	SYN_MAC_BACKOFF_LIMIT0 = 0x00000000,
	SYN_MAC_DEFERRAL_CHECK = 0x00000010,
	SYN_MAC_DEFERRAL_CHECK_ENABLE = 0x00000010,	/* (DC)Deferral check enable in HD mode */
	SYN_MAC_DEFERRAL_CHECK_DISABLE = 0x00000000,	/* Deferral check disable */
	SYN_MAC_TX = 0x00000008,
	SYN_MAC_TX_ENABLE = 0x00000008,		/* (TE)Transmitter enable */
	SYN_MAC_TX_DISABLE = 0x00000000,	/* Transmitter disable */
	SYN_MAC_RX = 0x00000004,
	SYN_MAC_RX_ENABLE = 0x00000004,		/* (RE)Receiver enable */
	SYN_MAC_RX_DISABLE = 0x00000000,	/* Receiver disable */
	SYN_MAC_PRELEN_RESERVED = 0x00000003,	/* Preamble Length for Transmit Frames */
	SYN_MAC_PRELEN_3B = 0x00000002,
	SYN_MAC_PRELEN_5B = 0x00000001,
	SYN_MAC_PRELEN_7B = 0x00000000,
};
/*
 * SYN_MAC_FRAME_FILTER = 0x0004, Mac frame filtering controls Register
 */
enum syn_mac_frame_filter_reg {
	SYN_MAC_FILTER = 0x80000000,
	SYN_MAC_FILTER_OFF = 0x80000000,	/* (RA)Receive all incoming packets */
	SYN_MAC_FILTER_ON = 0x00000000,		/* Receive filtered pkts only */
	SYN_MAC_HASH_PERFECT_FILTER = 0x00000400,	/* Hash or Perfect Filter enable */
	SYN_MAC_SRC_ADDR_FILTER = 0x00000200,
	SYN_MAC_SRC_ADDR_FILTER_ENABLE = 0x00000200,	/* (SAF)Source Address Filter enable */
	SYN_MAC_SRC_ADDR_FILTER_DISABLE = 0x00000000,
	SYN_MAC_SRC_INVA_ADDR_FILTER = 0x00000100,
	SYN_MAC_SRC_INV_ADDR_FILTER_EN = 0x00000100,	/* (SAIF)Inv Src Addr Filter enable */
	SYN_MAC_SRC_INV_ADDR_FILTER_DIS = 0x00000000,
	SYN_MAC_PASS_CONTROL = 0x000000C0,
	SYN_MAC_PASS_CONTROL3 = 0x000000C0,	/* (PCF)Forwards ctrl frames that pass AF */
	SYN_MAC_PASS_CONTROL2 = 0x00000080,	/* Forwards all control frames
						even if they fail the AF */
	SYN_MAC_PASS_CONTROL1 = 0x00000040,	/* Forwards all control frames except
						PAUSE control frames to application
						even if they fail the AF */
	SYN_MAC_PASS_CONTROL0 = 0x00000000,	/* Don't pass control frames */
	SYN_MAC_BROADCAST = 0x00000020,
	SYN_MAC_BROADCAST_DISABLE = 0x00000020,	/* (DBF)Disable Rx of broadcast frames */
	SYN_MAC_BROADCAST_ENABLE = 0x00000000,	/* Enable broadcast frames */
	SYN_MAC_MULTICAST_FILTER = 0x00000010,
	SYN_MAC_MULTICAST_FILTER_OFF = 0x00000010,	/* (PM) Pass all multicast packets */
	SYN_MAC_MULTICAST_FILTER_ON = 0x00000000,	/* Pass filtered multicast packets */
	SYN_MAC_DEST_ADDR_FILTER = 0x00000008,
	SYN_MAC_DEST_ADDR_FILTER_INV = 0x00000008,	/* (DAIF)Inverse filtering for DA */
	SYN_MAC_DEST_ADDR_FILTER_NOR = 0x00000000,	/* Normal filtering for DA */
	SYN_MAC_MCAST_HASH_FILTER = 0x00000004,
	SYN_MAC_MCAST_HASH_FILTER_ON = 0x00000004,	/* (HMC)perform multicast hash filtering */
	SYN_MAC_MCAST_HASH_FILTER_OFF = 0x00000000,	/* perfect filtering only */
	SYN_MAC_UCAST_HASH_FILTER = 0x00000002,
	SYN_MAC_UCAST_HASH_FILTER_ON = 0x00000002,	/* (HUC)Unicast Hash filtering only */
	SYN_MAC_UCAST_HASH_FILTER_OFF = 0x00000000,	/* perfect filtering only */
	SYN_MAC_PROMISCUOUS_MODE = 0x00000001,
	SYN_MAC_PROMISCUOUS_MODE_ON = 0x00000001,	/* (PR) Receive all frames */
	SYN_MAC_PROMISCUOUS_MODE_OFF = 0x00000000,	/* Receive filtered packets only */
};
/*
* SYN_MAC_FLOW_CONTROL = 0x0018, Flow control Register Layout
*/
enum syn_mac_flow_control_reg {
	SYN_MAC_FC_PAUSE_TIME_MASK = 0xFFFF0000,	/* (PT) PAUSE TIME field in the control frame */
	SYN_MAC_FC_PAUSE_TIME_SHIFT = 16,		/* Bit position of the PT field */
	SYN_MAC_FC_PAUSE_LOW_THRESH = 0x00000030,	/* (PLT) pause low threshold field mask */
	SYN_MAC_FC_PAUSE_LOW_THRESH3 = 0x00000030,	/* (PLT) threshold for pause timer: 256 slot times */
	SYN_MAC_FC_PAUSE_LOW_THRESH2 = 0x00000020,	/* 144 slot times */
	SYN_MAC_FC_PAUSE_LOW_THRESH1 = 0x00000010,	/* 28 slot times */
	SYN_MAC_FC_PAUSE_LOW_THRESH0 = 0x00000000,	/* 4 slot times */
	SYN_MAC_FC_UNICAST_PAUSE_FRAME = 0x00000008,	/* (UP) unicast pause frame detect field mask */
	SYN_MAC_FC_UNICAST_PAUSE_FRAME_ON = 0x00000008,	/* (UP) detect pause frames with unicast addr. */
	SYN_MAC_FC_UNICAST_PAUSE_FRAME_OFF = 0x00000000,/* Detect only pause frames with multicast addr. */
	SYN_MAC_FC_RX_FLOW_CONTROL = 0x00000004,	/* (RFE) Rx flow control field mask */
	SYN_MAC_FC_RX_FLOW_CONTROL_ENABLE = 0x00000004,	/* (RFE) enable Rx flow control */
	SYN_MAC_FC_RX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable Rx flow control */
	SYN_MAC_FC_TX_FLOW_CONTROL = 0x00000002,	/* (TFE) Tx flow control field mask */
	SYN_MAC_FC_TX_FLOW_CONTROL_ENABLE = 0x00000002,	/* (TFE) enable Tx flow control */
	SYN_MAC_FC_TX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable Tx flow control */
	SYN_MAC_FC_FLOW_CONTROL_BACK_PRESSURE = 0x00000001,	/* Same bit as FCB/PBA below */
	SYN_MAC_FC_SEND_PAUSE_FRAME = 0x00000001,	/* (FCB/PBA) send pause frame / apply back pressure */
};
/*
* SYN_MAC_ADDR_HIGH Register
*/
enum syn_mac_addr_high {
	SYN_MAC_ADDR_HIGH_AE = 0x80000000,	/* (AE) address-enable bit in the MAC_ADDR_HIGH register */
};
/*
* SYN_DMA_BUS_MODE = 0x0000, CSR0 - Bus Mode
*/
enum syn_dma_bus_mode_reg {
	SYN_DMA_FIXED_BURST_ENABLE = 0x00010000,	/* (FB) fixed burst: SINGLE, INCR4, INCR8 or INCR16 */
	SYN_DMA_FIXED_BURST_DISABLE = 0x00000000,	/* SINGLE, INCR */
	SYN_DMA_TX_PRIORITY_RATIO11 = 0x00000000,	/* (PR) TX:RX DMA priority ratio 1:1 */
	SYN_DMA_TX_PRIORITY_RATIO21 = 0x00004000,	/* (PR) TX:RX DMA priority ratio 2:1 */
	SYN_DMA_TX_PRIORITY_RATIO31 = 0x00008000,	/* (PR) TX:RX DMA priority ratio 3:1 */
	SYN_DMA_TX_PRIORITY_RATIO41 = 0x0000C000,	/* (PR) TX:RX DMA priority ratio 4:1 */
	SYN_DMA_ADDRESS_ALIGNED_BEATS = 0x02000000,	/* Address-aligned beats */
	SYN_DMA_BURST_LENGTHX8 = 0x01000000,	/* When set, multiplies the PBL by 8 */
	SYN_DMA_BURST_LENGTH256 = 0x01002000,	/* (dma_burst_lengthx8 | dma_burst_length32) = 256 */
	SYN_DMA_BURST_LENGTH128 = 0x01001000,	/* (dma_burst_lengthx8 | dma_burst_length16) = 128 */
	SYN_DMA_BURST_LENGTH64 = 0x01000800,	/* (dma_burst_lengthx8 | dma_burst_length8) = 64 */
	/* (PBL) programmable burst length */
	SYN_DMA_BURST_LENGTH32 = 0x00002000,	/* DMA burst length = 32 */
	SYN_DMA_BURST_LENGTH16 = 0x00001000,	/* DMA burst length = 16 */
	SYN_DMA_BURST_LENGTH8 = 0x00000800,	/* DMA burst length = 8 */
	SYN_DMA_BURST_LENGTH4 = 0x00000400,	/* DMA burst length = 4 */
	SYN_DMA_BURST_LENGTH2 = 0x00000200,	/* DMA burst length = 2 */
	SYN_DMA_BURST_LENGTH1 = 0x00000100,	/* DMA burst length = 1 */
	SYN_DMA_BURST_LENGTH0 = 0x00000000,	/* DMA burst length = 0 */
	SYN_DMA_DESCRIPTOR8_WORDS = 0x00000080,	/* Enhanced descriptor: 1 => 8-word descriptor */
	SYN_DMA_DESCRIPTOR4_WORDS = 0x00000000,	/* Enhanced descriptor: 0 => 4-word descriptor */
	SYN_DMA_DESCRIPTOR_SKIP16 = 0x00000040,	/* (DSL) descriptor skip length (no. of dwords) */
	SYN_DMA_DESCRIPTOR_SKIP8 = 0x00000020,	/* ...skipped between two unchained descriptors */
	SYN_DMA_DESCRIPTOR_SKIP4 = 0x00000010,
	SYN_DMA_DESCRIPTOR_SKIP2 = 0x00000008,
	SYN_DMA_DESCRIPTOR_SKIP1 = 0x00000004,
	SYN_DMA_DESCRIPTOR_SKIP0 = 0x00000000,
	SYN_DMA_ARBIT_RR = 0x00000000,	/* (DA) DMA round-robin arbitration */
	SYN_DMA_ARBIT_PR = 0x00000002,	/* Rx has priority over Tx */
	SYN_DMA_RESET_ON = 0x00000001,	/* (SWR) software-reset the DMA engine */
	SYN_DMA_RESET_OFF = 0x00000000,
};
/*
* SYN_DMA_STATUS = 0x0014, CSR5 - Dma status Register
*/
enum syn_dma_status_reg {
	SYN_DMA_GMAC_PMT_INTR = 0x10000000,	/* (GPI) GMAC PMT subsystem interrupt */
	SYN_DMA_GMAC_MMC_INTR = 0x08000000,	/* (GMI) GMAC MMC subsystem interrupt */
	SYN_DMA_GMAC_LINE_INTF_INTR = 0x04000000,	/* Line interface interrupt */
	SYN_DMA_ERROR_BIT2 = 0x02000000,	/* (EB) error bit: 0-data buffer, 1-desc access */
	SYN_DMA_ERROR_BIT1 = 0x01000000,	/* (EB) error bit: 0-write transfer, 1-read transfer */
	SYN_DMA_ERROR_BIT0 = 0x00800000,	/* (EB) error bit: 0-Rx DMA, 1-Tx DMA */
	SYN_DMA_TX_STATE = 0x00700000,	/* (TS) transmit process state field mask */
	SYN_DMA_TX_STOPPED = 0x00000000,	/* Stopped - reset or Stop Tx command issued */
	SYN_DMA_TX_FETCHING = 0x00100000,	/* Running - fetching the Tx descriptor */
	SYN_DMA_TX_WAITING = 0x00200000,	/* Running - waiting for status */
	SYN_DMA_TX_READING = 0x00300000,	/* Running - reading the data from host memory */
	SYN_DMA_TX_SUSPENDED = 0x00600000,	/* Suspended - Tx descriptor unavailable */
	SYN_DMA_TX_CLOSING = 0x00700000,	/* Running - closing Tx descriptor */
	SYN_DMA_RX_STATE = 0x000E0000,	/* (RS) receive process state field mask */
	SYN_DMA_RX_STOPPED = 0x00000000,	/* Stopped - reset or Stop Rx command issued */
	SYN_DMA_RX_FETCHING = 0x00020000,	/* Running - fetching the Rx descriptor */
	SYN_DMA_RX_WAITING = 0x00060000,	/* Running - waiting for packet */
	SYN_DMA_RX_SUSPENDED = 0x00080000,	/* Suspended - Rx descriptor unavailable */
	SYN_DMA_RX_CLOSING = 0x000A0000,	/* Running - closing descriptor */
	SYN_DMA_RX_QUEUING = 0x000E0000,	/* Running - queuing the receive frame into host memory */
	SYN_DMA_INT_NORMAL = 0x00010000,	/* (NIS) normal interrupt summary */
	SYN_DMA_INT_ABNORMAL = 0x00008000,	/* (AIS) abnormal interrupt summary */
	SYN_DMA_INT_EARLY_RX = 0x00004000,	/* Early receive interrupt (Normal) */
	SYN_DMA_INT_BUS_ERROR = 0x00002000,	/* Fatal bus error (Abnormal) */
	SYN_DMA_INT_EARLY_TX = 0x00000400,	/* Early transmit interrupt (Abnormal) */
	SYN_DMA_INT_RX_WDOG_TO = 0x00000200,	/* Receive watchdog timeout (Abnormal) */
	SYN_DMA_INT_RX_STOPPED = 0x00000100,	/* Receive process stopped (Abnormal) */
	SYN_DMA_INT_RX_NO_BUFFER = 0x00000080,	/* RX buffer unavailable (Abnormal) */
	SYN_DMA_INT_RX_COMPLETED = 0x00000040,	/* Completion of frame RX (Normal) */
	SYN_DMA_INT_TX_UNDERFLOW = 0x00000020,	/* Transmit underflow (Abnormal) */
	SYN_DMA_INT_RCV_OVERFLOW = 0x00000010,	/* RX buffer overflow interrupt */
	SYN_DMA_INT_TX_JABBER_TO = 0x00000008,	/* TX jabber timeout (Abnormal) */
	SYN_DMA_INT_TX_NO_BUFFER = 0x00000004,	/* TX buffer unavailable (Normal) */
	SYN_DMA_INT_TX_STOPPED = 0x00000002,	/* TX process stopped (Abnormal) */
	SYN_DMA_INT_TX_COMPLETED = 0x00000001,	/* Transmit completed (Normal) */
};
/*
* SYN_DMA_OPERATION_MODE = 0x0018, CSR6 - Dma Operation Mode Register
*/
enum syn_dma_operation_mode_reg {
	SYN_DMA_DISABLE_DROP_TCP_CS = 0x04000000,	/* (DT) disable dropping of TCP/IP CS error frames */
	SYN_DMA_RX_STORE_AND_FORWARD = 0x02000000,	/* Rx (RSF) store and forward */
	SYN_DMA_RX_FRAME_FLUSH = 0x01000000,	/* (DFF) disable receive frame flush */
	SYN_DMA_TX_STORE_AND_FORWARD = 0x00200000,	/* Tx (TSF) store and forward */
	SYN_DMA_FLUSH_TX_FIFO = 0x00100000,	/* (FTF) Tx FIFO controller is reset to default */
	SYN_DMA_TX_THRESH_CTRL = 0x0001C000,	/* (TTC) MTL Tx FIFO threshold field mask */
	SYN_DMA_TX_THRESH_CTRL16 = 0x0001C000,	/* (TTC) MTL Tx FIFO threshold 16 */
	SYN_DMA_TX_THRESH_CTRL24 = 0x00018000,	/* (TTC) MTL Tx FIFO threshold 24 */
	SYN_DMA_TX_THRESH_CTRL32 = 0x00014000,	/* (TTC) MTL Tx FIFO threshold 32 */
	SYN_DMA_TX_THRESH_CTRL40 = 0x00010000,	/* (TTC) MTL Tx FIFO threshold 40 */
	SYN_DMA_TX_THRESH_CTRL256 = 0x0000c000,	/* (TTC) MTL Tx FIFO threshold 256 */
	SYN_DMA_TX_THRESH_CTRL192 = 0x00008000,	/* (TTC) MTL Tx FIFO threshold 192 */
	SYN_DMA_TX_THRESH_CTRL128 = 0x00004000,	/* (TTC) MTL Tx FIFO threshold 128 */
	SYN_DMA_TX_THRESH_CTRL64 = 0x00000000,	/* (TTC) MTL Tx FIFO threshold 64 */
	SYN_DMA_TX_START = 0x00002000,	/* (ST) start/stop transmission */
	SYN_DMA_RX_FLOW_CTRL_DEACT = 0x00401800,	/* (RFD) Rx flow control deact. threshold field mask */
	SYN_DMA_RX_FLOW_CTRL_DEACT1K = 0x00000000,	/* (RFD) Rx flow control deact. threshold (1kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT2K = 0x00000800,	/* (RFD) Rx flow control deact. threshold (2kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT3K = 0x00001000,	/* (RFD) Rx flow control deact. threshold (3kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT4K = 0x00001800,	/* (RFD) Rx flow control deact. threshold (4kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT5K = 0x00400000,	/* (RFD) Rx flow control deact. threshold (5kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT6K = 0x00400800,	/* (RFD) Rx flow control deact. threshold (6kbytes) */
	SYN_DMA_RX_FLOW_CTRL_DEACT7K = 0x00401000,	/* (RFD) Rx flow control deact. threshold (7kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT = 0x00800600,	/* (RFA) Rx flow control act. threshold field mask */
	SYN_DMA_RX_FLOW_CTRL_ACT1K = 0x00000000,	/* (RFA) Rx flow control act. threshold (1kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT2K = 0x00000200,	/* (RFA) Rx flow control act. threshold (2kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT3K = 0x00000400,	/* (RFA) Rx flow control act. threshold (3kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT4K = 0x00000600,	/* (RFA) Rx flow control act. threshold (4kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT5K = 0x00800000,	/* (RFA) Rx flow control act. threshold (5kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT6K = 0x00800200,	/* (RFA) Rx flow control act. threshold (6kbytes) */
	SYN_DMA_RX_FLOW_CTRL_ACT7K = 0x00800400,	/* (RFA) Rx flow control act. threshold (7kbytes) */
	SYN_DMA_RX_THRESH_CTRL = 0x00000018,	/* (RTC) MTL Rx FIFO threshold field mask */
	SYN_DMA_RX_THRESH_CTRL64 = 0x00000000,	/* (RTC) MTL Rx FIFO threshold 64 */
	SYN_DMA_RX_THRESH_CTRL32 = 0x00000008,	/* (RTC) MTL Rx FIFO threshold 32 */
	SYN_DMA_RX_THRESH_CTRL96 = 0x00000010,	/* (RTC) MTL Rx FIFO threshold 96 */
	SYN_DMA_RX_THRESH_CTRL128 = 0x00000018,	/* (RTC) MTL Rx FIFO threshold 128 */
	SYN_DMA_EN_HW_FLOW_CTRL = 0x00000100,	/* (EFC) enable HW flow control */
	SYN_DMA_DIS_HW_FLOW_CTRL = 0x00000000,	/* Disable HW flow control */
	SYN_DMA_FWD_ERROR_FRAMES = 0x00000080,	/* (FEF) forward error frames */
	SYN_DMA_FWD_UNDER_SZ_FRAMES = 0x00000040,	/* (FUF) forward undersize frames */
	SYN_DMA_TX_SECOND_FRAME = 0x00000004,	/* (OSF) operate on 2nd frame */
	SYN_DMA_RX_START = 0x00000002,	/* (SR) start/stop reception */
};
/*
* SYN_DMA_INT_ENABLE = 0x101C, CSR7 - Interrupt enable Register Layout
*/
enum syn_dma_interrupt_reg {
	/* Enable bits share the bit positions of the corresponding status bits. */
	SYN_DMA_IE_NORMAL = SYN_DMA_INT_NORMAL,	/* Normal interrupt enable */
	SYN_DMA_IE_ABNORMAL = SYN_DMA_INT_ABNORMAL,	/* Abnormal interrupt enable */
	SYN_DMA_IE_EARLY_RX = SYN_DMA_INT_EARLY_RX,	/* Early RX interrupt enable */
	SYN_DMA_IE_BUS_ERROR = SYN_DMA_INT_BUS_ERROR,	/* Fatal bus error enable */
	SYN_DMA_IE_EARLY_TX = SYN_DMA_INT_EARLY_TX,	/* Early TX interrupt enable */
	SYN_DMA_IE_RX_WDOG_TO = SYN_DMA_INT_RX_WDOG_TO,	/* RX watchdog timeout enable */
	SYN_DMA_IE_RX_STOPPED = SYN_DMA_INT_RX_STOPPED,	/* RX process stopped enable */
	SYN_DMA_IE_RX_NO_BUFFER = SYN_DMA_INT_RX_NO_BUFFER,	/* Receive buffer unavailable enable */
	SYN_DMA_IE_RX_COMPLETED = SYN_DMA_INT_RX_COMPLETED,	/* Completion of frame reception enable */
	SYN_DMA_IE_TX_UNDERFLOW = SYN_DMA_INT_TX_UNDERFLOW,	/* TX underflow enable */
	SYN_DMA_IE_RX_OVERFLOW = SYN_DMA_INT_RCV_OVERFLOW,	/* RX buffer overflow interrupt */
	SYN_DMA_IE_TX_JABBER_TO = SYN_DMA_INT_TX_JABBER_TO,	/* TX jabber timeout enable */
	SYN_DMA_IE_TX_NO_BUFFER = SYN_DMA_INT_TX_NO_BUFFER,	/* TX buffer unavailable enable */
	SYN_DMA_IE_TX_STOPPED = SYN_DMA_INT_TX_STOPPED,	/* TX process stopped enable */
	SYN_DMA_IE_TX_COMPLETED = SYN_DMA_INT_TX_COMPLETED,	/* TX completed enable */
};
/*
* SYN_DMA_AXI_BUS_MODE = 0x1028
*/
enum syn_dma_axi_bus_mode_reg {
	SYN_DMA_EN_LPI = 0x80000000,	/* Enable LPI (low power interface) */
	SYN_DMA_LPI_XIT_FRM = 0x40000000,	/* Unlock on magic/remote-wakeup frame — TODO confirm */
	/* Maximum outstanding AXI write requests */
	SYN_DMA_WR_OSR_NUM_REQS16 = 0x00F00000,
	SYN_DMA_WR_OSR_NUM_REQS8 = 0x00700000,
	SYN_DMA_WR_OSR_NUM_REQS4 = 0x00300000,
	SYN_DMA_WR_OSR_NUM_REQS2 = 0x00100000,
	SYN_DMA_WR_OSR_NUM_REQS1 = 0x00000000,
	/* Maximum outstanding AXI read requests */
	SYN_DMA_RD_OSR_NUM_REQS16 = 0x000F0000,
	SYN_DMA_RD_OSR_NUM_REQS8 = 0x00070000,
	SYN_DMA_RD_OSR_NUM_REQS4 = 0x00030000,
	SYN_DMA_RD_OSR_NUM_REQS2 = 0x00010000,
	SYN_DMA_RD_OSR_NUM_REQS1 = 0x00000000,
	SYN_DMA_ONEKBBE = 0x00002000,	/* 1KB boundary crossing enable — TODO confirm */
	SYN_DMA_AXI_AAL = 0x00001000,	/* Address-aligned beats */
	/* Allowed AXI burst lengths */
	SYN_DMA_AXI_BLEN256 = 0x00000080,
	SYN_DMA_AXI_BLEN128 = 0x00000040,
	SYN_DMA_AXI_BLEN64 = 0x00000020,
	SYN_DMA_AXI_BLEN32 = 0x00000010,
	SYN_DMA_AXI_BLEN16 = 0x00000008,
	SYN_DMA_AXI_BLEN8 = 0x00000004,
	SYN_DMA_AXI_BLEN4 = 0x00000002,
	SYN_DMA_UNDEFINED = 0x00000001,
};
/*
* Values to initialize DMA registers
*/
enum syn_dma_init_values {
	/*
	 * Interrupt groups: masks used to classify a raised DMA status bit.
	 */
	SYN_DMA_INT_ERROR_MASK = SYN_DMA_INT_BUS_ERROR,	/* Error */
	SYN_DMA_INT_RX_ABN_MASK = SYN_DMA_INT_RX_NO_BUFFER,	/* RX abnormal intr */
	SYN_DMA_INT_RX_NORM_MASK = SYN_DMA_INT_RX_COMPLETED,	/* RX normal intr */
	SYN_DMA_INT_RX_STOPPED_MASK = SYN_DMA_INT_RX_STOPPED,	/* RX stopped */
	SYN_DMA_INT_TX_ABN_MASK = SYN_DMA_INT_TX_UNDERFLOW,	/* TX abnormal intr */
	SYN_DMA_INT_TX_NORM_MASK = SYN_DMA_INT_TX_COMPLETED,	/* TX normal intr */
	SYN_DMA_INT_TX_STOPPED_MASK = SYN_DMA_INT_TX_STOPPED,	/* TX stopped */
	/* Reset-time value for the DMA bus-mode register */
	SYN_DMA_BUS_MODE_INIT = SYN_DMA_FIXED_BURST_ENABLE | SYN_DMA_BURST_LENGTH8
		| SYN_DMA_DESCRIPTOR_SKIP2 | SYN_DMA_RESET_OFF,
	/* Operational value for the DMA bus-mode register */
	SYN_DMA_BUS_MODE_VAL = SYN_DMA_BURST_LENGTH32
		| SYN_DMA_BURST_LENGTHX8 | SYN_DMA_DESCRIPTOR_SKIP0
		| SYN_DMA_DESCRIPTOR8_WORDS | SYN_DMA_ARBIT_PR | SYN_DMA_ADDRESS_ALIGNED_BEATS,
	/* Operation-mode register: store-and-forward both directions */
	SYN_DMA_OMR = SYN_DMA_TX_STORE_AND_FORWARD | SYN_DMA_RX_STORE_AND_FORWARD
		| SYN_DMA_RX_THRESH_CTRL128 | SYN_DMA_TX_SECOND_FRAME,
	/* Full interrupt-enable set used during normal operation */
	SYN_DMA_INT_EN = SYN_DMA_IE_NORMAL | SYN_DMA_IE_ABNORMAL | SYN_DMA_INT_ERROR_MASK
		| SYN_DMA_INT_RX_ABN_MASK | SYN_DMA_INT_RX_NORM_MASK
		| SYN_DMA_INT_RX_STOPPED_MASK | SYN_DMA_INT_TX_ABN_MASK
		| SYN_DMA_INT_TX_NORM_MASK | SYN_DMA_INT_TX_STOPPED_MASK,
	SYN_DMA_INT_DISABLE = 0,	/* Mask all DMA interrupts */
	/* AXI bus-mode register: 16-beat bursts, 8 outstanding reads/writes */
	SYN_DMA_AXI_BUS_MODE_VAL = SYN_DMA_AXI_BLEN16 | SYN_DMA_RD_OSR_NUM_REQS8
		| SYN_DMA_WR_OSR_NUM_REQS8,
};
/*
* desc_mode
* GMAC descriptors mode
*/
enum desc_mode {
	RINGMODE = 0x00000001,	/* Descriptors laid out in a contiguous ring */
	CHAINMODE = 0x00000002,	/* Descriptors linked via next-descriptor pointers */
};
extern void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd);
extern void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd);
extern void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd);
extern void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd);
extern void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd);
extern uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd);
extern uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd);
extern void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma);
extern void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma);
#endif /*__SYN_REG_H__*/

View File

@@ -0,0 +1,189 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __SYN_DEV_H__
#define __SYN_DEV_H__
#include "syn_reg.h"
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
/*
* Subclass for base nss_gmac_haldev
*/
struct syn_hal_dev {
	struct nss_gmac_hal_dev nghd;	/* Base class; must stay first so casts between the two types work */
	fal_xgmib_info_t stats;		/* Cached xGMIB counters, refreshed by syn_get_stats() */
};
/*
* syn_set_rx_flow_ctrl()
*/
static inline void syn_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	/* Turn on the Rx flow-control enable bit in the MAC Rx flow register. */
	hal_set_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL, SYN_MAC_RX_FLOW_ENABLE);
}
/*
* syn_clear_rx_flow_ctrl()
*/
static inline void syn_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	/* Turn off the Rx flow-control enable bit in the MAC Rx flow register. */
	hal_clear_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL, SYN_MAC_RX_FLOW_ENABLE);
}
/*
* syn_set_tx_flow_ctrl()
*/
static inline void syn_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	/* Enable Tx flow control on queue 0. */
	hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, SYN_MAC_TX_FLOW_ENABLE);
}
/*
* syn_send_tx_pause_frame()
*/
static inline void syn_send_tx_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	/*
	 * Tx flow control must be enabled before the pause-send bit is set,
	 * so write the enable bit first, then trigger the pause frame.
	 */
	hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, SYN_MAC_TX_FLOW_ENABLE);
	hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, SYN_MAC_TX_PAUSE_SEND);
}
/*
* syn_clear_tx_flow_ctrl()
*/
static inline void syn_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
	/* Disable Tx flow control on queue 0. */
	hal_clear_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, SYN_MAC_TX_FLOW_ENABLE);
}
/*
* syn_clear_mac_ctrl()
*/
static inline void syn_clear_mac_ctrl(struct nss_gmac_hal_dev *nghd)
{
	/* Zero both MAC configuration registers (Tx first, then Rx). */
	hal_write_reg(nghd->mac_base, SYN_MAC_TX_CONFIG, 0);
	hal_write_reg(nghd->mac_base, SYN_MAC_RX_CONFIG, 0);
}
/*
* syn_rx_enable()
*/
/*
 * Enable the MAC receiver.
 */
static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd)
{
	hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE);
	/*
	 * NOTE(review): this writes the SYN_MAC_RX_ENABLE bit *value* into the
	 * packet-filter register — confirm that bit position is also meaningful
	 * in SYN_MAC_PACKET_FILTER, since syn_rx_disable() never clears it.
	 */
	hal_set_reg_bits(nghd, SYN_MAC_PACKET_FILTER, SYN_MAC_RX_ENABLE);
}
/*
* syn_rx_disable()
*/
static inline void syn_rx_disable(struct nss_gmac_hal_dev *nghd)
{
	/* Stop the MAC receiver. */
	hal_clear_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE);
}
/*
* syn_tx_enable()
*/
static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd)
{
	/* Start the MAC transmitter. */
	hal_set_reg_bits(nghd, SYN_MAC_TX_CONFIG, SYN_MAC_TX_ENABLE);
}
/*
* syn_tx_disable()
*/
static inline void syn_tx_disable(struct nss_gmac_hal_dev *nghd)
{
	/* Stop the MAC transmitter. */
	hal_clear_reg_bits(nghd, SYN_MAC_TX_CONFIG, SYN_MAC_TX_ENABLE);
}
/*
* syn_set_mmc_stats()
*/
static inline void syn_set_mmc_stats(struct nss_gmac_hal_dev *nghd)
{
	/* Configure MMC counters to reset on read. */
	hal_set_reg_bits(nghd, SYN_MAC_MMC_CTL, SYN_MAC_MMC_RSTONRD);
}
/*
* syn_rx_jumbo_frame_enable()
*/
static inline void syn_rx_jumbo_frame_enable(struct nss_gmac_hal_dev *nghd)
{
	/* Allow reception of jumbo frames. */
	hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_JUMBO_FRAME_ENABLE);
}
/*
* syn_rx_jumbo_frame_disable()
*/
static inline void syn_rx_jumbo_frame_disable(struct nss_gmac_hal_dev *nghd)
{
	/* Disallow reception of jumbo frames. */
	hal_clear_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_JUMBO_FRAME_ENABLE);
}
/*
* syn_set_full_duplex()
*/
static inline void syn_set_full_duplex(struct nss_gmac_hal_dev *nghd)
{
	/* Intentionally a no-op: duplex programming is not implemented (TBD). */
}
/*
* syn_set_half_duplex()
*/
static inline void syn_set_half_duplex(struct nss_gmac_hal_dev *nghd)
{
	/* Intentionally a no-op: duplex programming is not implemented (TBD). */
}
/*
 * syn_get_stats()
 *	Refresh the HAL-local xGMIB counter cache for this MAC.
 *	Returns 0 on success, -1 if the FAL query fails.
 */
static int syn_get_stats(struct nss_gmac_hal_dev *nghd)
{
	struct syn_hal_dev *shd = (struct syn_hal_dev *)nghd;

	return fal_get_xgmib_info(0, nghd->mac_id, &shd->stats) ? -1 : 0;
}
#endif /*__SYN_DEV_H__*/

View File

@@ -0,0 +1,505 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal_if.h>
#include <nss_dp_dev.h>
#include "syn_dev.h"
/* Byte offset of counter member 'm' inside fal_xgmib_info_t. */
#define SYN_STAT(m) offsetof(fal_xgmib_info_t, m)
/*
 * syn_ethtool_stats
 *	Maps an ethtool statistic name to the offset of the matching
 *	counter in the cached fal_xgmib_info_t structure.
 */
struct syn_ethtool_stats {
	uint8_t stat_string[ETH_GSTRING_LEN];	/* Name shown by "ethtool -S" */
	uint64_t stat_offset;			/* offsetof() into fal_xgmib_info_t */
};
/*
* Array of strings describing statistics
*/
/*
 * Table driving syn_get_strings()/syn_get_eth_stats(): one row per
 * exported counter, in the exact order the values are reported.
 * NOTE(review): the "rx_pkt65to127" .. "rx_pkt512to1023" rows map to
 * Rx128Byte .. Rx1024Byte — presumably the FAL names denote the upper
 * bound of the size bucket; confirm against the fal_mib definitions.
 */
static const struct syn_ethtool_stats syn_gstrings_stats[] = {
	{"rx_frame", SYN_STAT(RxFrame)},
	{"rx_bytes", SYN_STAT(RxByte)},
	{"rx_bytes_g", SYN_STAT(RxByteGood)},
	{"rx_broadcast", SYN_STAT(RxBroadGood)},
	{"rx_multicast", SYN_STAT(RxMultiGood)},
	{"rx_crc_err", SYN_STAT(RxFcsErr)},
	{"rx_runt_err", SYN_STAT(RxRuntErr)},
	{"rx_jabber_err", SYN_STAT(RxJabberError)},
	{"rx_undersize", SYN_STAT(RxUndersizeGood)},
	{"rx_oversize", SYN_STAT(RxOversizeGood)},
	{"rx_pkt64", SYN_STAT(Rx64Byte)},
	{"rx_pkt65to127", SYN_STAT(Rx128Byte)},
	{"rx_pkt128to255", SYN_STAT(Rx256Byte)},
	{"rx_pkt256to511", SYN_STAT(Rx512Byte)},
	{"rx_pkt512to1023", SYN_STAT(Rx1024Byte)},
	{"rx_pkt1024tomax", SYN_STAT(RxMaxByte)},
	{"rx_unicast", SYN_STAT(RxUnicastGood)},
	{"rx_len_err", SYN_STAT(RxLengthError)},
	{"rx_outofrange_err_ctr", SYN_STAT(RxOutOfRangeError)},
	{"rx_pause", SYN_STAT(RxPause)},
	{"rx_fifo_overflow", SYN_STAT(RxOverFlow)},
	{"rx_vlan", SYN_STAT(RxVLANFrameGoodBad)},
	{"rx_wdog", SYN_STAT(RxWatchDogError)},
	{"rx_lpi_usec_ctr", SYN_STAT(RxLPIUsec)},
	{"rx_lpi_tran_ctr", SYN_STAT(RxLPITran)},
	{"rx_drop_frame_ctr", SYN_STAT(RxDropFrameGoodBad)},
	{"rx_drop_byte_ctr", SYN_STAT(RxDropByteGoodBad)},
	{"tx_bytes", SYN_STAT(TxByte)},
	{"tx_frame", SYN_STAT(TxFrame)},
	{"tx_broadcast", SYN_STAT(TxBroadGood)},
	{"tx_broadcast_gb", SYN_STAT(TxBroad)},
	{"tx_multicast", SYN_STAT(TxMultiGood)},
	{"tx_multicast_gb", SYN_STAT(TxMulti)},
	{"tx_pkt64", SYN_STAT(Tx64Byte)},
	{"tx_pkt65to127", SYN_STAT(Tx128Byte)},
	{"tx_pkt128to255", SYN_STAT(Tx256Byte)},
	{"tx_pkt256to511", SYN_STAT(Tx512Byte)},
	{"tx_pkt512to1023", SYN_STAT(Tx1024Byte)},
	{"tx_pkt1024tomax", SYN_STAT(TxMaxByte)},
	{"tx_unicast", SYN_STAT(TxUnicast)},
	{"tx_underflow_err", SYN_STAT(TxUnderFlowError)},
	{"tx_bytes_g", SYN_STAT(TxByteGood)},
	{"tx_frame_g", SYN_STAT(TxFrameGood)},
	{"tx_pause", SYN_STAT(TxPause)},
	{"tx_vlan", SYN_STAT(TxVLANFrameGood)},
	{"tx_lpi_usec_ctr", SYN_STAT(TxLPIUsec)},
	{"tx_lpi_tran_ctr", SYN_STAT(TxLPITran)},
};
/*
* Array of strings describing private flag names
*/
/* Names of the ethtool private flags exported via ETH_SS_PRIV_FLAGS. */
static const char *const syn_strings_priv_flags[] = {
	"test",
};
#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
#define SYN_PRIV_FLAGS_LEN ARRAY_SIZE(syn_strings_priv_flags)
/*
* syn_rx_flow_control()
*/
/*
 * syn_rx_flow_control()
 *	Turn MAC Rx flow control on or off.
 */
static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd,
				bool enabled)
{
	BUG_ON(nghd == NULL);

	if (!enabled) {
		syn_clear_rx_flow_ctrl(nghd);
		return;
	}

	syn_set_rx_flow_ctrl(nghd);
}
/*
* syn_tx_flow_control()
*/
/*
 * syn_tx_flow_control()
 *	Turn MAC Tx flow control on or off.
 */
static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd,
				bool enabled)
{
	BUG_ON(nghd == NULL);

	if (!enabled) {
		syn_clear_tx_flow_ctrl(nghd);
		return;
	}

	syn_set_tx_flow_ctrl(nghd);
}
/*
* syn_get_mmc_stats()
*/
/*
 * syn_get_mmc_stats()
 *	Refresh the cached MMC/xGMIB counters; 0 on success, -1 on failure.
 */
static int32_t syn_get_mmc_stats(struct nss_gmac_hal_dev *nghd)
{
	BUG_ON(nghd == NULL);

	return syn_get_stats(nghd) ? -1 : 0;
}
/*
* syn_get_max_frame_size()
*/
/*
 * syn_get_max_frame_size()
 *	Query the port's max frame size via FAL.
 *	Returns the size on success, or the (non-zero) FAL error code on failure.
 */
static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd)
{
	uint32_t mtu;
	int ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);

	return ret ? ret : mtu;
}
/*
* syn_set_max_frame_size()
*/
/*
 * syn_set_max_frame_size()
 *	Program the port's max frame size via FAL; returns the FAL status.
 */
static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd,
				      uint32_t val)
{
	int32_t ret = fal_port_max_frame_size_set(0, nghd->mac_id, val);

	return ret;
}
/*
* syn_set_mac_speed()
*/
/*
 * syn_set_mac_speed()
 *	Deprecated: speed is not programmed here; warns and reports success.
 */
static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd,
				 uint32_t mac_speed)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}
/*
* syn_get_mac_speed()
*/
/*
 * syn_get_mac_speed()
 *	Deprecated: warns and always reports 0.
 */
static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}
/*
* syn_set_duplex_mode()
*/
/*
 * syn_set_duplex_mode()
 *	Deprecated: duplex is not programmed here; only warns.
 */
static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
				uint8_t duplex_mode)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
}
/*
* syn_get_duplex_mode()
*/
/*
 * syn_get_duplex_mode()
 *	Deprecated: warns and always reports 0.
 */
static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
	netdev_warn(nghd->netdev, "API deprecated\n");
	return 0;
}
/*
* syn_get_netdev_stats()
*/
/*
 * syn_get_netdev_stats()
 *	Fill an rtnl_link_stats64 from the cached xGMIB counters.
 *	Returns 0 on success, -1 if refreshing the counters fails.
 */
static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
				struct rtnl_link_stats64 *stats)
{
	struct syn_hal_dev *shd;
	fal_xgmib_info_t *mib;

	BUG_ON(nghd == NULL);

	shd = (struct syn_hal_dev *)nghd;
	mib = &shd->stats;

	/* Refresh the counter cache before publishing anything. */
	if (syn_get_stats(nghd))
		return -1;

	stats->rx_packets = mib->RxUnicastGood + mib->RxBroadGood + mib->RxMultiGood;
	stats->tx_packets = mib->TxUnicast + mib->TxBroadGood + mib->TxMultiGood;
	stats->rx_bytes = mib->RxByte;
	stats->tx_bytes = mib->TxByte;
	stats->multicast = mib->RxMultiGood;
	stats->rx_dropped = mib->RxDropFrameGoodBad;
	stats->rx_length_errors = mib->RxLengthError;
	stats->rx_crc_errors = mib->RxFcsErr;
	stats->rx_fifo_errors = mib->RxOverFlow;

	return 0;
}
/*
* syn_get_eth_stats()
*/
/*
 * syn_get_eth_stats()
 *	Copy the cached counters into the ethtool stats buffer, in
 *	syn_gstrings_stats[] order. Returns 0 on success, -1 on failure.
 */
static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd,
				 uint64_t *data)
{
	struct syn_hal_dev *shd;
	fal_xgmib_info_t *mib;
	int i;

	BUG_ON(nghd == NULL);

	shd = (struct syn_hal_dev *)nghd;
	mib = &shd->stats;

	/* Refresh the counter cache once, then copy out every entry. */
	if (syn_get_stats(nghd))
		return -1;

	for (i = 0; i < SYN_STATS_LEN; i++) {
		/*
		 * NOTE(review): only 32 bits of each counter are exposed here;
		 * confirm the fal_xgmib_info_t members are not 64-bit counters,
		 * otherwise values will wrap in the ethtool output.
		 */
		uint8_t *field = (uint8_t *)mib + syn_gstrings_stats[i].stat_offset;

		data[i] = *(uint32_t *)field;
	}

	return 0;
}
/*
* syn_get_strset_count()
*/
/*
 * syn_get_strset_count()
 *	Report how many strings belong to the requested ethtool string set.
 *	Returns -EPERM for unsupported sets.
 */
static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd,
				    int32_t sset)
{
	struct net_device *netdev;

	BUG_ON(nghd == NULL);
	netdev = nghd->netdev;

	if (sset == ETH_SS_STATS)
		return SYN_STATS_LEN;

	if (sset == ETH_SS_PRIV_FLAGS)
		return SYN_PRIV_FLAGS_LEN;

	netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
	return -EPERM;
}
/*
* syn_get_strings()
*/
/*
 * syn_get_strings()
 *	Emit the string table for the requested ethtool string set.
 *	Each name occupies an ETH_GSTRING_LEN slot; only strlen() bytes are
 *	written per slot (the ethtool core hands us a zeroed buffer).
 *	Returns 0 on success, -EPERM for unsupported sets.
 */
static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd,
			       int32_t stringset, uint8_t *data)
{
	struct net_device *netdev;
	uint8_t *pos = data;
	int i;

	BUG_ON(nghd == NULL);
	netdev = nghd->netdev;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < SYN_STATS_LEN; i++, pos += ETH_GSTRING_LEN)
			memcpy(pos, syn_gstrings_stats[i].stat_string,
			       strlen(syn_gstrings_stats[i].stat_string));
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < SYN_PRIV_FLAGS_LEN; i++, pos += ETH_GSTRING_LEN)
			memcpy(pos, syn_strings_priv_flags[i],
			       strlen(syn_strings_priv_flags[i]));
		break;

	default:
		netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
		return -EPERM;
	}

	return 0;
}
/*
* syn_send_pause_frame()
*/
/*
 * syn_send_pause_frame()
 *	Transmit a pause frame on queue 0.
 */
static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
	BUG_ON(nghd == NULL);

	syn_send_tx_pause_frame(nghd);
}
/*
* syn_start
*/
/*
 * syn_start()
 *	Bring the MAC up: enable Tx/Rx and apply duplex/speed settings.
 *	Returns 0 on success, -1 if the (deprecated) speed call fails.
 */
static int32_t syn_start(struct nss_gmac_hal_dev *nghd)
{
	BUG_ON(nghd == NULL);

	syn_tx_enable(nghd);
	syn_rx_enable(nghd);
	syn_set_full_duplex(nghd);

	if (syn_set_mac_speed(nghd, SPEED_10000))
		return -1;

	/* Dump the resulting Tx/Rx config registers for debugging. */
	netdev_dbg(nghd->netdev,
		   "%s: mac_base:0x%px tx_enable:0x%x rx_enable:0x%x\n",
		   __func__,
		   nghd->mac_base,
		   hal_read_reg(nghd->mac_base, SYN_MAC_TX_CONFIG),
		   hal_read_reg(nghd->mac_base, SYN_MAC_RX_CONFIG));

	return 0;
}
/*
* syn_stop
*/
/*
 * syn_stop()
 *	Bring the MAC down: disable Tx first, then Rx. Always succeeds.
 */
static int32_t syn_stop(struct nss_gmac_hal_dev *nghd)
{
	BUG_ON(nghd == NULL);

	syn_tx_disable(nghd);
	syn_rx_disable(nghd);

	netdev_dbg(nghd->netdev, "%s: Stopping mac_base:0x%px\n", __func__,
		   nghd->mac_base);

	return 0;
}
/*
* syn_init()
*/
/*
 * syn_init()
 *	Allocate and initialize the Synopsys HAL context for one data plane port.
 *
 * Maps the port's MMIO register range, allocates the HAL device structure,
 * and flushes the port's MIB counters. All resources are devm-managed, so
 * nothing is explicitly released on the early-return error paths.
 * Returns the new HAL context, or NULL on failure.
 */
static void *syn_init(struct gmac_hal_platform_data *gmacpdata)
{
	struct syn_hal_dev *shd = NULL;
	struct net_device *ndev = NULL;
	struct nss_dp_dev *dp_priv = NULL;
	struct resource *res;
	ndev = gmacpdata->netdev;
	dp_priv = netdev_priv(ndev);
	/* First MEM resource of the platform device is the MAC register block. */
	res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		netdev_dbg(ndev, "Resource get failed.\n");
		return NULL;
	}
	/* Claim the region so no other driver maps these registers. */
	if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start,
				     resource_size(res), ndev->name)) {
		netdev_dbg(ndev, "Request mem region failed. Returning...\n");
		return NULL;
	}
	shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
						 sizeof(struct syn_hal_dev),
						 GFP_KERNEL);
	if (!shd) {
		netdev_dbg(ndev, "kzalloc failed. Returning...\n");
		return NULL;
	}
	/* Save netdev context in syn HAL context */
	shd->nghd.netdev = gmacpdata->netdev;
	shd->nghd.mac_id = gmacpdata->macid;
	/* Populate the mac base addresses */
	shd->nghd.mac_base =
		devm_ioremap_nocache(&dp_priv->pdev->dev, res->start,
				     resource_size(res));
	if (!shd->nghd.mac_base) {
		netdev_dbg(ndev, "ioremap fail.\n");
		return NULL;
	}
	spin_lock_init(&shd->nghd.slock);
	netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
		   gmacpdata->reg_len,
		   ndev->base_addr,
		   shd->nghd.mac_base);
	/* Reset MIB Stats; a failure here is logged but not fatal. */
	if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) {
		netdev_dbg(ndev, "MIB stats Reset fail.\n");
	}
	return (struct nss_gmac_hal_dev *)shd;
}
/*
* syn_set_mac_address()
*/
/*
 * syn_set_mac_address()
 *	Program the station MAC address into the ADDR0 register pair.
 */
static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd,
				uint8_t *macaddr)
{
	uint32_t hi, lo;

	BUG_ON(nghd == NULL);

	/* Top two address bytes plus the reserved bit go in ADDR0_HIGH. */
	hi = SYN_MAC_ADDR_RSVD_BIT | (macaddr[5] << 8) | macaddr[4];
	hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, hi);

	/* Remaining four bytes, lowest byte in the LSB, go in ADDR0_LOW. */
	lo = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8)
		| macaddr[0];
	hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, lo);
}
/*
* syn_get_mac_address()
*/
/*
 * syn_get_mac_address()
 *	Read the station MAC address back from the ADDR0 register pair.
 */
static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd,
				uint8_t *macaddr)
{
	uint32_t hi, lo;

	BUG_ON(nghd == NULL);

	hi = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH);
	macaddr[5] = (hi >> 8) & 0xff;
	macaddr[4] = hi & 0xff;

	lo = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW);
	macaddr[3] = (lo >> 24) & 0xff;
	macaddr[2] = (lo >> 16) & 0xff;
	macaddr[1] = (lo >> 8) & 0xff;
	macaddr[0] = lo & 0xff;
}
/*
 * syn_hal_ops
 *	HAL callback table for the Synopsys MAC, consumed by the NSS
 *	data-plane core via nss_dp_hal_if.h.
 */
struct nss_gmac_hal_ops syn_hal_ops = {
	.init = &syn_init,
	.start = &syn_start,
	.stop = &syn_stop,
	.setmacaddr = &syn_set_mac_address,
	.getmacaddr = &syn_get_mac_address,
	.rxflowcontrol = &syn_rx_flow_control,
	.txflowcontrol = &syn_tx_flow_control,
	.setspeed = &syn_set_mac_speed,	/* Deprecated: warn-only */
	.getspeed = &syn_get_mac_speed,	/* Deprecated: warn-only */
	.setduplex = &syn_set_duplex_mode,	/* Deprecated: warn-only */
	.getduplex = &syn_get_duplex_mode,	/* Deprecated: warn-only */
	.getstats = &syn_get_mmc_stats,
	.setmaxframe = &syn_set_max_frame_size,
	.getmaxframe = &syn_get_max_frame_size,
	.getndostats = &syn_get_netdev_stats,
	.getssetcount = &syn_get_strset_count,
	.getstrings = &syn_get_strings,
	.getethtoolstats = &syn_get_eth_stats,
	.sendpause = &syn_send_pause_frame,
};

View File

@@ -0,0 +1,255 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __SYN_REG_H__
#define __SYN_REG_H__
/*
*
MAC Register Offset
*
*/
#define SYN_MAC_TX_CONFIG 0x0000
#define SYN_MAC_RX_CONFIG 0x0004
#define SYN_MAC_PACKET_FILTER 0x0008
#define SYN_MAC_WDOG_TIMEOUT 0x000c
#define SYN_MAC_HASH_TBL_REG0 0x0010
#define SYN_MAC_VLAN_TAG 0x0050
#define SYN_MAC_VLAN_HASH_TBL 0x0058
#define SYN_MAC_VLAN_INCL 0x0060
#define SYN_MAC_INNER_VLAN_INCL 0x0064
#define SYN_MAC_RX_ETH_TYP_MATCH 0x006c
#define SYN_MAC_Q0_TX_FLOW_CTL 0x0070
#define SYN_MAC_Q1_TX_FLOW_CTL 0x0074
#define SYN_MAC_Q2_TX_FLOW_CTL 0x0078
#define SYN_MAC_Q3_TX_FLOW_CTL 0x007c
#define SYN_MAC_Q4_TX_FLOW_CTL 0x0080
#define SYN_MAC_Q5_TX_FLOW_CTL 0x0084
#define SYN_MAC_Q6_TX_FLOW_CTL 0x0088
#define SYN_MAC_Q7_TX_FLOW_CTL 0x008c
#define SYN_MAC_RX_FLOW_CTL 0x0090
#define SYN_MAC_RXQ_CTL0 0x00a0
#define SYN_MAC_RXQ_CTL1 0x00a4
#define SYN_MAC_RXQ_CTL2 0x00a8
#define SYN_MAC_RXQ_CTL3 0x00ac
#define SYN_MAC_INT_STATUS 0x00b0
#define SYN_MAC_INT_ENABLE 0x00b4
#define SYN_MAC_TX_RX_STATUS 0x00b8
#define SYN_MAC_PMT_CTL_STATUS 0x00c0
#define SYN_MAC_RWK_PACKET_FILTER 0x00c4
#define SYN_MAC_LPI_CTL_STATUS 0x00d0
#define SYN_MAC_LPI_TIMER_STATUS 0x00d4
#define SYN_MAC_VERSION 0x0110
#define SYN_MAC_DEBUG 0x0114
#define SYN_MAC_FW_FEATURE0 0x011c
#define SYN_MAC_FW_FEATURE1 0x0120
#define SYN_MAC_FW_FEATURE2 0x0124
#define SYN_MAC_GPIO_CTL 0x0278
#define SYN_MAC_GPIO_STATUS 0x027c
#define SYN_MAC_ADDR0_HIGH 0x0300
#define SYN_MAC_ADDR0_LOW 0x0304
#define SYN_MAC_ADDR1_HIGH 0x0308
#define SYN_MAC_ADDR1_LOW 0x030c
#define SYN_MAC_TS_CTL 0x0d00
#define SYN_MAC_SUB_SEC_INCR 0x0d04
#define SYN_MAC_SYS_TIME_SECS 0x0d08
#define SYN_MAC_SYS_TIME_NSECS 0x0d0c
#define SYN_MAC_SYS_TIME_SECS_UPDATE 0x0d10
#define SYN_MAC_SYS_TIME_NSECS_UPDATE 0x0d14
#define SYN_MAC_TS_ADDEND 0x0d18
#define SYN_MAC_TS_STATUS 0x0d20
#define SYN_MAC_TX_TS_STATUS_NSECS 0x0d30
#define SYN_MAC_TX_TS_STATUS_SECS 0x0d34
#define SYN_MAC_PPS_CTL 0x0d70
#define SYN_MAC_MMC_CTL 0x0800
#define SYN_MAC_MMC_RX_INT 0x0804
#define SYN_MAC_MMC_TX_INT 0x0808
#define SYN_MAC_MMC_RX_INT_EN 0x080c
#define SYN_MAC_MMC_TX_INT_EN 0x0810
/* MAC TX MMC Counters */
#define SYN_MAC_MMC_TX_BCAST_LO 0x0824
#define SYN_MAC_MMC_TX_BCAST_HI 0x0828
#define SYN_MAC_MMC_TX_FRAME_LO 0x0894
#define SYN_MAC_MMC_TX_FRAME_HI 0x0898
#define SYN_MAC_MMC_TX_MCAST_LO 0x082c
#define SYN_MAC_MMC_TX_MCAST_HI 0x0830
#define SYN_MAC_MMC_TX_PKT64_LO 0x0834
#define SYN_MAC_MMC_TX_PKT64_HI 0x0838
#define SYN_MAC_MMC_TX_PKT65TO127_LO 0x083c
#define SYN_MAC_MMC_TX_PKT65TO127_HI 0x0840
#define SYN_MAC_MMC_TX_PKT128TO255_LO 0x0844
#define SYN_MAC_MMC_TX_PKT128TO255_HI 0x0848
#define SYN_MAC_MMC_TX_PKT256TO511_LO 0x084c
#define SYN_MAC_MMC_TX_PKT256TO511_HI 0x0850
#define SYN_MAC_MMC_TX_PKT512TO1023_LO 0x0854
#define SYN_MAC_MMC_TX_PKT512TO1023_HI 0x0858
#define SYN_MAC_MMC_TX_PKT1024TOMAX_LO 0x085c
#define SYN_MAC_MMC_TX_PKT1024TOMAX_HI 0x0860
#define SYN_MAC_MMC_TX_UNICAST_LO 0x0864
#define SYN_MAC_MMC_TX_UNICAST_HI 0x0868
#define SYN_MAC_MMC_TX_MCAST_GB_LO 0x086c
#define SYN_MAC_MMC_TX_MCAST_GB_HI 0x0870
#define SYN_MAC_MMC_TX_BCAST_GB_LO 0x0874
#define SYN_MAC_MMC_TX_BCAST_GB_HI 0x0878
#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_LO 0x087c
#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_HI 0x0880
#define SYN_MAC_MMC_TX_BYTES_LO 0x0884
#define SYN_MAC_MMC_TX_BYTES_HI 0x0888
#define SYN_MAC_MMC_TX_PAUSE_FRAME_LO 0x0894
#define SYN_MAC_MMC_TX_PAUSE_FRAME_HI 0x0898
#define SYN_MAC_MMC_TX_VLAN_LO 0x089c
#define SYN_MAC_MMC_TX_VLAN_HI 0x08a0
#define SYN_MAC_MMC_TX_LPI_USEC_CTR_LO 0x08a4
#define SYN_MAC_MMC_TX_LPI_USEC_CTR_HI 0x08a8
/* MAC RX MMC Counters */
#define SYN_MAC_MMC_RX_FRAME_LO 0x0900
#define SYN_MAC_MMC_RX_FRAME_HI 0x0904
#define SYN_MAC_MMC_RX_BYTES_LO 0x0910
#define SYN_MAC_MMC_RX_BYTES_HI 0x0914
#define SYN_MAC_MMC_RX_BCAST_LO 0x0918
#define SYN_MAC_MMC_RX_BCAST_HI 0x091c
#define SYN_MAC_MMC_RX_MCAST_LO 0x0920
#define SYN_MAC_MMC_RX_MCAST_HI 0x0924
#define SYN_MAC_MMC_RX_CRC_ERR_LO 0x0928
#define SYN_MAC_MMC_RX_CRC_ERR_HI 0x092c
#define SYN_MAC_MMC_RX_RUNT_ERR 0x0930
#define SYN_MAC_MMC_RX_JABBER_ERR 0x0934
#define SYN_MAC_MMC_RX_UNDERSIZE 0x0938
#define SYN_MAC_MMC_RX_OVERSIZE 0x093c
#define SYN_MAC_MMC_RX_PKT64_LO 0x0940
#define SYN_MAC_MMC_RX_PKT64_HI 0x0944
#define SYN_MAC_MMC_RX_PKT65TO127_LO 0x0948
#define SYN_MAC_MMC_RX_PKT65TO127_HI 0x094c
#define SYN_MAC_MMC_RX_PKT128TO255_LO 0x0950
#define SYN_MAC_MMC_RX_PKT128TO255_HI 0x0954
#define SYN_MAC_MMC_RX_PKT256TO511_LO 0x0958
#define SYN_MAC_MMC_RX_PKT256TO511_HI 0x095c
#define SYN_MAC_MMC_RX_PKT512TO1023_LO 0x0960
#define SYN_MAC_MMC_RX_PKT512TO1023_HI 0x0964
#define SYN_MAC_MMC_RX_PKT1024TOMAX_LO 0x0968
#define SYN_MAC_MMC_RX_PKT1024TOMAX_HI 0x096c
#define SYN_MAC_MMC_RX_UNICAST_LO 0x0970
#define SYN_MAC_MMC_RX_UNICAST_HI 0x0974
#define SYN_MAC_MMC_RX_LEN_ERR_LO 0x0978
#define SYN_MAC_MMC_RX_LEN_ERR_HI 0x097c
#define SYN_MAC_MMC_RX_PAUSE_FRAME_LO 0x0988
#define SYN_MAC_MMC_RX_PAUSE_FRAME_HI 0x098c
#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_LO 0x0990
#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_HI 0x0994
#define SYN_MAC_MMC_RX_VLAN_FRAME_LO 0x0998
#define SYN_MAC_MMC_RX_VLAN_FRAME_HI 0x099c
#define SYN_MAC_MMC_RX_LPI_USEC_CTR_LO 0x09a4
#define SYN_MAC_MMC_RX_LPI_USEC_CTR_HI 0x09a8
#define SYN_MAC_MMC_RX_DISCARD_FRAME_LO 0x09ac
#define SYN_MAC_MMC_RX_DISCARD_FRAME_HI 0x09b0
/* MAC Register Bit Definitions*/
/* SYN_MAC_Q0_TX_FLOW_CTL Bit definitions */
#define SYN_MAC_TX_PAUSE_SEND 0x00000001
#define SYN_MAC_TX_FLOW_ENABLE 0x00000002
#define SYN_MAC_TX_PAUSE_LOW_THRESHOLD 0x00000070
#define SYN_MAC_ADDR_RSVD_BIT 0x80000000
/* SYN_MAC_RX_FLOW_CTL Bit definitions */
#define SYN_MAC_RX_FLOW_ENABLE 0x00000001
/* SYN_MAC_TX_CONFIG Bit definitions */
#define SYN_MAC_TX_ENABLE 0x00000001
#define SYN_MAC_TX_SPEED_SELECT 0x60000000
/* SYN_MAC_RX_CONFIG Bit definitions */
#define SYN_MAC_RX_ENABLE 0x00000001
#define SYN_MAC_JUMBO_FRAME_ENABLE 0x00000100
#define SYN_MAC_SPEED_10G 0x0
#define SYN_MAC_SPEED_2_5G 0x2
#define SYN_MAC_SPEED_1G 0x3
#define SYN_MAC_SPEED_BITPOS 29
#define SYN_MAC_SPEED_BITMASK 0x3
#define SYN_MAC_DEFAULT_MAX_FRAME_SIZE 1518
#define SYN_MAC_MAX_FRAME_SIZE_BITPOS 16
#define SYN_MAC_MAX_FRAME_SIZE_BITMASK 0x3fff
/* SYN_MAC_MMC_CTL Bit definitions */
#define SYN_MAC_MMC_RSTONRD 0x00000004
/*
 * MTL Register Offset
 */
#define SYN_MTL_OPER_MODE 0x1000
#define SYN_MTL_DEBUG_CTL 0x1008
#define SYN_MTL_DEBUG_STATUS 0x100c
#define SYN_MTL_DEBUG_DATA 0x1010
#define SYN_MTL_INT_STATUS 0x1020
#define SYN_MTL_RXQ_DMA_MAP0 0x1030
#define SYN_MTL_RXQ_DMA_MAP1 0x1034
#define SYN_MTL_RXQ_DMA_MAP2 0x1038
#define SYN_MTL_TC_PRIO_MAP0 0x1040
#define SYN_MTL_TC_PRIO_MAP1 0x1044
#define SYN_MTL_TXQ0_OPER_MODE 0x1100
#define SYN_MTL_TXQ0_UNDERFLOW 0x1104
#define SYN_MTL_TXQ0_DEBUG 0x1108
#define SYN_MTL_TC0_ETS_CTL 0x1110
#define SYN_MTL_TC0_ETS_STATUS 0x1114
#define SYN_MTL_TC0_QUANTUM_WEIGHT 0x1118
#define SYN_MTL_RXQ0_DEBUG 0x1148
#define SYN_MTL_RXQ0_CTL 0x114c
#define SYN_MTL_RXQ0_FLOW_CTL 0x1150
#define SYN_MTL_Q0_INT_ENABLE 0x1170
#define SYN_MTL_Q0_INT_STATUS 0x1174
/* MTL Register Bit definitions */
/*
 * DMA Register Offset
 */
#define SYN_DMA_MODE 0x3000
#define SYN_DMA_SYSBUS_MODE 0x3004
#define SYN_DMA_INT_STATUS 0x3008
#define SYN_DMA_AXI_TX_AR_ACE_CTL 0x3010
#define SYN_DMA_AXI_RX_AW_ACE_CTL 0x3018
#define SYN_DMA_AXI_TXRX_AWAR_ACE_CTL 0x301c
#define SYN_DMA_DEBUG_STATUS0 0x3020
#define SYN_DMA_DEBUG_STATUS1 0x3024
#define SYN_DMA_TX_EDMA_CTL 0x3040
#define SYN_DMA_RX_EDMA_CTL 0x3044
#define SYN_DMA_CH0_CTL 0x3100
#define SYN_DMA_CH0_TX_CTL 0x3104
#define SYN_DMA_CH0_RX_CTL 0x3108
#define SYN_DMA_CH0_TXDESC_LIST_HADDR 0x3110
#define SYN_DMA_CH0_TXDESC_LIST_LADDR 0x3114
#define SYN_DMA_CH0_RXDESC_LIST_HADDR 0x3118
#define SYN_DMA_CH0_RXDESC_LIST_LADDR 0x311c
#define SYN_DMA_CH0_TXDESC_TAIL_LPTR 0x3124
#define SYN_DMA_CH0_RXDESC_TAIL_LPTR 0x312c
#define SYN_DMA_CH0_TXDESC_RING_LEN 0x3130
#define SYN_DMA_CH0_RXDESC_RING_LEN 0x3134
#define SYN_DMA_INT_ENABLE 0x3138
#define SYN_DMA_RX_INT_WDOG_TIMER 0x313c
/* DMA Register Bit definitions */
#endif /*__SYN_REG_H__*/

View File

@@ -0,0 +1,31 @@
/*
**************************************************************************
* Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/*
* This file includes declarations defined by the EDMA
* dataplane and used by other layers of this driver.
*/
#ifndef __NSS_DP_EDMA__
#define __NSS_DP_EDMA__
/* Bring up the EDMA dataplane; returns 0 on success (convention assumed — confirm in edma implementation). */
extern int edma_init(void);
/* Tear down the EDMA dataplane; is_dp_override indicates an overriding dataplane is taking over. */
extern void edma_cleanup(bool is_dp_override);
/* EDMA implementation of the generic dataplane operations table. */
extern struct nss_dp_data_plane_ops nss_dp_edma_ops;
#endif /*__NSS_DP_EDMA__ */

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_HAL_H__
#define __NSS_DP_HAL_H__
#include "nss_dp_dev.h"
/*
 * nss_dp_hal_get_gmac_ops()
 *	Look up the gmac hal ops registered for the given GMAC type.
 */
static inline struct nss_gmac_hal_ops *nss_dp_hal_get_gmac_ops(uint32_t gmac_type)
{
	struct nss_gmac_hal_ops *hal_ops;

	hal_ops = dp_global_ctx.gmac_hal_ops[gmac_type];
	return hal_ops;
}
/*
 * nss_dp_hal_set_gmac_ops()
 *	Record the gmac hal ops for the given GMAC type in the dp global context.
 */
static inline void nss_dp_hal_set_gmac_ops(struct nss_gmac_hal_ops *hal_ops, uint32_t gmac_type)
{
	struct nss_gmac_hal_ops **slot;

	slot = &dp_global_ctx.gmac_hal_ops[gmac_type];
	*slot = hal_ops;
}
/*
* HAL functions implemented by SoC specific source files.
*/
extern bool nss_dp_hal_init(void);
extern void nss_dp_hal_cleanup(void);
extern void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv);
extern struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void);
#endif /* __NSS_DP_HAL_H__ */

View File

@@ -0,0 +1,162 @@
/*
**************************************************************************
* Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __NSS_DP_HAL_IF_H__
#define __NSS_DP_HAL_IF_H__
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <uapi/linux/if_link.h>
/*
 * gmac_device_type
 *	Selects which GMAC HAL implementation drives a port.
 */
enum gmac_device_type {
	GMAC_HAL_TYPE_QCOM = 0,	/* 1G GMAC type */
	GMAC_HAL_TYPE_SYN_XGMAC,/* Synopsys XGMAC type */
	GMAC_HAL_TYPE_SYN_GMAC,	/* Synopsys 1G GMAC type */
	GMAC_HAL_TYPE_MAX	/* Count sentinel, not a real HAL type */
};
/*
 * gmac_hal_platform_data
 *	Per-port parameters handed to a HAL's init() hook.
 */
struct gmac_hal_platform_data {
	struct net_device *netdev;	/* Net device */
	uint32_t reg_len;		/* Register space length */
	uint32_t mactype;		/* MAC chip type (see enum gmac_device_type) */
	uint32_t macid;			/* MAC sequence id on the Chip */
};
/*
 * NSS GMAC HAL device data
 *	Per-MAC state shared by the generic register accessors below and the
 *	SoC-specific HAL implementations.
 */
struct nss_gmac_hal_dev {
	void __iomem *mac_base;		/* Base address of MAC registers */
	uint32_t version;		/* GMAC Revision version */
	uint32_t drv_flags;		/* Driver specific feature flags */
	/*
	 * Phy related stuff
	 */
	uint32_t link_state;		/* Link status as reported by the Phy */
	uint32_t duplex_mode;		/* Duplex mode of the Phy */
	uint32_t speed;			/* Speed of the Phy */
	uint32_t loop_back_mode;	/* Loopback status of the Phy */
	uint32_t phy_mii_type;		/* RGMII/SGMII/XSGMII */
	struct net_device *netdev;	/* Associated net device */
	struct resource *memres;	/* MAC register memory resource */
	uint32_t mac_reg_len;		/* MAC Register block length */
	uint32_t mac_id;		/* MAC sequence id on the Chip */
	spinlock_t slock;		/* lock to protect concurrent reg access */
};
/*
 * nss_gmac_hal_ops
 *	Operations table implemented by each GMAC HAL; every hook except
 *	init() takes the per-MAC nss_gmac_hal_dev as its first argument.
 */
struct nss_gmac_hal_ops {
	void* (*init)(struct gmac_hal_platform_data *);	/* Allocate and set up the HAL device */
	void (*exit)(struct nss_gmac_hal_dev *);	/* Tear down the HAL device */
	int32_t (*start)(struct nss_gmac_hal_dev *);	/* Start the MAC */
	int32_t (*stop)(struct nss_gmac_hal_dev *);	/* Stop the MAC */
	void (*setmacaddr)(struct nss_gmac_hal_dev *, uint8_t *);
	void (*getmacaddr)(struct nss_gmac_hal_dev *, uint8_t *);
	/* Feature enable/disable hooks */
	void (*promisc)(struct nss_gmac_hal_dev *, bool enabled);
	void (*multicast)(struct nss_gmac_hal_dev *, bool enabled);
	void (*broadcast)(struct nss_gmac_hal_dev *, bool enabled);
	void (*rxcsumoffload)(struct nss_gmac_hal_dev *, bool enabled);
	void (*txcsumoffload)(struct nss_gmac_hal_dev *, bool enabled);
	void (*rxflowcontrol)(struct nss_gmac_hal_dev *, bool enabled);
	void (*txflowcontrol)(struct nss_gmac_hal_dev *, bool enabled);
	/* Link parameter hooks */
	int32_t (*setspeed)(struct nss_gmac_hal_dev *, uint32_t);
	uint32_t (*getspeed)(struct nss_gmac_hal_dev *);
	void (*setduplex)(struct nss_gmac_hal_dev *, uint8_t);
	uint8_t (*getduplex)(struct nss_gmac_hal_dev *);
	/* Statistics and frame-size hooks */
	int32_t (*getstats)(struct nss_gmac_hal_dev *);
	int32_t (*setmaxframe)(struct nss_gmac_hal_dev *, uint32_t);
	int32_t (*getmaxframe)(struct nss_gmac_hal_dev *);
	int32_t (*getndostats)(struct nss_gmac_hal_dev *,
			struct rtnl_link_stats64 *);
	/* Pause-frame and ethtool hooks */
	void (*sendpause)(struct nss_gmac_hal_dev *);
	void (*stoppause)(struct nss_gmac_hal_dev *);
	int32_t (*getssetcount)(struct nss_gmac_hal_dev *, int32_t);
	int32_t (*getstrings)(struct nss_gmac_hal_dev *, int32_t, uint8_t *);
	int32_t (*getethtoolstats)(struct nss_gmac_hal_dev *, uint64_t *);
};
/* HAL implementations provided by the qcom and synopsys back ends */
extern struct nss_gmac_hal_ops qcom_hal_ops;
extern struct nss_gmac_hal_ops syn_hal_ops;
/**********************************************************
* Common functions
**********************************************************/
/*
 * hal_read_reg()
 *	Read the 32-bit MAC register at regoffset from regbase.
 */
static inline uint32_t hal_read_reg(void __iomem *regbase, uint32_t regoffset)
{
	void __iomem *addr = regbase + regoffset;

	return readl_relaxed(addr);
}
/*
 * hal_write_reg()
 *	Write a 32-bit value to the MAC register at regoffset from regbase.
 */
static inline void hal_write_reg(void __iomem *regbase, uint32_t regoffset,
				 uint32_t regdata)
{
	void __iomem *addr = regbase + regoffset;

	writel_relaxed(regdata, addr);
}
/*
 * hal_set_reg_bits()
 *	Read-modify-write: turn on the bits in bitpos within a MAC register.
 *
 * The per-device spinlock serializes concurrent read-modify-write cycles.
 */
static inline void hal_set_reg_bits(struct nss_dp_dev *nghd,
				    uint32_t regoffset,
				    uint32_t bitpos)
{
	uint32_t val;

	spin_lock(&nghd->slock);
	val = hal_read_reg(nghd->mac_base, regoffset);
	val |= bitpos;
	hal_write_reg(nghd->mac_base, regoffset, val);
	spin_unlock(&nghd->slock);
}
/*
 * hal_clear_reg_bits()
 *	Read-modify-write: turn off the bits in bitpos within a MAC register.
 *
 * The per-device spinlock serializes concurrent read-modify-write cycles.
 */
static inline void hal_clear_reg_bits(struct nss_gmac_hal_dev *nghd,
				      uint32_t regoffset,
				      uint32_t bitpos)
{
	uint32_t val;

	spin_lock(&nghd->slock);
	val = hal_read_reg(nghd->mac_base, regoffset);
	val &= ~bitpos;
	hal_write_reg(nghd->mac_base, regoffset, val);
	spin_unlock(&nghd->slock);
}
/*
 * hal_check_reg_bits()
 *	Return true if any of the bits in bitpos are set in the register.
 */
static inline bool hal_check_reg_bits(void __iomem *regbase,
				      uint32_t regoffset,
				      uint32_t bitpos)
{
	uint32_t val = hal_read_reg(regbase, regoffset);

	return !!(val & bitpos);
}
#endif /* __NSS_DP_HAL_IF_H__ */

View File

@@ -0,0 +1,336 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include "syn_data_plane.h"
#include "syn_reg.h"
#define SYN_DP_NAPI_BUDGET 64
/*
 * GMAC Ring info
 *	Per-port datapath state (descriptor rings, skb pools, stats),
 *	indexed throughout this file by (macid - 1).
 */
struct syn_dp_info dp_info[NSS_DP_HAL_MAX_PORTS];
/*
* syn_dp_napi_poll()
* Scheduled by napi to process RX and TX complete
*/
static int syn_dp_napi_poll(struct napi_struct *napi, int budget)
{
struct nss_dp_dev *gmac_dev = container_of(napi, struct nss_dp_dev, napi);
struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
int work_done;
/*
* Update GMAC stats
*/
spin_lock_bh(&dp_info->stats_lock);
dp_info->stats.stats.rx_missed += syn_get_rx_missed(gmac_dev->gmac_hal_ctx);
dp_info->stats.stats.rx_missed += syn_get_fifo_overflows(gmac_dev->gmac_hal_ctx);
spin_unlock_bh(&dp_info->stats_lock);
syn_dp_process_tx_complete(gmac_dev, dev_info);
work_done = syn_dp_rx(gmac_dev, dev_info, budget);
syn_dp_rx_refill(gmac_dev, dev_info);
if (work_done < budget) {
napi_complete(napi);
syn_enable_dma_interrupt(gmac_dev->gmac_hal_ctx);
}
return work_done;
}
/*
 * syn_dp_handle_irq()
 *	GMAC interrupt handler: ack the DMA status, mask further DMA
 *	interrupts and hand the work off to napi.
 */
static irqreturn_t syn_dp_handle_irq(int irq, void *ctx)
{
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)ctx;
	struct nss_gmac_hal_dev *hal_dev = dp_dev->gmac_hal_ctx;

	/* Interrupts stay masked until the poll routine completes. */
	syn_clear_dma_status(hal_dev);
	syn_disable_dma_interrupt(hal_dev);

	/*
	 * Let the napi poll routine do the actual Rx/Tx processing.
	 */
	napi_schedule(&dp_dev->napi);

	return IRQ_HANDLED;
}
/*
 * syn_dp_if_init()
 *	Initialize the GMAC data plane operations: napi, IRQ and the
 *	Tx/Rx descriptor rings.
 *
 * Returns NSS_DP_SUCCESS on success, NSS_DP_FAILURE otherwise.
 */
static int syn_dp_if_init(struct nss_dp_data_plane_ctx *dpc)
{
	struct net_device *netdev;
	struct nss_dp_dev *gmac_dev;
	uint32_t macid;
	struct syn_dp_info *dev_info;
	struct device *dev;
	int err;

	/*
	 * Fix: validate the netdev pointer before dereferencing it. The
	 * original code called netdev_priv(netdev) — and netdev_dbg() on
	 * the NULL pointer itself — before performing this check.
	 */
	if (!dpc || !dpc->dev) {
		pr_debug("nss_dp_gmac: Invalid netdev pointer\n");
		return NSS_DP_FAILURE;
	}

	netdev = dpc->dev;
	gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
	macid = gmac_dev->macid;
	dev_info = &dp_info[macid - 1];
	dev = &gmac_dev->pdev->dev;

	netdev_info(netdev, "nss_dp_gmac: Registering netdev %s(qcom-id:%d) with GMAC\n", netdev->name, macid);

	if (!dev_info->napi_added) {
		netif_napi_add(netdev, &gmac_dev->napi, syn_dp_napi_poll, SYN_DP_NAPI_BUDGET);

		/*
		 * Requesting irq
		 */
		netdev->irq = platform_get_irq(gmac_dev->pdev, 0);
		err = request_irq(netdev->irq, syn_dp_handle_irq, 0, "nss-dp-gmac", gmac_dev);
		if (err) {
			netdev_dbg(netdev, "err_code:%d, Mac %d IRQ %d request failed\n", err,
					gmac_dev->macid, netdev->irq);

			/*
			 * Fix: undo netif_napi_add() on failure; previously the
			 * napi instance leaked and was added again on the next
			 * init attempt (napi_added was never set).
			 */
			netif_napi_del(&gmac_dev->napi);
			return NSS_DP_FAILURE;
		}

		gmac_dev->drv_flags |= NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
		dev_info->napi_added = 1;
	}

	/*
	 * Forcing the kernel to use 32-bit DMA addressing.
	 * Fix: the return value was silently ignored; at least report a
	 * failure to set the mask.
	 */
	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		netdev_dbg(netdev, "nss_dp_gmac: failed to set 32-bit coherent DMA mask (%d)\n", err);

	/*
	 * Initialize the Tx/Rx ring
	 */
	if (syn_dp_setup_rings(gmac_dev, netdev, dev, dev_info)) {
		netdev_dbg(netdev, "nss_dp_gmac: Error initializing GMAC rings %px\n", netdev);
		return NSS_DP_FAILURE;
	}

	spin_lock_init(&dev_info->data_lock);
	spin_lock_init(&dev_info->stats_lock);

	netdev_dbg(netdev, "Synopsys GMAC dataplane initialized\n");

	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_open()
 *	Open the GMAC data plane: start the DMA engines, enable napi and
 *	unmask the DMA interrupt.
 */
static int syn_dp_if_open(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring,
			  uint32_t rx_desc_ring, uint32_t mode)
{
	struct net_device *ndev = dpc->dev;
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(ndev);
	struct nss_gmac_hal_dev *hal_dev = dp_dev->gmac_hal_ctx;

	/* Bring up DMA first, then napi, then allow interrupts. */
	syn_enable_dma_rx(hal_dev);
	syn_enable_dma_tx(hal_dev);
	napi_enable(&dp_dev->napi);
	syn_enable_dma_interrupt(hal_dev);

	netdev_dbg(ndev, "Synopsys GMAC dataplane opened\n");

	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_close()
 *	Close the GMAC data plane: stop the DMA engines, mask the DMA
 *	interrupt and disable napi.
 */
static int syn_dp_if_close(struct nss_dp_data_plane_ctx *dpc)
{
	struct net_device *ndev = dpc->dev;
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(ndev);
	struct nss_gmac_hal_dev *hal_dev = dp_dev->gmac_hal_ctx;

	/* Mirror of syn_dp_if_open(): DMA off, interrupts off, napi off. */
	syn_disable_dma_rx(hal_dev);
	syn_disable_dma_tx(hal_dev);
	syn_disable_dma_interrupt(hal_dev);
	napi_disable(&dp_dev->napi);

	netdev_dbg(ndev, "Synopsys GMAC dataplane closed\n");

	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_link_state()
 *	Dataplane notification of a link state change; currently only logs.
 */
static int syn_dp_if_link_state(struct nss_dp_data_plane_ctx *dpc, uint32_t link_state)
{
	struct net_device *netdev = dpc->dev;

	/*
	 * Switch interrupt based on the link state
	 */
	if (!link_state) {
		netdev_dbg(netdev, "Data plane link down\n");
		return NSS_DP_SUCCESS;
	}

	netdev_dbg(netdev, "Data plane link up\n");
	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_mac_addr()
 *	Dataplane hook for a MAC address change.
 *
 * No dataplane-side state depends on the address, so this is a no-op
 * that always reports success.
 */
static int syn_dp_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr)
{
	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_change_mtu()
 *	Dataplane hook for an MTU change; currently accepts any MTU
 *	without reprogramming the hardware.
 */
static int syn_dp_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu)
{
	/*
	 * TODO: Work on MTU fix along with register update for frame length
	 */
	return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_set_features()
* Set the supported net_device features
*/
static void syn_dp_if_set_features(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->wanted_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
}
/*
 * syn_dp_if_xmit()
 *	Dataplane method to transmit the packet.
 *
 * Nonlinear (fragmented) skbs are not supported by this datapath and
 * are dropped.
 */
static netdev_tx_t syn_dp_if_xmit(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb)
{
	struct net_device *netdev = dpc->dev;
	struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
	struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
	int nfrags = skb_shinfo(skb)->nr_frags;

	/*
	 * Most likely, it is not a fragmented pkt, optimize for that
	 */
	if (likely(nfrags == 0)) {
		if (syn_dp_tx(gmac_dev, dev_info, skb)) {
			goto drop;
		}

		return NETDEV_TX_OK;
	}

drop:
	dev_kfree_skb_any(skb);

	/* Guard the counter with stats_lock, like the other stats updates. */
	spin_lock_bh(&dev_info->stats_lock);
	dev_info->stats.stats.tx_dropped++;
	spin_unlock_bh(&dev_info->stats_lock);

	/*
	 * Fix: the skb was just freed, so we must report NETDEV_TX_OK.
	 * Returning NETDEV_TX_BUSY (as the original did) makes the stack
	 * requeue the freed skb — a use-after-free.
	 */
	return NETDEV_TX_OK;
}
/*
 * syn_dp_if_pause_on_off()
 *	Dataplane hook for pause-frame on/off; currently a no-op that
 *	always reports success.
 */
static int syn_dp_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc, uint32_t pause_on)
{
	return NSS_DP_SUCCESS;
}
/*
 * syn_dp_if_get_stats()
 *	Copy this port's accumulated GMAC data plane stats to the caller,
 *	under the stats lock.
 */
static void syn_dp_if_get_stats(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats)
{
	struct net_device *ndev = dpc->dev;
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(ndev);
	struct syn_dp_info *dev_info = &dp_info[dp_dev->macid - 1];

	spin_lock_bh(&dev_info->stats_lock);
	netdev_dbg(ndev, "GETTING stats: rx_packets:%llu rx_bytes:%llu mmc_rx_crc_errors:%llu", dev_info->stats.stats.rx_packets,
			dev_info->stats.stats.rx_bytes, dev_info->stats.stats.mmc_rx_crc_errors);
	memcpy(stats, &dev_info->stats, sizeof(*stats));
	spin_unlock_bh(&dev_info->stats_lock);
}
/*
 * syn_dp_if_deinit()
 *	Free all the Synopsys GMAC resources: IRQ, napi instance and the
 *	Tx/Rx descriptor rings.
 *
 * Mirrors syn_dp_if_init(); safe to call when napi/IRQ were never set up.
 */
static int syn_dp_if_deinit(struct nss_dp_data_plane_ctx *dpc)
{
	struct net_device *netdev = dpc->dev;
	struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
	struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];

	if (dev_info->napi_added) {
		/*
		 * Remove interrupt handlers and NAPI
		 */
		if (gmac_dev->drv_flags & NSS_DP_PRIV_FLAG(IRQ_REQUESTED)) {
			netdev_dbg(netdev, "Freeing IRQ %d for Mac %d\n", netdev->irq, gmac_dev->macid);
			/* Wait for any in-flight handler before releasing the line. */
			synchronize_irq(netdev->irq);
			free_irq(netdev->irq, gmac_dev);
			gmac_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
		}

		/* napi must be removed only after the IRQ can no longer schedule it. */
		netif_napi_del(&gmac_dev->napi);
		dev_info->napi_added = 0;
	}

	/*
	 * Cleanup and free the rings
	 */
	syn_dp_cleanup_rings(gmac_dev, netdev, dev_info);

	return NSS_DP_SUCCESS;
}
/*
 * nss_dp_gmac_ops
 *	Data plane operations for Synopsys GMAC, registered with the
 *	generic nss-dp layer.
 */
struct nss_dp_data_plane_ops nss_dp_gmac_ops = {
	.init		= syn_dp_if_init,
	.open		= syn_dp_if_open,
	.close		= syn_dp_if_close,
	.link_state	= syn_dp_if_link_state,
	.mac_addr	= syn_dp_if_mac_addr,
	.change_mtu	= syn_dp_if_change_mtu,
	.xmit		= syn_dp_if_xmit,
	.set_features	= syn_dp_if_set_features,
	.pause_on_off	= syn_dp_if_pause_on_off,
	.get_stats	= syn_dp_if_get_stats,
	.deinit		= syn_dp_if_deinit,
};

View File

@@ -0,0 +1,109 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_SYN_DATAPLANE__
#define __NSS_DP_SYN_DATAPLANE__
#include "nss_dp_dev.h"
#include "syn_dma_desc.h"
#define SYN_DP_TX_DESC_SIZE 128 /* Tx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_RX_DESC_SIZE 128 /* Rx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_MINI_JUMBO_FRAME_MTU 1978
#define SYN_DP_MAX_DESC_BUFF 0x1FFF /* Max size of buffer that can be programed into one field of desc */
/*
 * syn_dp_info
 *	Synopsys GMAC Dataplane information (one instance per port).
 */
struct syn_dp_info {
	struct nss_dp_gmac_stats stats;	/* GMAC driver stats */
	struct sk_buff *rx_skb_list[SYN_DP_RX_DESC_SIZE];	/* Rx skb pool backing the Rx DMA descriptors */
	dma_addr_t rx_desc_dma;		/* DMA-able address of first rx descriptor
					   either in ring or chain mode, this is
					   used by the GMAC device */
	struct dma_desc *rx_desc;	/* start address of RX descriptors ring or
					   chain, this is used by the driver */
	uint32_t busy_rx_desc;		/* Number of Rx Descriptors owned by
					   DMA at any given time */
	uint32_t rx_desc_count;		/* number of rx descriptors in the
					   rx descriptor queue/pool */
	uint32_t rx_busy;		/* index of the rx descriptor owned by DMA,
					   obtained by nss_gmac_get_rx_qptr() */
	uint32_t rx_next;		/* index of the rx descriptor next available
					   with driver, given to DMA by
					   nss_gmac_set_rx_qptr() */
	struct dma_desc *rx_busy_desc;	/* Rx Descriptor address corresponding
					   to the index rx_busy */
	struct dma_desc *rx_next_desc;	/* Rx Descriptor address corresponding
					   to the index rx_next */
	struct sk_buff *tx_skb_list[SYN_DP_RX_DESC_SIZE];	/* Tx skb pool backing the Tx DMA
					   descriptors.  NOTE(review): sized with
					   the RX constant — works only because
					   SYN_DP_TX_DESC_SIZE == SYN_DP_RX_DESC_SIZE;
					   confirm before changing either. */
	dma_addr_t tx_desc_dma;		/* DMA-able address of first tx descriptor
					   either in ring or chain mode, this is used
					   by the GMAC device */
	struct dma_desc *tx_desc;	/* start address of TX descriptors ring or
					   chain, this is used by the driver */
	uint32_t busy_tx_desc;		/* Number of Tx Descriptors owned by
					   DMA at any given time */
	uint32_t tx_desc_count;		/* number of tx descriptors in the
					   tx descriptor queue/pool */
	uint32_t tx_busy;		/* index of the tx descriptor owned by DMA,
					   is obtained by nss_gmac_get_tx_qptr() */
	uint32_t tx_next;		/* index of the tx descriptor next available
					   with driver, given to DMA by
					   nss_gmac_set_tx_qptr() */
	struct dma_desc *tx_busy_desc;	/* Tx Descriptor address corresponding
					   to the index tx_busy */
	struct dma_desc *tx_next_desc;	/* Tx Descriptor address corresponding
					   to the index tx_next */
	spinlock_t data_lock;		/* Lock to protect the datapath (rings) */
	spinlock_t stats_lock;		/* Lock to protect the stats */
	int napi_added;			/* flag to indicate napi add status */
};
/*
 * GMAC Tx/Rx APIs
 */
/* Allocate and initialize the Tx/Rx descriptor rings and skb pools. */
int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info);
/* Free the Tx/Rx descriptor rings and any skbs still held by them. */
int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info);
/* Process up to 'budget' received frames; returns the count processed. */
int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget);
/* Replenish the Rx ring with fresh skbs. */
void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
/* Queue one linear skb for transmit; non-zero return means it was not queued. */
int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb);
/* Reap completed Tx descriptors and free their skbs. */
void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
#endif /* __NSS_DP_SYN_DATAPLANE__ */

View File

@@ -0,0 +1,342 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_DESC__
#define __SYN_DESC__
/**********************************************************
* DMA Engine descriptors
**********************************************************/
/*
******Enhanced Descriptor structure to support 8K buffer per buffer *******
dma_rx_base_addr = 0x000C, CSR3 - Receive Descriptor list base address
dma_rx_base_addr is the pointer to the first Rx Descriptors.
The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
Similarly
dma_tx_base_addr = 0x0010, CSR4 - Transmit Descriptor list base address
dma_tx_base_addr is the pointer to the first Tx Descriptors.
The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
-------------------------------------------------------------------------
RDES0 |OWN (31)| Status |
-------------------------------------------------------------------------
RDES1 | Ctrl | Res | Byte Count Buffer 2 | Ctrl | Res | Byte Count Buffer 1 |
-------------------------------------------------------------------------
RDES2 | Buffer 1 Address |
-------------------------------------------------------------------------
RDES3 | Buffer 2 Address / Next Descriptor Address |
-------------------------------------------------------------------------
RDES4 | Extended Status |
-------------------------------------------------------------------------
RDES5 | Reserved |
-------------------------------------------------------------------------
RDES6 | Receive Timestamp Low |
-------------------------------------------------------------------------
RDES7 | Receive Timestamp High |
-------------------------------------------------------------------------
------------------------------------------------------------------------
TDES0 |OWN (31)| Ctrl | Res | Ctrl | Res | Status |
------------------------------------------------------------------------
TDES1 | Res | Byte Count Buffer 2 | Res | Byte Count Buffer 1 |
------------------------------------------------------------------------
TDES2 | Buffer 1 Address |
------------------------------------------------------------------------
TDES3 | Buffer 2 Address / Next Descriptor Address |
------------------------------------------------------------------------
TDES4 | Reserved |
------------------------------------------------------------------------
TDES5 | Reserved |
------------------------------------------------------------------------
TDES6 | Transmit Timestamp Low |
------------------------------------------------------------------------
TDES7 | Transmit Timestamp Higher |
------------------------------------------------------------------------
*/
/*
 * dma_descriptor_status
 *	Status word of the DMA descriptor (RDES0/TDES0), plus the
 *	RDES1/TDES1 length-word bits and the RDES4 extended-status bits.
 *	All values are bit masks (or shift amounts) into those 32-bit words.
 */
enum dma_descriptor_status {
	desc_own_by_dma = 0x80000000,		/* (OWN) Descriptor is owned by the DMA engine */
	desc_rx_da_filter_fail = 0x40000000,	/* (AFM) Rx - DA filter fail for the rx frame */
	desc_rx_frame_length_mask = 0x3FFF0000,	/* (FL) Receive descriptor frame length */
	desc_rx_frame_length_shift = 16,
	desc_rx_error = 0x00008000,		/* (ES) Error summary bit - OR of:
						   DE || OE || IPC || GF || LC || RWT || RE || CE */
	desc_rx_truncated = 0x00004000,		/* (DE) Rx - no more descriptors for receive frame */
	desc_sa_filter_fail = 0x00002000,	/* (SAF) Rx - SA filter fail for the received frame */
	desc_rx_length_error = 0x00001000,	/* (LE) Rx - frame size not matching with len field */
	desc_rx_overflow = 0x00000800,		/* (OE) Rx - frame was damaged due to buffer overflow */
	desc_rx_vlan_tag = 0x00000400,		/* (VLAN) Rx - received frame is a VLAN frame */
	desc_rx_first = 0x00000200,		/* (FS) Rx - first descriptor of the frame */
	desc_rx_last = 0x00000100,		/* (LS) Rx - last descriptor of the frame */
	desc_rx_long_frame = 0x00000080,	/* (Giant Frame) Rx - frame is longer than 1518/1522 */
	desc_rx_collision = 0x00000040,		/* (LC) Rx - late collision occurred during reception */
	desc_rx_frame_ether = 0x00000020,	/* (FT) Rx - frame type - Ethernet, otherwise 802.3 */
	desc_rx_watchdog = 0x00000010,		/* (RWT) Rx - watchdog timer expired during reception */
	desc_rx_mii_error = 0x00000008,		/* (RE) Rx - error reported by MII interface */
	desc_rx_dribbling = 0x00000004,		/* (DE) Rx - frame contains non-integer multiple of 8 bits */
	desc_rx_crc = 0x00000002,		/* (CE) Rx - CRC error */
	desc_rx_ext_sts = 0x00000001,		/* Extended status available in RDES4 */
	desc_tx_error = 0x00008000,		/* (ES) Error summary bits */
	desc_tx_int_enable = 0x40000000,	/* (IC) Tx - interrupt on completion */
	desc_tx_last = 0x20000000,		/* (LS) Tx - last segment of the frame */
	desc_tx_first = 0x10000000,		/* (FS) Tx - first segment of the frame */
	desc_tx_disable_crc = 0x08000000,	/* (DC) Tx - add CRC disabled (first segment only) */
	desc_tx_disable_padd = 0x04000000,	/* (DP) disable padding, added by - reyaz */
	desc_tx_cis_mask = 0x00c00000,		/* Tx checksum offloading control mask */
	desc_tx_cis_bypass = 0x00000000,	/* Checksum bypass */
	desc_tx_cis_ipv4_hdr_cs = 0x00400000,	/* IPv4 header checksum only */
	desc_tx_cis_tcp_only_cs = 0x00800000,	/* TCP/UDP/ICMP checksum; pseudo-header checksum
						   is assumed to be present already */
	desc_tx_cis_tcp_pseudo_cs = 0x00c00000,	/* TCP/UDP/ICMP checksum fully in hardware,
						   including the pseudo header */
	desc_tx_desc_end_of_ring = 0x00200000,	/* (TER) End of descriptor ring */
	desc_tx_desc_chain = 0x00100000,	/* (TCH) Second buffer address is chain address */
	desc_rx_chk_bit0 = 0x00000001,		/* Rx payload checksum error */
	desc_rx_chk_bit7 = 0x00000080,		/* (IPC CS ERROR) Rx - IPv4 header checksum error */
	desc_rx_chk_bit5 = 0x00000020,		/* (FT) Rx - frame type - Ethernet, otherwise 802.3 */
	desc_rx_ts_avail = 0x00000080,		/* Timestamp available */
	desc_rx_frame_type = 0x00000020,	/* (FT) Rx - frame type - Ethernet, otherwise 802.3 */
	desc_tx_ipv4_chk_error = 0x00010000,	/* (IHE) Tx IP header error */
	desc_tx_timeout = 0x00004000,		/* (JT) Tx - transmit jabber timeout */
	desc_tx_frame_flushed = 0x00002000,	/* (FF) Tx - DMA/MTL flushed the frame due to SW flush */
	desc_tx_pay_chk_error = 0x00001000,	/* (PCE) Tx payload checksum error */
	desc_tx_lost_carrier = 0x00000800,	/* (LC) Tx - carrier lost during transmission */
	desc_tx_no_carrier = 0x00000400,	/* (NC) Tx - no carrier signal from the transceiver */
	desc_tx_late_collision = 0x00000200,	/* (LC) Tx - transmission aborted due to collision */
	desc_tx_exc_collisions = 0x00000100,	/* (EC) Tx - transmission aborted after 16 collisions */
	desc_tx_vlan_frame = 0x00000080,	/* (VF) Tx - VLAN-type frame */
	desc_tx_coll_mask = 0x00000078,		/* (CC) Tx - collision count */
	desc_tx_coll_shift = 3,
	desc_tx_exc_deferral = 0x00000004,	/* (ED) Tx - excessive deferral */
	desc_tx_underflow = 0x00000002,		/* (UF) Tx - late data arrival from the memory */
	desc_tx_deferred = 0x00000001,		/* (DB) Tx - frame transmission deferred */
	/*
	 * This explains the RDES1/TDES1 bits layout
	 * ------------------------------------------------------
	 * RDES1/TDES1 | Control Bits | Byte Count Buf 2 | Byte Count Buf 1 |
	 * ------------------------------------------------------
	 */
	/* dma_descriptor_length */	/* length word of DMA descriptor */
	desc_rx_dis_int_compl = 0x80000000,	/* (Disable Rx int on completion) */
	desc_rx_desc_end_of_ring = 0x00008000,	/* (RER) End of descriptor ring */
	desc_rx_desc_chain = 0x00004000,	/* (RCH) Second buffer address is chain address */
	desc_size2_mask = 0x1FFF0000,		/* (RBS2/TBS2) Buffer 2 size */
	desc_size2_shift = 16,
	desc_size1_mask = 0x00001FFF,		/* (RBS1/TBS1) Buffer 1 size */
	desc_size1_shift = 0,
	/*
	 * This explains the RDES4 Extended Status bits layout
	 * --------------------------------------------------------
	 * RDES4 |                 Extended Status                 |
	 * --------------------------------------------------------
	 */
	desc_rx_ts_dropped = 0x00004000,	/* PTP snapshot available */
	desc_rx_ptp_ver = 0x00002000,		/* When set indicates IEEE 1588 version 2
						   (else version 1) */
	desc_rx_ptp_frame_type = 0x00001000,	/* PTP frame type; indicates PTP sent over Ethernet */
	desc_rx_ptp_message_type = 0x00000F00,	/* Message type field (values below) */
	desc_rx_ptp_no = 0x00000000,		/* 0000 => No PTP message received */
	desc_rx_ptp_sync = 0x00000100,		/* 0001 => Sync (all clock types) received */
	desc_rx_ptp_follow_up = 0x00000200,	/* 0010 => Follow_Up (all clock types) received */
	desc_rx_ptp_delay_req = 0x00000300,	/* 0011 => Delay_Req (all clock types) received */
	desc_rx_ptp_delay_resp = 0x00000400,	/* 0100 => Delay_Resp (all clock types) received */
	desc_rx_ptp_pdelay_req = 0x00000500,	/* 0101 => Pdelay_Req (in P2P transparent clock)
						   or Announce in ordinary and boundary clocks */
	desc_rx_ptp_pdelay_resp = 0x00000600,	/* 0110 => Pdelay_Resp (in P2P transparent clock)
						   or Management in ordinary and boundary clocks */
	desc_rx_ptp_pdelay_resp_fp = 0x00000700,/* 0111 => Pdelay_Resp_Follow_Up (in P2P transparent
						   clock) or Signaling in ordinary and boundary
						   clocks */
	desc_rx_ptp_ipv6 = 0x00000080,		/* Received packet is in IPv6 */
	desc_rx_ptp_ipv4 = 0x00000040,		/* Received packet is in IPv4 */
	desc_rx_chk_sum_bypass = 0x00000020,	/* When set indicates checksum offload engine
						   is bypassed */
	desc_rx_ip_payload_error = 0x00000010,	/* When set indicates 16-bit IP payload CS is in error */
	desc_rx_ip_header_error = 0x00000008,	/* When set indicates 16-bit IPv4 hdr CS is in error,
						   or IP datagram version is not consistent with
						   the Ethernet type value */
	desc_rx_ip_payload_type = 0x00000007,	/* Indicates the type of payload encapsulated in
						   the IP datagram processed by COE (Rx) */
	desc_rx_ip_payload_unknown = 0x00000000,/* Unknown or did not process IP payload */
	desc_rx_ip_payload_udp = 0x00000001,	/* UDP */
	desc_rx_ip_payload_tcp = 0x00000002,	/* TCP */
	desc_rx_ip_payload_icmp = 0x00000003,	/* ICMP */
};
/*
 * dma_desc
 *	DMA Descriptor Structure
 *
 * The structure is common for both receive and transmit descriptors.
 * The first four words form the hardware descriptor (RDES0-3/TDES0-3);
 * the layout must match the GMAC DMA's expectation and must not change.
 */
struct dma_desc {
	uint32_t status;		/* Status (RDES0/TDES0) */
	uint32_t length;		/* Buffer 1 and Buffer 2 length (RDES1/TDES1) */
	uint32_t buffer1;		/* Network Buffer 1 pointer (DMA-able) */
	uint32_t data1;			/* This holds virtual address of
					   buffer1, not used by DMA.
					   NOTE(review): syn_dp_set_rx_qptr/
					   syn_dp_set_tx_qptr also store the second
					   buffer's DMA address here when a frame
					   spans two buffers — confirm intended use */
	/* This data below is used only by driver */
	uint32_t extstatus;		/* Extended status of a Rx Descriptor (RDES4) */
	uint32_t reserved1;		/* Reserved word */
	uint32_t timestamplow;		/* Lower 32 bits of the 64
					   bit timestamp value */
	uint32_t timestamphigh;		/* Higher 32 bits of the 64
					   bit timestamp value */
};
/*
 * syn_dp_gmac_tx_checksum_offload_tcp_pseudo
 *	Select full hardware checksum offload (TCP/UDP/ICMP including the
 *	pseudo header) in the Tx descriptor's CIS control field.
 */
static inline void syn_dp_gmac_tx_checksum_offload_tcp_pseudo(struct dma_desc *desc)
{
	uint32_t status = desc->status;

	/* Clear the CIS field, then program the "full checksum" mode */
	status &= ~desc_tx_cis_mask;
	status |= desc_tx_cis_tcp_pseudo_cs;
	desc->status = status;
}
/*
 * syn_dp_gmac_tx_desc_init_ring
 *	Initialize the tx descriptors for ring or chain mode operation.
 *
 * Zeroes all descriptors and marks the last one with the end-of-ring bit
 * so the DMA wraps back to the first descriptor.
 */
static inline void syn_dp_gmac_tx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
{
	memset(desc, 0, no_of_desc * sizeof(*desc));
	desc[no_of_desc - 1].status = desc_tx_desc_end_of_ring;
}
/*
 * syn_dp_gmac_rx_desc_init_ring
 *	Initialize the rx descriptors for ring or chain mode operation.
 *
 * Zeroes all descriptors and sets the end-of-ring bit (in the length word
 * for Rx) on the final descriptor so the DMA wraps the ring.
 */
static inline void syn_dp_gmac_rx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
{
	memset(desc, 0, no_of_desc * sizeof(*desc));
	desc[no_of_desc - 1].length = desc_rx_desc_end_of_ring;
}
/*
 * syn_dp_gmac_is_rx_desc_valid
 *	Checks whether the rx descriptor is valid.
 *
 * A descriptor is valid when the error-summary bit is clear and the frame
 * is fully contained in this descriptor (both first and last bits set).
 */
static inline bool syn_dp_gmac_is_rx_desc_valid(uint32_t status)
{
	if (status & desc_rx_error)
		return false;

	return (status & (desc_rx_first | desc_rx_last)) ==
	       (desc_rx_first | desc_rx_last);
}
/*
 * syn_dp_gmac_get_rx_desc_frame_length
 *	Returns the byte length of received frame including CRC.
 *
 * Extracts the FL field (bits 29:16) from the Rx status word.
 */
static inline uint32_t syn_dp_gmac_get_rx_desc_frame_length(uint32_t status)
{
	uint32_t fl = status & desc_rx_frame_length_mask;

	return fl >> desc_rx_frame_length_shift;
}
/*
 * syn_dp_gmac_is_desc_owned_by_dma
 *	Checks whether the descriptor is owned by DMA.
 *
 * OWN is a single bit (bit 31), so a simple bit test is equivalent to
 * comparing the masked value against the mask.
 */
static inline bool syn_dp_gmac_is_desc_owned_by_dma(struct dma_desc *desc)
{
	return !!(desc->status & desc_own_by_dma);
}
/*
 * syn_dp_gmac_is_desc_empty
 *	Checks whether the descriptor is empty.
 *
 * A descriptor is treated as empty when its buffer-1 byte count
 * (RBS1/TBS1 field of the length word) is zero.
 */
static inline bool syn_dp_gmac_is_desc_empty(struct dma_desc *desc)
{
	return !(desc->length & desc_size1_mask);
}
/*
 * syn_dp_gmac_get_tx_collision_count
 *	Gives the transmission collision count.
 *
 * Extracts the CC field (bits 6:3) from the Tx status word.
 */
static inline uint32_t syn_dp_gmac_get_tx_collision_count(uint32_t status)
{
	uint32_t cc = status & desc_tx_coll_mask;

	return cc >> desc_tx_coll_shift;
}
#endif /* __SYN_DESC__ */

View File

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include "nss_dp_dev.h"
#include "syn_data_plane.h"
#include "syn_reg.h"
/*
 * syn_dp_setup_rx_desc_queue
 *	This sets up the receive Descriptor queue in ring or chain mode.
 *
 * Allocates a coherent DMA region for the Rx descriptors (ring mode only),
 * initializes the ring and resets all Rx bookkeeping indices.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int syn_dp_setup_rx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
				      uint32_t no_of_desc, uint32_t desc_mode)
{
	dma_addr_t desc_dma;
	struct dma_desc *desc_base;

	dev_info->rx_desc_count = 0;

	/* Only ring mode is supported; ring size must be a power of two
	 * so that index wrap-around can use masking.
	 */
	BUG_ON(desc_mode != RINGMODE);
	BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);

	netdev_dbg(netdev, "total size of memory required for Rx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));

	desc_base = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &desc_dma, GFP_KERNEL);
	if (!desc_base) {
		netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
		return -ENOMEM;
	}

	dev_info->rx_desc_count = no_of_desc;
	dev_info->rx_desc = desc_base;
	dev_info->rx_desc_dma = desc_dma;
	netdev_dbg(netdev, "Rx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n",
			no_of_desc, desc_base, (void *)desc_dma);

	syn_dp_gmac_rx_desc_init_ring(dev_info->rx_desc, no_of_desc);

	/* Ring starts empty: producer/consumer indices and their cached
	 * descriptor pointers all point at the first slot.
	 */
	dev_info->rx_next = 0;
	dev_info->rx_busy = 0;
	dev_info->rx_next_desc = desc_base;
	dev_info->rx_busy_desc = desc_base;
	dev_info->busy_rx_desc = 0;

	return 0;
}
/*
 * syn_dp_setup_tx_desc_queue
 *	This sets up the transmit Descriptor queue in ring or chain mode.
 *
 * Allocates a coherent DMA region for the Tx descriptors (ring mode only),
 * initializes the ring and resets all Tx bookkeeping indices.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int syn_dp_setup_tx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
				      uint32_t no_of_desc, uint32_t desc_mode)
{
	dma_addr_t desc_dma;
	struct dma_desc *desc_base;

	dev_info->tx_desc_count = 0;

	/* Only ring mode is supported; ring size must be a power of two
	 * so that index wrap-around can use masking.
	 */
	BUG_ON(desc_mode != RINGMODE);
	BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);

	netdev_dbg(netdev, "Total size of memory required for Tx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));

	desc_base = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &desc_dma, GFP_KERNEL);
	if (!desc_base) {
		netdev_dbg(netdev, "Error in Tx Descriptors memory allocation\n");
		return -ENOMEM;
	}

	dev_info->tx_desc_count = no_of_desc;
	dev_info->tx_desc = desc_base;
	dev_info->tx_desc_dma = desc_dma;
	netdev_dbg(netdev, "Tx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n"
			, no_of_desc, desc_base, (void *)desc_dma);

	syn_dp_gmac_tx_desc_init_ring(dev_info->tx_desc, dev_info->tx_desc_count);

	/* Ring starts empty: producer/consumer indices and their cached
	 * descriptor pointers all point at the first slot.
	 */
	dev_info->tx_next = 0;
	dev_info->tx_busy = 0;
	dev_info->tx_next_desc = desc_base;
	dev_info->tx_busy_desc = desc_base;
	dev_info->busy_tx_desc = 0;

	return 0;
}
/*
* syn_dp_setup_rings
* Perform initial setup of Tx/Rx rings
*/
int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info)
{
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
int err;
err = syn_dp_setup_rx_desc_queue(netdev, dev, dev_info, SYN_DP_RX_DESC_SIZE, RINGMODE);
if (err) {
netdev_dbg(netdev, "nss_dp_gmac: rx descriptor setup unsuccessfull, err code: %d", err);
return NSS_DP_FAILURE;
}
err = syn_dp_setup_tx_desc_queue(netdev, dev, dev_info, SYN_DP_TX_DESC_SIZE, RINGMODE);
if (err) {
netdev_dbg(netdev, "nss_dp_gmac: tx descriptor setup unsuccessfull, err code: %d", err);
return NSS_DP_FAILURE;
}
syn_dp_rx_refill(gmac_dev, dev_info);
syn_init_tx_desc_base(nghd, dev_info->tx_desc_dma);
syn_init_rx_desc_base(nghd, dev_info->rx_desc_dma);
return NSS_DP_SUCCESS;
}
/*
 * syn_dp_cleanup_rings
 *	Cleanup Synopsys GMAC rings
 *
 * Unmaps and frees every skb still held by a busy Rx/Tx descriptor, then
 * releases both coherent descriptor rings. Assumes NAPI and interrupts are
 * already disabled, so the Rx side needs no locking; the Tx side still
 * takes data_lock against a late transmit path.
 */
int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info)
{
	uint32_t rx_skb_index;
	struct dma_desc *rxdesc;

	uint32_t tx_skb_index;
	struct dma_desc *txdesc;
	int i;
	struct sk_buff *skb;

	/*
	 * Rx Ring cleaning
	 * We are assuming that the NAPI poll was already completed.
	 * No need of a lock here since the NAPI and interrupts have been disabled now
	 *
	 * Bug fix: the original loop never advanced rx_skb_index or the
	 * descriptor pointer, so only the first busy slot was unmapped and
	 * freed and every other busy skb/mapping leaked. Walk the ring from
	 * rx_busy for busy_rx_desc slots instead.
	 */
	rx_skb_index = dev_info->rx_busy;
	for (i = 0; i < dev_info->busy_rx_desc; i++) {
		rx_skb_index = rx_skb_index & (dev_info->rx_desc_count - 1);
		rxdesc = dev_info->rx_desc + rx_skb_index;
		dma_unmap_single(&(gmac_dev->netdev->dev), rxdesc->buffer1,
				SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);

		skb = dev_info->rx_skb_list[rx_skb_index];
		if (unlikely(skb != NULL)) {
			dev_kfree_skb(skb);
			dev_info->rx_skb_list[rx_skb_index] = NULL;
		}

		rx_skb_index++;
	}

	dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_RX_DESC_SIZE),
				dev_info->rx_desc, dev_info->rx_desc_dma);

	/*
	 * Tx Ring cleaning
	 *
	 * Bug fix (same as Rx): advance the index and descriptor each
	 * iteration instead of repeatedly processing the tx_busy slot.
	 */
	spin_lock_bh(&dev_info->data_lock);

	tx_skb_index = dev_info->tx_busy;
	for (i = 0; i < dev_info->busy_tx_desc; i++) {
		tx_skb_index = tx_skb_index & (dev_info->tx_desc_count - 1);
		txdesc = dev_info->tx_desc + tx_skb_index;
		dma_unmap_single(&(gmac_dev->netdev->dev), txdesc->buffer1,
				SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);

		skb = dev_info->tx_skb_list[tx_skb_index];
		if (unlikely(skb != NULL)) {
			dev_kfree_skb(skb);
			dev_info->tx_skb_list[tx_skb_index] = NULL;
		}

		tx_skb_index++;
	}
	spin_unlock_bh(&dev_info->data_lock);

	dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_TX_DESC_SIZE),
				dev_info->tx_desc, dev_info->tx_desc_dma);

	return 0;
}

View File

@@ -0,0 +1,425 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "syn_data_plane.h"
#include "syn_reg.h"
/*
 * syn_dp_reset_rx_qptr
 *	Reset the descriptor after Rx is over.
 *
 * Returns the just-completed descriptor at rx_busy to the driver: clears
 * its fields (keeping only the end-of-ring bit in the length word) and
 * advances the busy index and cached busy-descriptor pointer by one slot.
 */
static inline void syn_dp_reset_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{

	/* Index of descriptor the DMA just completed.
	 * May be useful when data is spread over multiple buffers/descriptors
	 */
	uint32_t rxnext = dev_info->rx_busy;
	struct dma_desc *rxdesc = dev_info->rx_busy_desc;

	/* Cached busy pointer must always track the busy index */
	BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));

	/* Ring size is a power of two, so the mask wraps the index */
	dev_info->rx_busy = (rxnext + 1) & (dev_info->rx_desc_count - 1);
	dev_info->rx_busy_desc = dev_info->rx_desc + dev_info->rx_busy;

	/* The skb for this slot was already consumed/freed by the caller */
	dev_info->rx_skb_list[rxnext] = NULL;

	rxdesc->status = 0;
	/* Keep only RER so the hardware ring stays terminated */
	rxdesc->length &= desc_rx_desc_end_of_ring;
	rxdesc->buffer1 = 0;
	rxdesc->data1 = 0;
	rxdesc->reserved1 = 0;

	/*
	 * This returns one descriptor to processor. So busy count will be decremented by one.
	 */
	dev_info->busy_rx_desc--;
}
/*
 * syn_dp_set_rx_qptr
 *	Prepares the descriptor to receive packets.
 *
 * Programs the next free Rx descriptor (rx_next) with the DMA address and
 * length of a fresh buffer, records the skb in rx_skb_list, then hands the
 * descriptor to the DMA by setting the OWN bit. Returns the ring index of
 * the descriptor that was programmed.
 */
static inline int32_t syn_dp_set_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
					uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb)
{
	uint32_t rxnext = dev_info->rx_next;
	struct dma_desc *rxdesc = dev_info->rx_next_desc;
	uint32_t rx_skb_index = rxnext;

	/* Ring must not already be full, cached pointer must track the index,
	 * and the slot must be a clean, CPU-owned descriptor.
	 */
	BUG_ON(dev_info->busy_rx_desc >= dev_info->rx_desc_count);
	BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));
	BUG_ON(!syn_dp_gmac_is_desc_empty(rxdesc));
	BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(rxdesc));

	/* Split buffers larger than one descriptor buffer slot across the
	 * RBS1 and RBS2 byte-count fields.
	 */
	if (Length1 > SYN_DP_MAX_DESC_BUFF) {
		rxdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
		rxdesc->length |= ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
	} else {
		rxdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
	}

	rxdesc->buffer1 = Buffer1;
	dev_info->rx_skb_list[rx_skb_index] = skb;

	/* Program second buffer address if using two buffers.
	 * NOTE(review): this writes the RDES3 slot (struct field data1) —
	 * see the struct dma_desc comment; confirm intended use.
	 */
	if (Length1 > SYN_DP_MAX_DESC_BUFF)
		rxdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
	else
		rxdesc->data1 = 0;

	rxdesc->extstatus = 0;
	rxdesc->timestamplow = 0;
	rxdesc->timestamphigh = 0;

	/*
	 * Ensure all write completed before setting own by dma bit so when gmac
	 * HW takeover this descriptor, all the fields are filled correctly
	 */
	wmb();
	rxdesc->status = desc_own_by_dma;

	/* Advance the producer index; ring size is a power of two */
	dev_info->rx_next = (rxnext + 1) & (dev_info->rx_desc_count - 1);
	dev_info->rx_next_desc = dev_info->rx_desc + dev_info->rx_next;

	/*
	 * 1 descriptor will be given to HW. So busy count incremented by 1.
	 */
	dev_info->busy_rx_desc++;

	return rxnext;
}
/*
 * syn_dp_rx_refill
 *	Refill the RX descriptor ring.
 *
 * Allocates one skb per empty Rx descriptor, DMA-maps it and hands it to
 * the hardware via syn_dp_set_rx_qptr(). Stops early (without error) if
 * skb allocation or DMA mapping fails; the next refill will retry.
 */
void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
	struct net_device *netdev = gmac_dev->netdev;
	struct device *dev = &gmac_dev->pdev->dev;
	int to_fill = SYN_DP_RX_DESC_SIZE - dev_info->busy_rx_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	while (to_fill-- > 0) {
		skb = __netdev_alloc_skb(netdev, SYN_DP_MINI_JUMBO_FRAME_MTU, GFP_ATOMIC);
		if (unlikely(!skb)) {
			netdev_dbg(netdev, "Unable to allocate skb, will try next time\n");
			break;
		}

		/* Align the IP header for the stack's benefit */
		skb_reserve(skb, NET_IP_ALIGN);

		dma_addr = dma_map_single(dev, skb->data, SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			dev_kfree_skb(skb);
			netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
			break;
		}

		syn_dp_set_rx_qptr(gmac_dev, dev_info, dma_addr, SYN_DP_MINI_JUMBO_FRAME_MTU, skb);
	}
}
/*
 * syn_dp_rx()
 *	Process RX packets
 *
 * Walks up to 'budget' busy Rx descriptors starting at rx_busy. For each
 * CPU-owned descriptor: unmaps the buffer, delivers valid frames to the
 * stack via GRO, accounts errors otherwise, and recycles the descriptor.
 * Returns the number of descriptors processed.
 */
int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget)
{
	struct dma_desc *desc = NULL;
	int frame_length, busy;
	uint32_t status;
	struct sk_buff *rx_skb;
	uint32_t rx_skb_index;

	if (!dev_info->busy_rx_desc) {
		/* no desc are held by gmac dma, we are done */
		return 0;
	}

	/* Process at most 'budget' descriptors this NAPI round */
	busy = dev_info->busy_rx_desc;
	if (busy > budget)
		busy = budget;

	do {
		desc = dev_info->rx_busy_desc;
		if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
			/* desc still hold by gmac dma, so we are done */
			break;
		}

		status = desc->status;

		/* Slot index and skb for the completed descriptor */
		rx_skb_index = dev_info->rx_busy;
		rx_skb = dev_info->rx_skb_list[rx_skb_index];

		dma_unmap_single(&(gmac_dev->netdev->dev), desc->buffer1,
				SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);

		/* NOTE(review): stats_lock is held across napi_gro_receive()
		 * below, lengthening the lock hold time — confirm intended.
		 */
		spin_lock_bh(&dev_info->stats_lock);
		if (likely(syn_dp_gmac_is_rx_desc_valid(status))) {
			/* We have a pkt to process get the frame length */
			frame_length = syn_dp_gmac_get_rx_desc_frame_length(status);
			/* Get rid of FCS: 4 */
			frame_length -= ETH_FCS_LEN;

			/* Valid packet, collect stats */
			dev_info->stats.stats.rx_packets++;
			dev_info->stats.stats.rx_bytes += frame_length;

			/* type_trans and deliver to linux */
			skb_put(rx_skb, frame_length);
			rx_skb->protocol = eth_type_trans(rx_skb, gmac_dev->netdev);
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
			napi_gro_receive(&gmac_dev->napi, rx_skb);
		} else {
			/* Invalid frame: drop it and classify the error bits */
			dev_info->stats.stats.rx_errors++;
			dev_kfree_skb(rx_skb);

			if (status & (desc_rx_crc | desc_rx_collision |
					desc_rx_overflow | desc_rx_dribbling |
					desc_rx_length_error)) {
				dev_info->stats.stats.mmc_rx_crc_errors += (status & desc_rx_crc) ? 1 : 0;
				dev_info->stats.stats.rx_late_collision_errors += (status & desc_rx_collision) ? 1 : 0;
				dev_info->stats.stats.mmc_rx_overflow_errors += (status & desc_rx_overflow) ? 1 : 0;
				dev_info->stats.stats.rx_dribble_bit_errors += (status & desc_rx_dribbling) ? 1 : 0;
				dev_info->stats.stats.rx_length_errors += (status & desc_rx_length_error) ? 1 : 0;
			}
		}
		spin_unlock_bh(&dev_info->stats_lock);

		/* Recycle the descriptor and advance the busy index */
		syn_dp_reset_rx_qptr(gmac_dev, dev_info);
		busy--;
	} while (busy > 0);

	return budget - busy;
}
/*
 * syn_dp_reset_tx_qptr
 *	Reset the descriptor after Tx is over.
 *
 * Returns the just-completed descriptor at tx_busy to the driver: clears
 * its fields (keeping only the end-of-ring bit in the status word for Tx)
 * and advances the busy index and cached busy-descriptor pointer.
 */
static inline void syn_dp_reset_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
	uint32_t txover = dev_info->tx_busy;
	struct dma_desc *txdesc = dev_info->tx_busy_desc;

	/* Cached busy pointer must always track the busy index */
	BUG_ON(txdesc != (dev_info->tx_desc + txover));

	/* Ring size is a power of two, so the mask wraps the index */
	dev_info->tx_busy = (txover + 1) & (dev_info->tx_desc_count - 1);
	dev_info->tx_busy_desc = dev_info->tx_desc + dev_info->tx_busy;

	/* The skb for this slot was already freed by the completion path */
	dev_info->tx_skb_list[txover] = NULL;

	/* Keep only TER so the hardware ring stays terminated */
	txdesc->status &= desc_tx_desc_end_of_ring;
	txdesc->length = 0;
	txdesc->buffer1 = 0;
	txdesc->data1 = 0;
	txdesc->reserved1 = 0;

	/*
	 * Busy tx descriptor is reduced by one as
	 * it will be handed over to Processor now.
	 */
	dev_info->busy_tx_desc--;
}
/*
 * syn_dp_set_tx_qptr
 *	Populate the tx desc structure with the buffer address.
 *
 * Programs the next free Tx descriptor (tx_next) with the DMA address,
 * length and control bits for a frame, records the skb, optionally enables
 * checksum offload, and finally hands the descriptor to the DMA via the
 * bits in 'set_dma' (normally the OWN bit). Returns the descriptor used.
 * Caller must hold dev_info->data_lock.
 */
static inline struct dma_desc *syn_dp_set_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
						uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb, uint32_t offload_needed,
						uint32_t tx_cntl, uint32_t set_dma)
{
	uint32_t txnext = dev_info->tx_next;
	struct dma_desc *txdesc = dev_info->tx_next_desc;
	uint32_t tx_skb_index = txnext;

	/* Ring must not be over-full, cached pointer must track the index,
	 * and the slot must be a clean, CPU-owned descriptor.
	 */
	BUG_ON(dev_info->busy_tx_desc > dev_info->tx_desc_count);
	BUG_ON(txdesc != (dev_info->tx_desc + txnext));
	BUG_ON(!syn_dp_gmac_is_desc_empty(txdesc));
	BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(txdesc));

	/* Split buffers larger than one descriptor buffer slot across the
	 * TBS1 and TBS2 byte-count fields.
	 */
	if (Length1 > SYN_DP_MAX_DESC_BUFF) {
		txdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
		txdesc->length |=
			((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
	} else {
		txdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
	}

	txdesc->status |= tx_cntl;
	txdesc->buffer1 = Buffer1;

	dev_info->tx_skb_list[tx_skb_index] = skb;

	/* Program second buffer address if using two buffers.
	 * NOTE(review): this writes the TDES3 slot (struct field data1) —
	 * see the struct dma_desc comment; confirm intended use.
	 */
	if (Length1 > SYN_DP_MAX_DESC_BUFF)
		txdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
	else
		txdesc->data1 = 0;

	if (likely(offload_needed)) {
		syn_dp_gmac_tx_checksum_offload_tcp_pseudo(txdesc);
	}

	/*
	 * Ensure all write completed before setting own by dma bit so when gmac
	 * HW takeover this descriptor, all the fields are filled correctly
	 */
	wmb();
	txdesc->status |= set_dma;

	/* Advance the producer index; ring size is a power of two */
	dev_info->tx_next = (txnext + 1) & (dev_info->tx_desc_count - 1);
	dev_info->tx_next_desc = dev_info->tx_desc + dev_info->tx_next;

	return txdesc;
}
/*
 * syn_dp_tx_desc_queue
 *	Queue TX descriptor to the TX ring.
 *
 * Programs one descriptor for the whole skb (first + last segment, with
 * completion interrupt) under data_lock, requesting checksum offload when
 * the stack asked for it, and bumps the busy-descriptor count.
 */
static void syn_dp_tx_desc_queue(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb, dma_addr_t dma_addr)
{
	unsigned int frame_len = skb->len;
	uint32_t tx_cntl = desc_tx_last | desc_tx_first | desc_tx_int_enable;

	spin_lock_bh(&dev_info->data_lock);

	syn_dp_set_tx_qptr(gmac_dev, dev_info, dma_addr, frame_len, skb,
			   (skb->ip_summed == CHECKSUM_PARTIAL), tx_cntl, desc_own_by_dma);
	dev_info->busy_tx_desc++;

	spin_unlock_bh(&dev_info->data_lock);
}
/*
 * syn_dp_process_tx_complete
 *	Xmit complete, clear descriptor and free the skb
 *
 * Walks busy Tx descriptors under data_lock, stopping at the first one
 * still owned by the DMA. For each completed descriptor: unmaps the
 * buffer, frees the skb on the last segment, updates stats (under the
 * nested stats_lock) and recycles the descriptor.
 */
void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
	int busy, len;
	uint32_t status;
	struct dma_desc *desc = NULL;
	struct sk_buff *skb;
	uint32_t tx_skb_index;

	spin_lock_bh(&dev_info->data_lock);
	busy = dev_info->busy_tx_desc;

	if (!busy) {
		/* No desc are hold by gmac dma, we are done */
		spin_unlock_bh(&dev_info->data_lock);
		return;
	}

	do {
		desc = dev_info->tx_busy_desc;
		if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
			/* desc still hold by gmac dma, so we are done */
			break;
		}

		/* Buffer-1 byte count only.
		 * NOTE(review): if a frame spanned two buffers (TBS2 also
		 * set), this under-counts both the unmap length and the
		 * tx_bytes accounting below — confirm whether two-buffer
		 * frames can occur on this path.
		 */
		len = (desc->length & desc_size1_mask) >> desc_size1_shift;
		dma_unmap_single(&(gmac_dev->pdev->dev), desc->buffer1, len, DMA_TO_DEVICE);

		status = desc->status;
		if (status & desc_tx_last) {
			/* TX is done for this whole skb, we can free it */

			/* Get the skb from the tx skb pool */
			tx_skb_index = dev_info->tx_busy;
			skb = dev_info->tx_skb_list[tx_skb_index];

			BUG_ON(!skb);
			dev_kfree_skb(skb);

			/* stats_lock nests inside data_lock here */
			spin_lock_bh(&dev_info->stats_lock);

			if (unlikely(status & desc_tx_error)) {
				/* Some error happen, collect statistics */
				dev_info->stats.stats.tx_errors++;
				dev_info->stats.stats.tx_jabber_timeout_errors += (status & desc_tx_timeout) ? 1 : 0;
				dev_info->stats.stats.tx_frame_flushed_errors += (status & desc_tx_frame_flushed) ? 1 : 0;
				dev_info->stats.stats.tx_loss_of_carrier_errors += (status & desc_tx_lost_carrier) ? 1 : 0;
				dev_info->stats.stats.tx_no_carrier_errors += (status & desc_tx_no_carrier) ? 1 : 0;
				dev_info->stats.stats.tx_late_collision_errors += (status & desc_tx_late_collision) ? 1 : 0;
				dev_info->stats.stats.tx_excessive_collision_errors += (status & desc_tx_exc_collisions) ? 1 : 0;
				dev_info->stats.stats.tx_excessive_deferral_errors += (status & desc_tx_exc_deferral) ? 1 : 0;
				dev_info->stats.stats.tx_underflow_errors += (status & desc_tx_underflow) ? 1 : 0;
				dev_info->stats.stats.tx_ip_header_errors += (status & desc_tx_ipv4_chk_error) ? 1 : 0;
				dev_info->stats.stats.tx_ip_payload_errors += (status & desc_tx_pay_chk_error) ? 1 : 0;
			} else {
				/* No error, recored tx pkts/bytes and
				 * collision
				 */
				dev_info->stats.stats.tx_packets++;
				dev_info->stats.stats.tx_collisions += syn_dp_gmac_get_tx_collision_count(status);
				dev_info->stats.stats.tx_bytes += len;
			}
			spin_unlock_bh(&dev_info->stats_lock);
		}

		/* Recycle the descriptor and advance the busy index */
		syn_dp_reset_tx_qptr(gmac_dev, dev_info);
		busy--;
	} while (busy > 0);

	spin_unlock_bh(&dev_info->data_lock);
}
/*
 * syn_dp_tx
 *	TX routine for Synopsys GMAC
 *
 * Maps the skb's data for DMA, queues one Tx descriptor for it and kicks
 * the DMA engine. Returns 0 on success, -1 when the ring is full or the
 * DMA mapping fails (caller keeps ownership of the skb in that case).
 */
int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb)
{
	struct net_device *netdev = gmac_dev->netdev;
	struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
	unsigned len = skb->len;
	dma_addr_t dma_addr;

	/*
	 * If we don't have enough tx descriptor for this pkt, return busy.
	 */
	if ((SYN_DP_TX_DESC_SIZE - dev_info->busy_tx_desc) < 1) {
		netdev_dbg(netdev, "Not enough descriptors available");
		return -1;
	}

	dma_addr = dma_map_single(&gmac_dev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&gmac_dev->pdev->dev, dma_addr))) {
		/*
		 * Bug fix: message previously said "empty buffer" — a
		 * copy-paste from the Rx refill path; this is the Tx path.
		 */
		netdev_dbg(netdev, "DMA mapping failed for Tx buffer\n");
		return -1;
	}

	/*
	 * Queue packet to the GMAC rings
	 */
	syn_dp_tx_desc_queue(gmac_dev, dev_info, skb, dma_addr);

	/* Poke the DMA in case it had suspended on an empty ring */
	syn_resume_dma_tx(nghd);

	return 0;
}

View File

@@ -0,0 +1,132 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __NSS_DP_DEV_H__
#define __NSS_DP_DEV_H__
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/switch.h>
#include "nss_dp_api_if.h"
#include "nss_dp_hal_if.h"
#define NSS_DP_ACL_DEV_ID 0
struct nss_dp_global_ctx;
/*
 * nss data plane device structure
 *	Per-port private state, stored as the netdev's priv data.
 */
struct nss_dp_dev {
	uint32_t macid;			/* Sequence# of Mac on the platform */
	uint32_t vsi;			/* vsi number */
	unsigned long flags;		/* Status flags (enum nss_dp_state bits) */
	unsigned long drv_flags;	/* Driver specific feature flags
					   (NSS_DP_PRIV_FLAG bits) */

	/* Phy related stuff */
	struct phy_device *phydev;	/* Phy device */
	struct mii_bus *miibus;		/* MII bus */
	uint32_t phy_mii_type;		/* RGMII/SGMII/QSGMII */
	uint32_t phy_mdio_addr;		/* Mdio address */
	bool link_poll;			/* Link polling enable? */
	uint32_t forced_speed;		/* Forced speed? */
	uint32_t forced_duplex;		/* Forced duplex? */
	uint32_t link_state;		/* Current link state */
	uint32_t pause;			/* Current flow control settings */

	struct net_device *netdev;	/* Backpointer to the net device */
	struct platform_device *pdev;	/* Owning platform device */
	struct napi_struct napi;	/* NAPI context for this port */

	struct nss_dp_data_plane_ctx *dpc;
					/* context when NSS owns GMACs */
	struct nss_dp_data_plane_ops *data_plane_ops;
					/* ops for each data plane */
	struct nss_dp_global_ctx *ctx;	/* Global NSS DP context */
	struct nss_gmac_hal_dev *gmac_hal_ctx;	/* context of gmac hal */
	struct nss_gmac_hal_ops *gmac_hal_ops;	/* GMAC HAL OPS */

	/* switchdev related attributes */
#ifdef CONFIG_NET_SWITCHDEV
	u8 stp_state;			/* STP state of this physical port */
	unsigned long brport_flags;	/* bridge port flags */
#endif
};
/*
 * nss data plane global context
 *	Driver-wide state shared by all ports.
 */
struct nss_dp_global_ctx {
	struct nss_dp_dev *nss_dp[NSS_DP_HAL_MAX_PORTS];
					/* Per-port device pointers */
	struct nss_gmac_hal_ops *gmac_hal_ops[GMAC_HAL_TYPE_MAX];
					/* GMAC HAL OPS */
	bool common_init_done;		/* Flag to hold common init state */
	uint8_t slowproto_acl_bm;	/* Port bitmap to allow slow protocol packets */
};
/* Global data */
extern struct nss_dp_global_ctx dp_global_ctx;
extern struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
 * nss data plane link state
 *	Values stored in nss_dp_dev.link_state.
 */
enum nss_dp_link_state {
	__NSS_DP_LINK_UP,	/* Indicate link is UP */
	__NSS_DP_LINK_DOWN	/* Indicate link is down */
};
/*
 * nss data plane status
 *	Bit positions used in nss_dp_dev.flags.
 */
enum nss_dp_state {
	__NSS_DP_UP,		/* set to indicate the interface is UP */
	__NSS_DP_RXCSUM,	/* Rx checksum enabled */
	__NSS_DP_AUTONEG,	/* Autonegotiation Enabled */
	__NSS_DP_LINKPOLL,	/* Poll link status */
};
/*
 * nss data plane private flags
 *	Bit positions used in nss_dp_dev.drv_flags; convert to a mask
 *	with the NSS_DP_PRIV_FLAG() macro below.
 */
enum nss_dp_priv_flags {
	__NSS_DP_PRIV_FLAG_INIT_DONE,		/* Per-port init completed */
	__NSS_DP_PRIV_FLAG_IRQ_REQUESTED,	/* IRQ has been requested */
	__NSS_DP_PRIV_FLAG_MAX,
};
#define NSS_DP_PRIV_FLAG(x)	(1 << __NSS_DP_PRIV_FLAG_ ## x)
/*
 * nss_dp_set_ethtool_ops()
 *	Attach the driver's ethtool operations to a netdev.
 */
void nss_dp_set_ethtool_ops(struct net_device *netdev);

/*
 * nss data plane switchdev helpers
 */
#ifdef CONFIG_NET_SWITCHDEV
void nss_dp_switchdev_setup(struct net_device *dev);
bool nss_dp_is_phy_dev(struct net_device *dev);
#endif

#endif	/* __NSS_DP_DEV_H__ */

View File

@@ -0,0 +1,192 @@
/*
**************************************************************************
* Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include "nss_dp_hal.h"
/*
 * nss_dp_reset_netdev_features()
 *	Clear every netdev feature mask so the next data plane owner can
 *	advertise its own feature set from a clean slate.
 */
static inline void nss_dp_reset_netdev_features(struct net_device *netdev)
{
	netdev->features = netdev->hw_features = 0;
	netdev->vlan_features = netdev->wanted_features = 0;
}
/*
 * nss_dp_receive()
 *	Called by overlay drivers to deliver packets to nss-dp
 *
 * Fills in the skb's device and protocol fields and hands the packet to
 * the network stack through GRO on the caller's napi context.
 */
void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb,
		    struct napi_struct *napi)
{
	struct nss_dp_dev *dp_dev = netdev_priv(netdev);

	skb->dev = netdev;
	skb->protocol = eth_type_trans(skb, netdev);
	netdev_dbg(netdev, "Rx on port%d, packet len %d, CSUM %d\n",
		   dp_dev->macid, skb->len, skb->ip_summed);

#ifdef CONFIG_NET_SWITCHDEV
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
	/* Propagate the offload mark so the bridge skips re-forwarding */
	skb->offload_fwd_mark = netdev->offload_fwd_mark;
#else
	/*
	 * TODO: Implement ndo_get_devlink_port()
	 */
	skb->offload_fwd_mark = 0;
#endif
#endif
	napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(nss_dp_receive);
/*
 * nss_dp_is_in_open_state()
 *	Report whether the data plane for this netdev is currently open.
 */
bool nss_dp_is_in_open_state(struct net_device *netdev)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	return test_bit(__NSS_DP_UP, &priv->flags) != 0;
}
EXPORT_SYMBOL(nss_dp_is_in_open_state);
/*
 * nss_dp_override_data_plane()
 *	API to allow overlay drivers to override the data plane
 *
 * Validates that the caller supplies every mandatory op, tears down the
 * current data plane (stopping the netdev first if it is up), then installs
 * the caller's context and ops.
 *
 * Returns NSS_DP_SUCCESS, NSS_DP_FAILURE on an incomplete ops table, or
 * -ENOMEM if deinit of the old data plane fails.
 */
int nss_dp_override_data_plane(struct net_device *netdev,
			       struct nss_dp_data_plane_ops *dp_ops,
			       struct nss_dp_data_plane_ctx *dpc)
{
	struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);

	/*
	 * A partial ops table would crash on the first missing callback,
	 * so reject anything incomplete up front.
	 */
	if (!dp_ops->open || !dp_ops->close || !dp_ops->link_state
	    || !dp_ops->mac_addr || !dp_ops->change_mtu || !dp_ops->xmit
	    || !dp_ops->set_features || !dp_ops->pause_on_off || !dp_ops->deinit) {
		netdev_dbg(netdev, "All the op functions must be present, reject this registration\n");
		return NSS_DP_FAILURE;
	}

	/*
	 * If this data plane is up, close the netdev to force TX/RX stop, and
	 * also reset the features
	 */
	if (test_bit(__NSS_DP_UP, &dp_dev->flags)) {
		netdev->netdev_ops->ndo_stop(netdev);
		nss_dp_reset_netdev_features(netdev);
	}

	/*
	 * Free up the resources used by the data plane.
	 * NOTE(review): deinit is invoked with the caller's new ctx (dpc),
	 * not the currently installed dp_dev->dpc — confirm this is intended.
	 */
	if (dp_dev->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE)) {
		if (dp_dev->data_plane_ops->deinit(dpc)) {
			/* Was misreported as "init failed" */
			netdev_dbg(netdev, "Data plane deinit failed\n");
			return -ENOMEM;
		}
		dp_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(INIT_DONE);
	}

	/*
	 * Override the data_plane_ctx, data_plane_ops
	 */
	dp_dev->dpc = dpc;
	dp_dev->data_plane_ops = dp_ops;

	return NSS_DP_SUCCESS;
}
EXPORT_SYMBOL(nss_dp_override_data_plane);
/*
 * nss_dp_start_data_plane()
 *	Data plane to inform netdev it is ready to start
 *
 * Rejects the request if the netdev is already up or the supplied context
 * does not match the one registered for this port.
 */
void nss_dp_start_data_plane(struct net_device *netdev,
			     struct nss_dp_data_plane_ctx *dpc)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	if (test_bit(__NSS_DP_UP, &priv->flags)) {
		netdev_dbg(netdev, "This netdev already up, something is wrong\n");
		return;
	}

	if (priv->dpc != dpc) {
		netdev_dbg(netdev, "Cookie %px does not match, reject\n", dpc);
		return;
	}

	netdev->netdev_ops->ndo_open(priv->netdev);
}
EXPORT_SYMBOL(nss_dp_start_data_plane);
/*
 * nss_dp_restore_data_plane()
 *	Called by overlay drivers to detach itself from nss-dp
 *
 * Reinstalls the SoC's default data plane ops and the port's global
 * data plane context.
 */
void nss_dp_restore_data_plane(struct net_device *netdev)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	/*
	 * Force TX/RX to stop (and drop the advertised features) if this
	 * interface is currently up.
	 */
	if (test_bit(__NSS_DP_UP, &priv->flags)) {
		netdev->netdev_ops->ndo_stop(netdev);
		nss_dp_reset_netdev_features(netdev);
	}

	priv->data_plane_ops = nss_dp_hal_get_data_plane_ops();
	priv->dpc = &dp_global_data_plane_ctx[priv->macid - NSS_DP_START_IFNUM];

	/*
	 * TODO: Re-initialize EDMA dataplane
	 */
}
EXPORT_SYMBOL(nss_dp_restore_data_plane);
/*
 * nss_dp_get_netdev_by_nss_if_num()
 *	Return the net device for the given interface number, or NULL if the
 *	number is out of range or no port is registered at that slot.
 */
struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num)
{
	struct nss_dp_dev *dp;

	if ((if_num > NSS_DP_HAL_MAX_PORTS) || (if_num < NSS_DP_START_IFNUM)) {
		pr_err("Invalid if_num %d\n", if_num);
		return NULL;
	}

	dp = dp_global_ctx.nss_dp[if_num - NSS_DP_START_IFNUM];
	return dp ? dp->netdev : NULL;
}
EXPORT_SYMBOL(nss_dp_get_netdev_by_nss_if_num);

View File

@@ -0,0 +1,378 @@
/*
**************************************************************************
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include "nss_dp_dev.h"
#include "fal/fal_port_ctrl.h"
/*
 * nss_dp_get_ethtool_stats()
 *	ethtool -S handler; delegates the stats dump to the GMAC HAL.
 */
static void nss_dp_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats, uint64_t *data)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	priv->gmac_hal_ops->getethtoolstats(priv->gmac_hal_ctx, data);
}
/*
 * nss_dp_get_strset_count()
 *	Report the number of strings in the given string set via the HAL.
 */
static int32_t nss_dp_get_strset_count(struct net_device *netdev, int32_t sset)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	return priv->gmac_hal_ops->getssetcount(priv->gmac_hal_ctx, sset);
}
/*
 * nss_dp_get_strings()
 *	Fill in the stat-name strings for a string set via the HAL.
 */
static void nss_dp_get_strings(struct net_device *netdev, uint32_t stringset,
			       uint8_t *data)
{
	struct nss_dp_dev *priv = netdev_priv(netdev);

	priv->gmac_hal_ops->getstrings(priv->gmac_hal_ctx, stringset, data);
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
 * nss_dp_get_settings()
 *	Legacy (pre-4.5) ethtool get handler; only valid with an attached PHY.
 */
static int32_t nss_dp_get_settings(struct net_device *netdev,
				   struct ethtool_cmd *cmd)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	/*
	 * If there is a PHY attached, get the status from Kernel helper
	 */
	if (dp_priv->phydev)
		return phy_ethtool_gset(dp_priv->phydev, cmd);

	return -EIO;
}

/*
 * nss_dp_set_settings()
 *	Legacy (pre-4.5) ethtool set handler; only valid with an attached PHY.
 */
static int32_t nss_dp_set_settings(struct net_device *netdev,
				   struct ethtool_cmd *cmd)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	if (!dp_priv->phydev)
		return -EIO;

	return phy_ethtool_sset(dp_priv->phydev, cmd);
}
#endif
/*
* nss_dp_get_pauseparam()
*/
static void nss_dp_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
pause->rx_pause = dp_priv->pause & FLOW_CTRL_RX ? 1 : 0;
pause->tx_pause = dp_priv->pause & FLOW_CTRL_TX ? 1 : 0;
pause->autoneg = AUTONEG_ENABLE;
}
/*
 * nss_dp_set_pauseparam()
 *	Cache the requested flow-control settings and, when a PHY is attached,
 *	update its pause advertisement and restart autonegotiation.
 *	Two variants: legacy bitmask advertising (pre-4.5) vs linkmode bitmaps.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	/* set flow control settings */
	dp_priv->pause = 0;
	if (pause->rx_pause)
		dp_priv->pause |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dp_priv->pause |= FLOW_CTRL_TX;

	/* Nothing more to do without a PHY */
	if (!dp_priv->phydev)
		return 0;

	/* Update flow control advertisment */
	dp_priv->phydev->advertising &=
		~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (pause->rx_pause)
		dp_priv->phydev->advertising |=
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);

	if (pause->tx_pause)
		dp_priv->phydev->advertising |= ADVERTISED_Asym_Pause;

	genphy_config_aneg(dp_priv->phydev);

	return 0;
}
#else
static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
				     struct ethtool_pauseparam *pause)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };

	/* set flow control settings */
	dp_priv->pause = 0;
	if (pause->rx_pause)
		dp_priv->pause |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dp_priv->pause |= FLOW_CTRL_TX;

	/* Nothing more to do without a PHY */
	if (!dp_priv->phydev)
		return 0;

	/* Update flow control advertisment */
	linkmode_copy(advertising, dp_priv->phydev->advertising);

	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);

	if (pause->rx_pause) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
	}

	if (pause->tx_pause)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);

	linkmode_copy(dp_priv->phydev->advertising, advertising);

	genphy_config_aneg(dp_priv->phydev);

	return 0;
}
#endif
/*
 * nss_dp_fal_to_ethtool_linkmode_xlate()
 *	Translate linkmode from FAL type to ethtool type.
 *
 * Consumes *xlate_from one set bit at a time (it is zero on return) and
 * ORs the corresponding ethtool SUPPORTED_* bit into *xlate_to.  5G has
 * no ethtool enumeration and is silently dropped.
 */
static inline void nss_dp_fal_to_ethtool_linkmode_xlate(uint32_t *xlate_to, uint32_t *xlate_from)
{
	uint32_t bit;

	while (*xlate_from) {
		bit = 1 << (ffs(*xlate_from) - 1);

		switch (bit) {
		case FAL_PHY_EEE_10BASE_T:
			*xlate_to |= SUPPORTED_10baseT_Full;
			break;

		case FAL_PHY_EEE_100BASE_T:
			*xlate_to |= SUPPORTED_100baseT_Full;
			break;

		case FAL_PHY_EEE_1000BASE_T:
			*xlate_to |= SUPPORTED_1000baseT_Full;
			break;

		case FAL_PHY_EEE_2500BASE_T:
			*xlate_to |= SUPPORTED_2500baseX_Full;
			break;

		case FAL_PHY_EEE_5000BASE_T:
			/*
			 * Ethtool does not support enumeration for 5G.
			 */
			break;

		case FAL_PHY_EEE_10000BASE_T:
			*xlate_to |= SUPPORTED_10000baseT_Full;
			break;
		}

		*xlate_from &= ~bit;
	}
}
/*
 * nss_dp_get_eee()
 *	Get EEE settings.
 *
 * Reads the port's EEE configuration from the FAL layer and translates the
 * FAL capability/advertisement bitmaps into ethtool linkmode bitmaps.
 * Returns 0 on success or -EIO if the FAL query fails.
 */
static int32_t nss_dp_get_eee(struct net_device *netdev, struct ethtool_eee *eee)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	fal_port_eee_cfg_t port_eee_cfg;
	uint32_t port_id;
	sw_error_t ret;

	memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t));
	port_id = dp_priv->macid;

	ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg);
	if (ret != SW_OK) {
		netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret);
		return -EIO;
	}

	/*
	 * Translate the FAL linkmode types to ethtool linkmode types.
	 * Note: the xlate helper zeroes its source field as it runs; that is
	 * fine here because port_eee_cfg is a local copy.
	 */
	nss_dp_fal_to_ethtool_linkmode_xlate(&eee->supported, &port_eee_cfg.capability);
	nss_dp_fal_to_ethtool_linkmode_xlate(&eee->advertised, &port_eee_cfg.advertisement);
	nss_dp_fal_to_ethtool_linkmode_xlate(&eee->lp_advertised, &port_eee_cfg.link_partner_advertisement);

	eee->eee_enabled = port_eee_cfg.enable;
	eee->eee_active = port_eee_cfg.eee_status;
	eee->tx_lpi_enabled = port_eee_cfg.lpi_tx_enable;
	eee->tx_lpi_timer = port_eee_cfg.lpi_sleep_timer;

	return 0;
}
/*
 * nss_dp_set_eee()
 *	Set EEE settings.
 *
 * Validates every advertised ethtool mode against the port's current FAL
 * capability, translates it to FAL bits and pushes the new configuration
 * down through the FAL layer.  Returns 0 on success, -EIO on a FAL error
 * or if any advertised mode is unsupported.
 *
 * NOTE(review): eee->advertised is consumed (zeroed) by the translation
 * loop below, so the caller's struct is mutated — confirm callers do not
 * rely on it afterwards.
 */
static int32_t nss_dp_set_eee(struct net_device *netdev, struct ethtool_eee *eee)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	fal_port_eee_cfg_t port_eee_cfg, port_eee_cur_cfg;
	uint32_t port_id, pos;
	sw_error_t ret;

	memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t));
	memset(&port_eee_cur_cfg, 0, sizeof(fal_port_eee_cfg_t));
	port_id = dp_priv->macid;

	/*
	 * Get current EEE configuration (used only to check capability).
	 */
	ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cur_cfg);
	if (ret != SW_OK) {
		netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret);
		return -EIO;
	}

	port_eee_cfg.enable = eee->eee_enabled;

	/*
	 * Translate the ethtool speed types to FAL speed types.
	 * Each iteration consumes the lowest set bit of eee->advertised.
	 */
	while (eee->advertised) {
		pos = ffs(eee->advertised);

		switch (1 << (pos - 1)) {
		case ADVERTISED_10baseT_Full:
			if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10BASE_T) {
				port_eee_cfg.advertisement |= FAL_PHY_EEE_10BASE_T;
				break;
			}

			netdev_dbg(netdev, "Advertised value 10baseT_Full is not supported\n");
			return -EIO;

		case ADVERTISED_100baseT_Full:
			if (port_eee_cur_cfg.capability & FAL_PHY_EEE_100BASE_T) {
				port_eee_cfg.advertisement |= FAL_PHY_EEE_100BASE_T;
				break;
			}

			netdev_dbg(netdev, "Advertised value 100baseT_Full is not supported\n");
			return -EIO;

		case ADVERTISED_1000baseT_Full:
			if (port_eee_cur_cfg.capability & FAL_PHY_EEE_1000BASE_T) {
				port_eee_cfg.advertisement |= FAL_PHY_EEE_1000BASE_T;
				break;
			}

			netdev_dbg(netdev, "Advertised value 1000baseT_Full is not supported\n");
			return -EIO;

		case ADVERTISED_2500baseX_Full:
			if (port_eee_cur_cfg.capability & FAL_PHY_EEE_2500BASE_T) {
				port_eee_cfg.advertisement |= FAL_PHY_EEE_2500BASE_T;
				break;
			}

			netdev_dbg(netdev, "Advertised value 2500baseX_Full is not supported\n");
			return -EIO;

		case ADVERTISED_10000baseT_Full:
			if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10000BASE_T) {
				port_eee_cfg.advertisement |= FAL_PHY_EEE_10000BASE_T;
				break;
			}

			netdev_dbg(netdev, "Advertised value 10000baseT_Full is not supported\n");
			return -EIO;

		default:
			netdev_dbg(netdev, "Advertised value is not supported\n");
			return -EIO;
		}

		eee->advertised &= (~(1 << (pos - 1)));
	}

	port_eee_cfg.lpi_tx_enable = eee->tx_lpi_enabled;
	port_eee_cfg.lpi_sleep_timer = eee->tx_lpi_timer;

	ret = fal_port_interface_eee_cfg_set(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg);
	if (ret != SW_OK) {
		netdev_dbg(netdev, "Could not configure EEE err = %d\n", ret);
		return -EIO;
	}

	return 0;
}
/*
 * Ethtool operations
 *	Pre-4.5 kernels use the legacy get/set_settings handlers defined
 *	above; newer kernels use the generic phylib ksettings helpers.
 */
struct ethtool_ops nss_dp_ethtool_ops = {
	.get_strings = &nss_dp_get_strings,
	.get_sset_count = &nss_dp_get_strset_count,
	.get_ethtool_stats = &nss_dp_get_ethtool_stats,
	.get_link = &ethtool_op_get_link,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
	.get_settings = &nss_dp_get_settings,
	.set_settings = &nss_dp_set_settings,
#else
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
#endif
	.get_pauseparam = &nss_dp_get_pauseparam,
	.set_pauseparam = &nss_dp_set_pauseparam,
	.get_eee = &nss_dp_get_eee,
	.set_eee = &nss_dp_set_eee,
};
/*
 * nss_dp_set_ethtool_ops()
 *	Set ethtool operations
 *
 * Installs the shared nss_dp_ethtool_ops table on the given netdev.
 */
void nss_dp_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nss_dp_ethtool_ops;
}

View File

@@ -0,0 +1,830 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#if defined(NSS_DP_PPE_SUPPORT)
#include <ref/ref_vsi.h>
#endif
#include <net/switchdev.h>
#include "nss_dp_hal.h"
/*
 * Number of TX/RX queue supported is based on the number of host CPU
 */
#define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_HAL_CPU_NUM
#define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_HAL_CPU_NUM

/*
 * ipq40xx_mdio_data
 *	Mirror of the ipq40xx MDIO bus driver's drvdata layout; used by
 *	nss_dp_mdio_attach() only to pull out the mii_bus pointer.
 */
struct ipq40xx_mdio_data {
	struct mii_bus *mii_bus;	/* MDIO bus handle we need */
	void __iomem *membase;
	int phy_irq[PHY_MAX_ADDR];
};

/* Global data — shared driver state and the per-port data plane contexts */
struct nss_dp_global_ctx dp_global_ctx;
struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
 * nss_dp_do_ioctl()
 *	ndo_do_ioctl handler; forwards MII ioctls to the attached PHY.
 *	Returns -EINVAL when arguments are missing or no PHY is attached.
 */
static int32_t nss_dp_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			       int32_t cmd)
{
	struct nss_dp_dev *priv;

	if (!netdev || !ifr)
		return -EINVAL;

	priv = netdev_priv(netdev);
	if (!priv->phydev)
		return -EINVAL;

	return phy_mii_ioctl(priv->phydev, ifr, cmd);
}
/*
 * nss_dp_change_mtu()
 *	ndo_change_mtu handler; the data plane has the final say on whether
 *	the requested MTU is acceptable.
 */
static int32_t nss_dp_change_mtu(struct net_device *netdev, int32_t newmtu)
{
	struct nss_dp_dev *priv;

	if (!netdev)
		return -EINVAL;

	priv = netdev_priv(netdev);
	if (priv->data_plane_ops->change_mtu(priv->dpc, newmtu)) {
		netdev_dbg(netdev, "Data plane change mtu failed\n");
		return -EINVAL;
	}

	netdev->mtu = newmtu;
	return 0;
}
/*
 * nss_dp_set_mac_address()
 *	ndo_set_mac_address handler.
 *
 * Validates the new address, programs it into the data plane and the GMAC
 * hardware, and commits it to the netdev.  Returns 0 on success, a
 * negative errno otherwise.
 */
static int32_t nss_dp_set_mac_address(struct net_device *netdev, void *macaddr)
{
	struct nss_dp_dev *dp_priv;
	struct sockaddr *addr = (struct sockaddr *)macaddr;
	int ret = 0;

	if (!netdev)
		return -EINVAL;

	dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	/*
	 * Use %pM: printing sa_data bytes with %0x sign-extended the signed
	 * chars (e.g. "ffffffa4") and is not a valid width specifier anyway.
	 */
	netdev_dbg(netdev, "AddrFamily: %d, %pM\n", addr->sa_family, addr->sa_data);

	ret = eth_prepare_mac_addr_change(netdev, macaddr);
	if (ret)
		return ret;

	/* The data plane must accept the address before we commit it */
	if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, macaddr)) {
		netdev_dbg(netdev, "Data plane set MAC address failed\n");
		return -EAGAIN;
	}

	eth_commit_mac_addr_change(netdev, macaddr);

	/* Program the GMAC hardware with the committed address */
	dp_priv->gmac_hal_ops->setmacaddr(dp_priv->gmac_hal_ctx,
					  (uint8_t *)addr->sa_data);

	return 0;
}
/*
 * nss_dp_get_stats64()
 *	ndo_get_stats64 handler; the GMAC HAL fills in the counters.
 *	Pre-4.5 kernels use the pointer-returning signature.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static struct rtnl_link_stats64 *nss_dp_get_stats64(struct net_device *netdev,
						    struct rtnl_link_stats64 *stats)
{
	struct nss_dp_dev *dp_priv;

	if (!netdev)
		return stats;

	dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats);

	return stats;
}
#else
static void nss_dp_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *stats)
{
	struct nss_dp_dev *dp_priv;

	if (!netdev)
		return;

	dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats);
}
#endif
/*
 * nss_dp_xmit()
 *	ndo_start_xmit handler; hands the skb to the active data plane.
 */
static netdev_tx_t nss_dp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nss_dp_dev *priv;

	if (!skb || !netdev)
		return NETDEV_TX_OK;

	priv = netdev_priv(netdev);
	netdev_dbg(netdev, "Tx packet, len %d\n", skb->len);

	return priv->data_plane_ops->xmit(priv->dpc, skb);
}
/*
 * nss_dp_close()
 *	ndo_stop handler: stop the queues and tear the data plane down.
 *
 * Order matters: queues and carrier are stopped first, the data plane is
 * told the link is down, then the PHY is stopped, the VSI is unassigned
 * (PPE builds) and finally the data plane itself is closed.
 * Returns 0 on success, -EAGAIN if any data plane callback fails.
 */
static int nss_dp_close(struct net_device *netdev)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	if (!dp_priv)
		return -EINVAL;

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	/* Notify data plane link is going down */
	if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) {
		netdev_dbg(netdev, "Data plane set link failed\n");
		return -EAGAIN;
	}

	if (dp_priv->phydev)
		phy_stop(dp_priv->phydev);

	dp_priv->link_state = __NSS_DP_LINK_DOWN;

#if defined(NSS_DP_PPE_SUPPORT)
	/* Notify data plane to unassign VSI */
	if (dp_priv->data_plane_ops->vsi_unassign(dp_priv->dpc, dp_priv->vsi)) {
		netdev_dbg(netdev, "Data plane vsi unassign failed\n");
		return -EAGAIN;
	}
#endif

	/*
	 * Notify data plane to close
	 */
	if (dp_priv->data_plane_ops->close(dp_priv->dpc)) {
		netdev_dbg(netdev, "Data plane close failed\n");
		return -EAGAIN;
	}

	clear_bit(__NSS_DP_UP, &dp_priv->flags);

	return 0;
}
/*
 * nss_dp_open()
 *	ndo_open handler: bring the data plane and (optionally) the PHY up.
 *
 * Performs one-time data plane init, pushes features/MAC/MTU down, opens
 * the data plane, then either forces the link up (no polling) or starts
 * the PHY state machine.  Returns 0 on success, -ENOMEM/-EAGAIN otherwise.
 */
static int nss_dp_open(struct net_device *netdev)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);

	if (!dp_priv)
		return -EINVAL;

	netif_carrier_off(netdev);

	/*
	 * Call data plane init if it has not been done yet
	 */
	if (!(dp_priv->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE))) {
		if (dp_priv->data_plane_ops->init(dp_priv->dpc)) {
			netdev_dbg(netdev, "Data plane init failed\n");
			return -ENOMEM;
		}

		dp_priv->drv_flags |= NSS_DP_PRIV_FLAG(INIT_DONE);
	}

	/*
	 * Inform the Linux Networking stack about the hardware capability of
	 * checksum offloading and other features. Each data_plane is
	 * responsible to maintain the feature set it supports
	 */
	dp_priv->data_plane_ops->set_features(dp_priv->dpc);

	set_bit(__NSS_DP_UP, &dp_priv->flags);

#if defined(NSS_DP_PPE_SUPPORT)
	if (dp_priv->data_plane_ops->vsi_assign(dp_priv->dpc, dp_priv->vsi)) {
		netdev_dbg(netdev, "Data plane vsi assign failed\n");
		return -EAGAIN;
	}
#endif

	if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, netdev->dev_addr)) {
		netdev_dbg(netdev, "Data plane set MAC address failed\n");
		return -EAGAIN;
	}

	if (dp_priv->data_plane_ops->change_mtu(dp_priv->dpc, netdev->mtu)) {
		netdev_dbg(netdev, "Data plane change mtu failed\n");
		return -EAGAIN;
	}

	if (dp_priv->data_plane_ops->open(dp_priv->dpc, 0, 0, 0)) {
		netdev_dbg(netdev, "Data plane open failed\n");
		return -EAGAIN;
	}

	netif_start_queue(netdev);

	if (!dp_priv->link_poll) {
		/* Notify data plane link is up */
		if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) {
			netdev_dbg(netdev, "Data plane set link failed\n");
			return -EAGAIN;
		}

		dp_priv->link_state = __NSS_DP_LINK_UP;
		netif_carrier_on(netdev);
	} else {
		/* Link state will be reported via nss_dp_adjust_link() */
		dp_priv->link_state = __NSS_DP_LINK_DOWN;
		phy_start(dp_priv->phydev);
		phy_start_aneg(dp_priv->phydev);
	}

	return 0;
}
#ifdef CONFIG_RFS_ACCEL
/*
 * nss_dp_rx_flow_steer()
 *	Steer the flow rule to NSS
 *
 * Compares the CPU recorded in the per-queue RPS flow table with the CPU
 * from the global RPS socket flow table; when they differ, the old rule
 * is deleted from the data plane and a new one is added for the desired
 * CPU.  Returns 0 on success or no-op, negative errno on failure.
 */
static int nss_dp_rx_flow_steer(struct net_device *netdev, const struct sk_buff *_skb,
				uint16_t rxq, uint32_t flow)
{
	struct nss_dp_dev *dp_priv;
	struct netdev_rx_queue *rxqueue;
	struct rps_sock_flow_table *sock_flow_table;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rxflow;
	struct sk_buff *skb = (struct sk_buff *)_skb;
	uint16_t index;
	uint32_t hash;
	uint32_t rfscpu;
	uint32_t rxcpu;

	if (!netdev)
		return -EINVAL;

	dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
	if (!dp_priv)
		return -EINVAL;

	/* Locate the RX queue this skb was received on */
	rxqueue = netdev->_rx;
	if (skb_rx_queue_recorded(skb)) {
		index = skb_get_rx_queue(skb);
		rxqueue += index;
	}

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (!flow_table) {
		netdev_dbg(netdev, "RX queue RPS flow table not found\n");
		return -EINVAL;
	}

	/* CPU currently handling this flow, per the device flow table */
	hash = skb_get_hash(skb);
	rxflow = &flow_table->flows[hash & flow_table->mask];
	rxcpu = (uint32_t)rxflow->cpu;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (!sock_flow_table) {
		netdev_dbg(netdev, "Global RPS flow table not found\n");
		return -EINVAL;
	}

	/* CPU the consuming socket last ran on */
	rfscpu = sock_flow_table->ents[hash & sock_flow_table->mask];
	rfscpu &= rps_cpu_mask;

	if (rxcpu == rfscpu)
		return 0;

	/*
	 * check rx_flow_steer is defined in data plane ops
	 */
	if (!dp_priv->data_plane_ops->rx_flow_steer) {
		netdev_dbg(netdev, "Data plane ops not defined for flow steer\n");
		return -EINVAL;
	}

	/*
	 * Delete the old flow rule
	 */
	if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rxcpu, false)) {
		netdev_dbg(netdev, "Data plane delete flow rule failed\n");
		return -EAGAIN;
	}

	/*
	 * Add the new flow rule
	 */
	if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rfscpu, true)) {
		netdev_dbg(netdev, "Data plane add flow rule failed\n");
		return -EAGAIN;
	}

	return 0;
}
#endif
/*
 * nss_dp_select_queue()
 *	Select tx queue
 *
 * Maps each transmission to the queue of the CPU it runs on; the two
 * signatures track the ndo_select_queue prototype change across kernels.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
#else
static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
			       struct net_device *sb_dev)
#endif
{
	int cpu = get_cpu();
	put_cpu();

	/*
	 * The number of queue is matching the number of CPUs so get_cpu will
	 * always match a valid queue
	 */
	return cpu;
}
/*
 * Netdevice operations
 *	The switchdev bridge hooks are only needed on pre-4.5 kernels; newer
 *	kernels route them through the switchdev framework directly.
 */
static const struct net_device_ops nss_dp_netdev_ops = {
	.ndo_open = nss_dp_open,
	.ndo_stop = nss_dp_close,
	.ndo_start_xmit = nss_dp_xmit,
	.ndo_get_stats64 = nss_dp_get_stats64,
	.ndo_set_mac_address = nss_dp_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = nss_dp_change_mtu,
	.ndo_do_ioctl = nss_dp_do_ioctl,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
	.ndo_bridge_setlink = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink = switchdev_port_bridge_dellink,
#endif
	.ndo_select_queue = nss_dp_select_queue,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = nss_dp_rx_flow_steer,
#endif
};
/*
 * nss_dp_of_get_pdata()
 *	Parse the port's device-tree node into the private data and the GMAC
 *	HAL platform data.
 *
 * Reads the mandatory qcom,id / qcom,mactype / reg properties, the PHY
 * wiring (mode, link polling, mdio address) and optional forced speed/
 * duplex, then sets the netdev MAC address from DT or a random one.
 * Returns 0 on success, -EFAULT on any missing/invalid property.
 */
static int32_t nss_dp_of_get_pdata(struct device_node *np,
				   struct net_device *netdev,
				   struct gmac_hal_platform_data *hal_pdata)
{
	uint8_t *maddr;
	struct nss_dp_dev *dp_priv;
	struct resource memres_devtree = {0};

	dp_priv = netdev_priv(netdev);

	if (of_property_read_u32(np, "qcom,id", &dp_priv->macid)) {
		pr_err("%s: error reading id\n", np->name);
		return -EFAULT;
	}

	/* macid is 1-based; 0 and anything past the port count is invalid */
	if (dp_priv->macid > NSS_DP_HAL_MAX_PORTS || !dp_priv->macid) {
		pr_err("%s: invalid macid %d\n", np->name, dp_priv->macid);
		return -EFAULT;
	}

	if (of_property_read_u32(np, "qcom,mactype", &hal_pdata->mactype)) {
		pr_err("%s: error reading mactype\n", np->name);
		return -EFAULT;
	}

	if (of_address_to_resource(np, 0, &memres_devtree) != 0)
		return -EFAULT;

	netdev->base_addr = memres_devtree.start;
	hal_pdata->reg_len = resource_size(&memres_devtree);
	hal_pdata->netdev = netdev;
	hal_pdata->macid = dp_priv->macid;

	dp_priv->phy_mii_type = of_get_phy_mode(np);
	dp_priv->link_poll = of_property_read_bool(np, "qcom,link-poll");

	/* The MDIO address is mandatory only when link polling is enabled */
	if (of_property_read_u32(np, "qcom,phy-mdio-addr",
				 &dp_priv->phy_mdio_addr) && dp_priv->link_poll) {
		pr_err("%s: mdio addr required if link polling is enabled\n",
		       np->name);
		return -EFAULT;
	}

	of_property_read_u32(np, "qcom,forced-speed", &dp_priv->forced_speed);
	of_property_read_u32(np, "qcom,forced-duplex", &dp_priv->forced_duplex);

	maddr = (uint8_t *)of_get_mac_address(np);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
	/* of_get_mac_address() returns ERR_PTR on 5.4+ instead of NULL */
	if (IS_ERR((void *)maddr)) {
		maddr = NULL;
	}
#endif

	if (maddr && is_valid_ether_addr(maddr)) {
		ether_addr_copy(netdev->dev_addr, maddr);
	} else {
		random_ether_addr(netdev->dev_addr);
		pr_info("GMAC%d(%px) Invalid MAC@ - using %pM\n", dp_priv->macid,
			dp_priv, netdev->dev_addr);
	}

	return 0;
}
/*
 * nss_dp_mdio_attach()
 *	Locate the MDIO bus this port hangs off.
 *
 * Prefers a "mdio-bus" phandle; falls back to the ipq40xx MDIO node's
 * drvdata.  Returns the mii_bus pointer or NULL.
 *
 * Fix: of_parse_phandle()/of_find_compatible_node() return nodes with an
 * elevated refcount; every exit path now drops it with of_node_put().
 */
static struct mii_bus *nss_dp_mdio_attach(struct platform_device *pdev)
{
	struct device_node *mdio_node;
	struct platform_device *mdio_plat;
	struct ipq40xx_mdio_data *mdio_data;
	struct mii_bus *bus;

	/*
	 * Find mii_bus using "mdio-bus" handle.
	 */
	mdio_node = of_parse_phandle(pdev->dev.of_node, "mdio-bus", 0);
	if (mdio_node) {
		bus = of_mdio_find_bus(mdio_node);
		of_node_put(mdio_node);
		return bus;
	}

	mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq40xx-mdio");
	if (!mdio_node) {
		dev_err(&pdev->dev, "cannot find mdio node by phandle\n");
		return NULL;
	}

	mdio_plat = of_find_device_by_node(mdio_node);
	if (!mdio_plat) {
		dev_err(&pdev->dev, "cannot find platform device from mdio node\n");
		of_node_put(mdio_node);
		return NULL;
	}

	mdio_data = dev_get_drvdata(&mdio_plat->dev);
	if (!mdio_data) {
		dev_err(&pdev->dev, "cannot get mii bus reference from device data\n");
		of_node_put(mdio_node);
		return NULL;
	}

	of_node_put(mdio_node);
	return mdio_data->mii_bus;
}
#ifdef CONFIG_NET_SWITCHDEV
/*
 * nss_dp_is_phy_dev()
 *	Check if it is dp device
 *
 * A netdev belongs to this driver iff it uses our net_device_ops table.
 */
bool nss_dp_is_phy_dev(struct net_device *dev)
{
	return (dev->netdev_ops == &nss_dp_netdev_ops);
}
#endif
/*
 * nss_dp_adjust_link()
 *	PHY link-change callback registered via phy_connect().
 *
 * Mirrors the PHY's link state into the data plane and the netdev carrier
 * flag; returns early when the interface is down or nothing changed.
 */
void nss_dp_adjust_link(struct net_device *netdev)
{
	struct nss_dp_dev *dp_priv = netdev_priv(netdev);
	int current_state = dp_priv->link_state;

	if (!test_bit(__NSS_DP_UP, &dp_priv->flags))
		return;

	/* No transition — already in the state the PHY reports */
	if (dp_priv->phydev->link && (current_state == __NSS_DP_LINK_UP))
		return;

	if (!dp_priv->phydev->link && (current_state == __NSS_DP_LINK_DOWN))
		return;

	if (current_state == __NSS_DP_LINK_DOWN) {
		netdev_info(netdev, "PHY Link up speed: %d\n",
			    dp_priv->phydev->speed);
		/* Only flip our state once the data plane has accepted it */
		if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) {
			netdev_dbg(netdev, "Data plane set link up failed\n");
			return;
		}
		dp_priv->link_state = __NSS_DP_LINK_UP;
		netif_carrier_on(netdev);
	} else {
		netdev_info(netdev, "PHY Link is down\n");
		if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) {
			netdev_dbg(netdev, "Data plane set link down failed\n");
			return;
		}
		dp_priv->link_state = __NSS_DP_LINK_DOWN;
		netif_carrier_off(netdev);
	}
}
/*
 * nss_dp_probe()
 *	Platform probe for one GMAC port.
 *
 * Allocates the multi-queue netdev, parses the DT node, binds the data
 * plane ops and GMAC HAL, optionally attaches a PHY for link polling,
 * fetches the port's default VSI (PPE builds) and registers the netdev.
 *
 * NOTE(review): the common fail: path only frees the netdev — a PHY
 * connected via phy_connect() and a HAL context created by hal_ops->init()
 * are not released if a later step fails, and the function always returns
 * -EFAULT instead of the more specific ret. Confirm and fix separately.
 */
static int32_t nss_dp_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct nss_dp_dev *dp_priv;
	struct device_node *np = pdev->dev.of_node;
	struct gmac_hal_platform_data gmac_hal_pdata;
	int32_t ret = 0;
	uint8_t phy_id[MII_BUS_ID_SIZE + 3];
#if defined(NSS_DP_PPE_SUPPORT)
	uint32_t vsi_id;
	fal_port_t port_id;
#endif

	/* TODO: See if we need to do some SoC level common init */

	/* One TX and one RX queue per host CPU */
	netdev = alloc_etherdev_mqs(sizeof(struct nss_dp_dev),
				    NSS_DP_NETDEV_TX_QUEUE_NUM, NSS_DP_NETDEV_RX_QUEUE_NUM);
	if (!netdev) {
		pr_info("alloc_etherdev() failed\n");
		return -ENOMEM;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	/* max_mtu is set to 1500 in ether_setup() */
	netdev->max_mtu = ETH_MAX_MTU;
#endif

	dp_priv = netdev_priv(netdev);
	memset((void *)dp_priv, 0, sizeof(struct nss_dp_dev));

	dp_priv->pdev = pdev;
	dp_priv->netdev = netdev;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->netdev_ops = &nss_dp_netdev_ops;
	nss_dp_set_ethtool_ops(netdev);
#ifdef CONFIG_NET_SWITCHDEV
	nss_dp_switchdev_setup(netdev);
#endif

	ret = nss_dp_of_get_pdata(np, netdev, &gmac_hal_pdata);
	if (ret != 0) {
		goto fail;
	}

	/* Use data plane ops as per the configured SoC */
	dp_priv->data_plane_ops = nss_dp_hal_get_data_plane_ops();
	if (!dp_priv->data_plane_ops) {
		netdev_dbg(netdev, "Dataplane ops not found.\n");
		goto fail;
	}

	/* macid is 1-based, the context array is 0-based */
	dp_priv->dpc = &dp_global_data_plane_ctx[dp_priv->macid-1];
	dp_priv->dpc->dev = netdev;
	dp_priv->ctx = &dp_global_ctx;

	/* TODO:locks init */

	/*
	 * HAL's init function will return the pointer to the HAL context
	 * (private to hal), which dp will store in its data structures.
	 * The subsequent hal_ops calls expect the DP to pass the HAL
	 * context pointer as an argument
	 */
	dp_priv->gmac_hal_ops = nss_dp_hal_get_gmac_ops(gmac_hal_pdata.mactype);
	if (!dp_priv->gmac_hal_ops) {
		netdev_dbg(netdev, "Unsupported Mac type: %d\n", gmac_hal_pdata.mactype);
		goto fail;
	}

	dp_priv->gmac_hal_ctx = dp_priv->gmac_hal_ops->init(&gmac_hal_pdata);
	if (!(dp_priv->gmac_hal_ctx)) {
		netdev_dbg(netdev, "gmac hal init failed\n");
		goto fail;
	}

	/* Attach a PHY only when DT asked for link polling */
	if (dp_priv->link_poll) {
		dp_priv->miibus = nss_dp_mdio_attach(pdev);
		if (!dp_priv->miibus) {
			netdev_dbg(netdev, "failed to find miibus\n");
			goto fail;
		}
		snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 dp_priv->miibus->id, dp_priv->phy_mdio_addr);

		SET_NETDEV_DEV(netdev, &pdev->dev);

		dp_priv->phydev = phy_connect(netdev, phy_id,
					      &nss_dp_adjust_link,
					      dp_priv->phy_mii_type);
		if (IS_ERR(dp_priv->phydev)) {
			netdev_dbg(netdev, "failed to connect to phy device\n");
			goto fail;
		}

		/* Advertise and support pause frames on the attached PHY */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
		dp_priv->phydev->advertising |=
			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		dp_priv->phydev->supported |=
			(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
#else
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->advertising);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->advertising);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->supported);
#endif
	}

#if defined(NSS_DP_PPE_SUPPORT)
	/* Get port's default VSI */
	port_id = dp_priv->macid;
	if (ppe_port_vsi_get(0, port_id, &vsi_id)) {
		netdev_dbg(netdev, "failed to get port's default VSI\n");
		goto fail;
	}

	dp_priv->vsi = vsi_id;
#endif

	/* TODO: Features: CSUM, tx/rx offload... configure */

	/* Register the network interface */
	ret = register_netdev(netdev);
	if (ret) {
		netdev_dbg(netdev, "Error registering netdevice %s\n",
			   netdev->name);
		dp_priv->gmac_hal_ops->exit(dp_priv->gmac_hal_ctx);
		goto fail;
	}

	/* Publish the port in the global context */
	dp_global_ctx.nss_dp[dp_priv->macid - 1] = dp_priv;
	dp_global_ctx.slowproto_acl_bm = 0;

	netdev_dbg(netdev, "Init NSS DP GMAC%d (base = 0x%lx)\n", dp_priv->macid, netdev->base_addr);

	return 0;

fail:
	free_netdev(netdev);

	return -EFAULT;
}
/*
 * nss_dp_remove()
 *	Platform driver removal: tear down every data-plane port that was
 *	registered in the global context during probe.
 */
static int nss_dp_remove(struct platform_device *pdev)
{
	struct nss_gmac_hal_ops *ops;
	struct nss_dp_dev *dp;
	uint32_t port;

	for (port = 0; port < NSS_DP_HAL_MAX_PORTS; port++) {
		dp = dp_global_ctx.nss_dp[port];
		if (!dp)
			continue;

		/* Grab the hal ops pointer before the netdev is freed */
		ops = dp->gmac_hal_ops;

		/* Detach the PHY first, if link polling attached one */
		if (dp->phydev)
			phy_disconnect(dp->phydev);

		unregister_netdev(dp->netdev);
		ops->exit(dp->gmac_hal_ctx);
		free_netdev(dp->netdev);
		dp_global_ctx.nss_dp[port] = NULL;
	}

	return 0;
}
/*
 * Device-tree match table: binds this driver to "qcom,nss-dp" nodes.
 */
static struct of_device_id nss_dp_dt_ids[] = {
{ .compatible = "qcom,nss-dp" },
{},
};
MODULE_DEVICE_TABLE(of, nss_dp_dt_ids);

/*
 * Platform driver descriptor for the NSS data-plane ports.
 */
static struct platform_driver nss_dp_drv = {
.probe = nss_dp_probe,
.remove = nss_dp_remove,
.driver = {
.name = "nss-dp",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(nss_dp_dt_ids),
},
};
/*
 * nss_dp_init()
 *	Module entry point: initialize the DP HAL and register the
 *	NSS data-plane platform driver.
 *
 * Returns 0 on success (including on unsupported platforms, where the
 * driver deliberately stays inactive) and a negative errno on failure.
 */
int __init nss_dp_init(void)
{
	int ret;

	/*
	 * Bail out on not supported platform
	 * TODO: Handle this properly with SoC ops
	 */
	if (!of_machine_is_compatible("qcom,ipq807x") &&
		!of_machine_is_compatible("qcom,ipq8074") &&
		!of_machine_is_compatible("qcom,ipq6018") &&
		!of_machine_is_compatible("qcom,ipq5018"))
		return 0;

	/*
	 * TODO Move this to soc_ops
	 */
	dp_global_ctx.common_init_done = false;
	if (!nss_dp_hal_init()) {
		pr_err("DP hal init failed.\n");
		return -EFAULT;
	}

	ret = platform_driver_register(&nss_dp_drv);
	if (ret) {
		/*
		 * Undo the HAL init and propagate the error. Previously the
		 * failure was logged at info level while common_init_done
		 * was still set and the success banner printed.
		 */
		pr_err("NSS DP platform drv register failed\n");
		nss_dp_hal_cleanup();
		return ret;
	}

	dp_global_ctx.common_init_done = true;
	pr_info("**********************************************************\n");
	pr_info("* NSS Data Plane driver\n");
	pr_info("**********************************************************\n");
	return 0;
}
/*
 * nss_dp_exit()
 *	Module exit point: undo nss_dp_init().
 */
void __exit nss_dp_exit(void)
{
/*
 * TODO Move this to soc_ops
 *
 * Only clean up the HAL when init completed; common_init_done guards
 * against unloading a module whose init bailed out early.
 */
if (dp_global_ctx.common_init_done) {
nss_dp_hal_cleanup();
dp_global_ctx.common_init_done = false;
}
platform_driver_unregister(&nss_dp_drv);
}
module_init(nss_dp_init);
module_exit(nss_dp_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("NSS Data Plane Network Driver");

View File

@@ -0,0 +1,337 @@
/*
**************************************************************************
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <net/switchdev.h>
#include "nss_dp_dev.h"
#include "fal/fal_stp.h"
#include "fal/fal_ctrlpkt.h"
#define NSS_DP_SWITCH_ID 0
#define NSS_DP_SW_ETHTYPE_PID 0 /* PPE ethtype profile ID for slow protocols */
#define ETH_P_NONE 0
/*
 * nss_dp_set_slow_proto_filter()
 *	Enable/Disable filter to allow Ethernet slow-protocol
 *
 * When enabled, a PPE control-packet profile redirects slow-protocol
 * frames (ETH_P_SLOW, e.g. LACP) on this port to the CPU even while the
 * port is in an STP-disabled state. dp_priv->ctx->slowproto_acl_bm
 * tracks which ports hold the filter so the shared ethertype profile is
 * programmed on first use and released when the last port drops it.
 */
static void nss_dp_set_slow_proto_filter(struct nss_dp_dev *dp_priv, bool filter_enable)
{
	sw_error_t ret = 0;
	fal_ctrlpkt_profile_t profile;
	fal_ctrlpkt_action_t action;

	memset(&profile, 0, sizeof(profile));

	/*
	 * Zero the action as well: only a subset of its fields is assigned
	 * below and "profile.action = action" would otherwise copy
	 * uninitialized stack bytes into the zeroed profile.
	 */
	memset(&action, 0, sizeof(action));

	/*
	 * Action is redirect cpu
	 */
	action.action = FAL_MAC_RDT_TO_CPU;
	action.sg_bypass = A_FALSE;

	/*
	 * Bypass stp
	 */
	action.in_stp_bypass = A_TRUE;
	action.in_vlan_fltr_bypass = A_FALSE;
	action.l2_filter_bypass = A_FALSE;
	profile.action = action;
	profile.ethtype_profile_bitmap = 0x1;

	/*
	 * Set port map
	 */
	profile.port_map = (1 << dp_priv->macid);

	if (filter_enable) {
		ret = fal_mgmtctrl_ctrlpkt_profile_add(NSS_DP_SWITCH_ID, &profile);
		if (ret != SW_OK) {
			netdev_dbg(dp_priv->netdev, "failed to add profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
			return;
		}

		/*
		 * Enable filter to allow ethernet slow-protocol,
		 * if this is the first port being disabled by STP
		 */
		if (!dp_priv->ctx->slowproto_acl_bm) {
			ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_SLOW);
			if (ret != SW_OK) {
				netdev_dbg(dp_priv->netdev, "failed to set ethertype profile: 0x%x, ret: %d\n", ETH_P_SLOW, ret);

				/* Roll back the control-packet profile added above */
				ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile);
				if (ret != SW_OK) {
					netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
				}
				return;
			}
		}

		/*
		 * Add port to port bitmap
		 */
		dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm | (1 << dp_priv->macid);
	} else {
		ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile);
		if (ret != SW_OK) {
			netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
			return;
		}

		/*
		 * Delete port from port bitmap
		 */
		dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm & (~(1 << dp_priv->macid));

		/*
		 * If all ports are in STP-enabled state, then we do not need
		 * the filter to allow ethernet slow protocol packets
		 */
		if (!dp_priv->ctx->slowproto_acl_bm) {
			ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_NONE);
			if (ret != SW_OK) {
				netdev_dbg(dp_priv->netdev, "failed to reset ethertype profile: 0x%x ret: %d\n", ETH_P_NONE, ret);
			}
		}
	}
}
/*
 * nss_dp_stp_state_set()
 *	Set bridge port STP state to the port of NSS data plane.
 *
 * Maps the kernel bridge STP state onto the PPE (fal) STP state and
 * programs it for this port. Entering the disabled state additionally
 * installs a filter so slow-protocol frames (e.g. LACP) still reach the
 * CPU; entering forwarding removes it again. On hardware failure the
 * filter change is rolled back.
 *
 * Returns 0 on success, -EOPNOTSUPP for unknown states, -EINVAL when
 * the hardware rejects the state.
 */
static int nss_dp_stp_state_set(struct nss_dp_dev *dp_priv, u8 state)
{
	sw_error_t err;
	fal_stp_state_t stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = FAL_STP_DISABLED;

		/*
		 * Dynamic bond interfaces which are bridge slaves need to receive
		 * ethernet slow protocol packets for LACP protocol even in STP
		 * disabled state
		 */
		nss_dp_set_slow_proto_filter(dp_priv, true);
		break;
	case BR_STATE_LISTENING:
		stp_state = FAL_STP_LISTENING;
		break;
	case BR_STATE_BLOCKING:
		stp_state = FAL_STP_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = FAL_STP_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = FAL_STP_FORWARDING;

		/*
		 * Remove the filter for allowing ethernet slow protocol packets
		 * for bond interfaces
		 */
		nss_dp_set_slow_proto_filter(dp_priv, false);
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = fal_stp_port_state_set(NSS_DP_SWITCH_ID, 0, dp_priv->macid,
				     stp_state);
	if (err) {
		/* Message fixed: previously read "failed to set ftp state" */
		netdev_dbg(dp_priv->netdev, "failed to set stp state\n");

		/*
		 * Restore the slow proto filters
		 */
		if (state == BR_STATE_DISABLED)
			nss_dp_set_slow_proto_filter(dp_priv, false);
		else if (state == BR_STATE_FORWARDING)
			nss_dp_set_slow_proto_filter(dp_priv, true);
		return -EINVAL;
	}

	return 0;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
 * nss_dp_attr_get()
 *	Get port information to update switchdev attribute for NSS data plane.
 *
 * Pre-4.5 switchdev getter: reports the switch parent ID (all NSS ports
 * share switch 0) and the cached bridge-port flags.
 */
static int nss_dp_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
/* Single-switch platform: every port reports parent id 0 */
attr->u.ppid.id_len = 1;
attr->u.ppid.id[0] = NSS_DP_SWITCH_ID;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags = dp_priv->brport_flags;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
/*
 * nss_dp_attr_set()
 *	Get switchdev attribute and set to the device of NSS data plane.
 *
 * Pre-4.5 switchdev setter. The prepare phase of the transaction is a
 * no-op; the commit phase caches bridge-port flags or programs the STP
 * state into hardware via nss_dp_stp_state_set().
 */
static int nss_dp_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
/* Nothing to pre-allocate; only act on the commit phase */
if (switchdev_trans_ph_prepare(trans))
return 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
dp_priv->brport_flags = attr->u.brport_flags;
netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags);
return 0;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
return nss_dp_stp_state_set(dp_priv, attr->u.stp_state);
default:
return -EOPNOTSUPP;
}
}
/*
 * nss_dp_switchdev_ops
 *	Switchdev operations of NSS data plane.
 */
static const struct switchdev_ops nss_dp_switchdev_ops = {
.switchdev_port_attr_get = nss_dp_attr_get,
.switchdev_port_attr_set = nss_dp_attr_set,
};

/*
 * nss_dp_switchdev_setup()
 *	Set up NSS data plane switchdev operations.
 *
 * Pre-4.5 variant: attaches the per-netdev switchdev ops and clears the
 * port's forwarding mark.
 */
void nss_dp_switchdev_setup(struct net_device *dev)
{
dev->switchdev_ops = &nss_dp_switchdev_ops;
switchdev_port_fwd_mark_set(dev, NULL, false);
}
#else
/*
 * nss_dp_port_attr_set()
 *	Sets attributes
 *
 * Commit-phase handler: caches bridge-port flags on the private data or
 * pushes the STP state into hardware; anything else is unsupported.
 */
static int nss_dp_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);

	/* The prepare phase needs no resources */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (attr->id == SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS) {
		dp_priv->brport_flags = attr->u.brport_flags;
		netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags);
		return 0;
	}

	if (attr->id == SWITCHDEV_ATTR_ID_PORT_STP_STATE)
		return nss_dp_stp_state_set(dp_priv, attr->u.stp_state);

	return -EOPNOTSUPP;
}
/*
 * nss_dp_switchdev_port_attr_set_event()
 *	Attribute set event
 *
 * Unpacks the notifier payload, applies the attribute, and marks the
 * event handled so other switchdev listeners skip it.
 */
static int nss_dp_switchdev_port_attr_set_event(struct net_device *netdev,
struct switchdev_notifier_port_attr_info *port_attr_info)
{
int err;
err = nss_dp_port_attr_set(netdev, port_attr_info->attr,
port_attr_info->trans);
port_attr_info->handled = true;
return notifier_from_errno(err);
}
/*
 * nss_dp_switchdev_event()
 *	Switch dev event on netdevice
 *
 * Dispatches blocking switchdev notifications for NSS physical ports;
 * events on unrelated devices are ignored.
 */
static int nss_dp_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	/* Only physical NSS data-plane ports are of interest */
	if (!nss_dp_is_phy_dev(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		nss_dp_switchdev_port_attr_set_event(dev, ptr);
		break;
	}

	return NOTIFY_DONE;
}
/* Blocking switchdev notifier shared by all NSS data-plane ports */
static struct notifier_block nss_dp_switchdev_notifier = {
.notifier_call = nss_dp_switchdev_event,
};

/* True once the switchdev notifier has been registered (one-shot) */
static bool switch_init_done;
/*
 * nss_dp_switchdev_setup()
 *	Setup switch dev
 *
 * Registers the blocking switchdev notifier once for all ports. On
 * registration failure the init flag is left clear so a later port's
 * setup call retries, instead of permanently running without a
 * notifier as the previous code did.
 */
void nss_dp_switchdev_setup(struct net_device *dev)
{
	int err;

	if (switch_init_done) {
		return;
	}

	err = register_switchdev_blocking_notifier(&nss_dp_switchdev_notifier);
	if (err) {
		netdev_dbg(dev, "%px:Failed to register switchdev notifier\n", dev);
		return;
	}

	switch_init_done = true;
}
#endif

View File

@@ -0,0 +1,111 @@
# OpenWrt kernel-module package for the QCA NSS core driver (qca-nss-drv).
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk

PKG_NAME:=qca-nss-drv
PKG_BRANCH:=master
PKG_RELEASE:=2

# Location of the qca-nss-clients tree, whose exported headers may be
# staged alongside this driver's headers.
NSS_CLIENTS_DIR:=$(TOPDIR)/qca/src/qca-nss-clients

include $(INCLUDE_DIR)/package.mk

# Package metadata: only builds for ipq807x targets and autoloads after
# the data-plane driver (kmod-qca-nss-dp).
define KernelPackage/qca-nss-drv
SECTION:=kernel
CATEGORY:=Kernel modules
SUBMENU:=Network Devices
DEPENDS:=@TARGET_ipq807x +kmod-qca-nss-dp
TITLE:=Kernel driver for NSS (core driver)
FILES:=$(PKG_BUILD_DIR)/qca-nss-drv.ko
AUTOLOAD:=$(call AutoLoad,32,qca-nss-drv)
endef

# Runtime support files: sysdebug dumper, init script, sysctl defaults
# and the UCI config consumed by the firmware hotplug/init scripts.
define KernelPackage/qca-nss-drv/install
$(INSTALL_DIR) $(1)/lib/debug
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_DIR) $(1)/etc/sysctl.d
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_BIN) ./files/qca-nss-drv.debug $(1)/lib/debug/qca-nss-drv
$(INSTALL_BIN) ./files/qca-nss-drv.init $(1)/etc/init.d/qca-nss-drv
$(INSTALL_BIN) ./files/qca-nss-drv.sysctl $(1)/etc/sysctl.d/qca-nss-drv.conf
$(INSTALL_BIN) ./files/qca-nss-drv.conf $(1)/etc/config/nss
endef

define KernelPackage/qca-nss-drv/Description
This package contains a NSS driver for QCA chipset
endef

# Stage exported headers for packages that compile against this driver.
# NOTE(review): $(subtarget) is assigned further down in this file; that
# works because define bodies expand at rule-execution time, not parse time.
define Build/InstallDev
mkdir -p $(1)/usr/include/qca-nss-drv
$(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-drv/
ifneq (, $(findstring $(subtarget), "ipq807x" "ipq807x_64" "ipq60xx" "ipq60xx_64" "ipq50xx" "ipq50xx_64"))
$(RM) $(1)/usr/include/qca-nss-drv/nss_ipsecmgr.h
$(INSTALL_DIR) $(1)/usr/include/qca-nss-clients
# $(CP) $(NSS_CLIENTS_DIR)/exports/nss_ipsecmgr.h $(1)/usr/include/qca-nss-clients/.
endif
endef

EXTRA_CFLAGS+= -I$(STAGING_DIR)/usr/include/qca-nss-gmac -I$(STAGING_DIR)/usr/include/qca-nss-dp

# Keeping default as ipq806x for branches that does not have subtarget framework
subtarget:=$(SUBTARGET)

# Memory-profile and SKB-size compile-time knobs from the kernel config.
ifeq ($(CONFIG_KERNEL_IPQ_MEM_PROFILE),256)
EXTRA_CFLAGS+= -DNSS_MEM_PROFILE_LOW
endif
ifeq ($(CONFIG_KERNEL_IPQ_MEM_PROFILE),512)
EXTRA_CFLAGS+= -DNSS_MEM_PROFILE_MEDIUM
endif
ifeq ($(CONFIG_KERNEL_SKB_FIXED_SIZE_2K),y)
EXTRA_CFLAGS+= -DNSS_SKB_FIXED_SIZE_2K
endif

DRV_MAKE_OPTS:=

# On the 256MB memory profile, compile out every optional NSS subsystem.
ifeq ($(CONFIG_KERNEL_IPQ_MEM_PROFILE),256)
DRV_MAKE_OPTS+=NSS_DRV_C2C_ENABLE=n \
NSS_DRV_CAPWAP_ENABLE=n \
NSS_DRV_CLMAP_ENABLE=n \
NSS_DRV_CRYPTO_ENABLE=n \
NSS_DRV_DTLS_ENABLE=n \
NSS_DRV_GRE_ENABLE=n \
NSS_DRV_GRE_REDIR_ENABLE=n \
NSS_DRV_GRE_TUNNEL_ENABLE=n \
NSS_DRV_IGS_ENABLE=n \
NSS_DRV_IPSEC_ENABLE=n \
NSS_DRV_LAG_ENABLE=n \
NSS_DRV_L2TP_ENABLE=n \
NSS_DRV_MAPT_ENABLE=n \
NSS_DRV_OAM_ENABLE=n \
NSS_DRV_PPTP_ENABLE=n \
NSS_DRV_PORTID_ENABLE=n \
NSS_DRV_PVXLAN_ENABLE=n \
NSS_DRV_QRFS_ENABLE=n \
NSS_DRV_QVPN_ENABLE=n \
NSS_DRV_RMNET_ENABLE=n \
NSS_DRV_SHAPER_ENABLE=n \
NSS_DRV_SJACK_ENABLE=n \
NSS_DRV_TLS_ENABLE=n \
NSS_DRV_TRUSTSEC_ENABLE=n \
NSS_DRV_TSTAMP_ENABLE=n \
NSS_DRV_TUN6RD_ENABLE=n \
NSS_DRV_TUNIPIP6_ENABLE=n \
NSS_DRV_VXLAN_ENABLE=n
endif

# Point the generic arch header at the subtarget-specific variant.
define Build/Configure
$(LN) arch/nss_$(subtarget).h $(PKG_BUILD_DIR)/exports/nss_arch.h
endef

# Out-of-tree kernel module build via the kernel's kbuild (M=).
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" $(strip $(DRV_MAKE_OPTS)) \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
M="$(PKG_BUILD_DIR)" \
EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \
modules
endef

$(eval $(call KernelPackage,qca-nss-drv))

View File

@@ -0,0 +1,6 @@
# Per-core firmware selection sections; the firmware hotplug script reads
# nss.<section>.firmware to pick an image for each NSS core.
config nss_firmware 'qca_nss_0'
config nss_firmware 'qca_nss_1'
# General knobs consumed by /etc/init.d/qca-nss-drv.
config general
option enable_rps '1'

View File

@@ -0,0 +1,26 @@
#!/bin/sh /sbin/sysdebug
#
# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Dump every qca-nss-drv statistics node exposed under debugfs into the
# sysdebug report.
log cat /sys/kernel/debug/qca-nss-drv/stats/pppoe
log cat /sys/kernel/debug/qca-nss-drv/stats/n2h
log cat /sys/kernel/debug/qca-nss-drv/stats/ipv6
log cat /sys/kernel/debug/qca-nss-drv/stats/ipv4
log cat /sys/kernel/debug/qca-nss-drv/stats/gmac
log cat /sys/kernel/debug/qca-nss-drv/stats/drv
log cat /sys/kernel/debug/qca-nss-drv/stats/wifi
log cat /sys/kernel/debug/qca-nss-drv/stats/wifi_if
log cat /sys/kernel/debug/qca-nss-drv/stats/eth_rx

View File

@@ -0,0 +1,70 @@
#!/bin/sh
#
# Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Firmware hotplug handler for the NSS cores.
#
# On 3.4 kernels the legacy /sys/class/firmware loading interface is
# used to push the image by hand; on newer kernels it is enough to point
# a /lib/firmware symlink at the wanted image for the in-kernel loader.
KERNEL=`uname -r`
case "${KERNEL}" in
3.4*)
select_or_load=load_nss_fw
;;
*)
select_or_load=select_nss_fw
;;
esac

# load_nss_fw <image>
#	Push <image> through the legacy sysfs firmware-loading interface
#	for the device named in $DEVICENAME (logging name/size to console).
load_nss_fw () {
ls -l $1 | awk ' { print $9,$5 } '> /dev/console
echo 1 > /sys/class/firmware/$DEVICENAME/loading
cat $1 > /sys/class/firmware/$DEVICENAME/data
echo 0 > /sys/class/firmware/$DEVICENAME/loading
}

# select_nss_fw <image>
#	Point /lib/firmware/$DEVICENAME at <image> for the in-kernel loader.
select_nss_fw () {
rm -f /lib/firmware/$DEVICENAME
ln -s $1 /lib/firmware/$DEVICENAME
ls -l /lib/firmware/$DEVICENAME | awk ' { print $9,$5 } '> /dev/console
}

# Only firmware "add" hotplug events are of interest.
[ "$ACTION" != "add" ] && exit

# dev name for UCI, since it doesn't let you use . or -
SDEVNAME=$(echo ${DEVICENAME} | sed s/[.-]/_/g)

# A UCI-configured firmware path (nss.<dev>.firmware) wins over defaults.
SELECTED_FW=$(uci get nss.${SDEVNAME}.firmware 2>/dev/null)
[ -e "${SELECTED_FW}" ] && {
$select_or_load ${SELECTED_FW}
exit
}

# Fallback per core: prefer the enterprise image, else the retail one.
case $DEVICENAME in
qca-nss0* | qca-nss.0*)
if [ -e /lib/firmware/qca-nss0-enterprise.bin ] ; then
$select_or_load /lib/firmware/qca-nss0-enterprise.bin
else
$select_or_load /lib/firmware/qca-nss0-retail.bin
fi
exit
;;
qca-nss1* | qca-nss.1*)
if [ -e /lib/firmware/qca-nss1-enterprise.bin ] ; then
$select_or_load /lib/firmware/qca-nss1-enterprise.bin
else
$select_or_load /lib/firmware/qca-nss1-retail.bin
fi
exit
;;
esac

View File

@@ -0,0 +1,50 @@
#!/bin/sh /etc/rc.common
#
# Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
START=70

# enable_rps()
#	Steer each NSS queue interrupt to its own CPU and turn on NSS RPS.
#
# Queue N interrupts get an affinity mask of (1 << N), i.e. queue 1 ->
# CPU1, queue 2 -> CPU2, queue 3 -> CPU3 (matching the original per-queue
# stanzas); queue 0 keeps the default affinity.
enable_rps() {
	local queue mask entry irq_nss_rps

	for queue in 1 2 3; do
		mask=$((1 << queue))
		irq_nss_rps=$(grep nss_queue$queue /proc/interrupts | cut -d ':' -f 1 | tr -d ' ')
		for entry in $irq_nss_rps; do
			echo $mask > /proc/irq/$entry/smp_affinity
		done
	done

	# Enable NSS RPS
	sysctl -w dev.nss.rps.enable=1 >/dev/null 2>/dev/null
}

start() {
	local rps_enabled="$(uci_get nss @general[0] enable_rps)"

	# Default to 0 when the UCI option is absent, so the numeric test
	# never runs on an empty string ("[: bad number" in the old code).
	if [ "${rps_enabled:-0}" -eq 1 ]; then
		enable_rps
	fi
}

Some files were not shown because too many files have changed in this diff Show More