diff --git a/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch new file mode 100644 index 00000000..5d493c1d --- /dev/null +++ b/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch @@ -0,0 +1,35045 @@ +From 340daa3e4a9851ab640062065eff4501e6f7cc61 Mon Sep 17 00:00:00 2001 +From: Shengzhou Liu +Date: Fri, 23 Sep 2016 13:45:59 +0800 +Subject: [PATCH 1/2] Add fsl-dpaa2 and fsl-mc support based on 3.18.25 + +This patch integrates a large set of patches to support DPAA2.0 & MC +networking, which is used on the LS2080A/LS2088A RDB. +--- + MAINTAINERS | 27 + + arch/arm64/include/asm/io.h | 1 + + arch/arm64/include/asm/pgtable.h | 1 + + drivers/net/ethernet/freescale/Kconfig | 8 +- + drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +- + drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 2 +- + .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 4 +- + .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +- + drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4 +- + drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +- + drivers/net/ethernet/freescale/gianfar.c | 2 +- + drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +- + drivers/net/ethernet/freescale/ucc_geth.c | 2 +- + drivers/net/ethernet/freescale/xgmac_mdio.c | 194 +- + drivers/net/phy/Kconfig | 5 + + drivers/net/phy/Makefile | 1 + + drivers/net/phy/aquantia.c | 201 ++ + drivers/net/phy/fsl_10gkr.c | 1467 ++++++++++ + drivers/net/phy/teranetics.c | 135 + + drivers/staging/Kconfig | 4 + + drivers/staging/Makefile | 2 + + drivers/staging/fsl-dpaa2/Kconfig | 12 + + drivers/staging/fsl-dpaa2/Makefile | 6 + + drivers/staging/fsl-dpaa2/ethernet/Kconfig | 36 + + drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 + + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 317 +++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 + + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2836 ++++++++++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 377 +++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 861 ++++++ + drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++ + drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 ++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 +++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 ++++++++++++++++++ + drivers/staging/fsl-dpaa2/mac/Kconfig | 24 + + drivers/staging/fsl-dpaa2/mac/Makefile | 10 + + drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++ + drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 +++ + drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++ + drivers/staging/fsl-dpaa2/mac/mac.c | 694 +++++ + drivers/staging/fsl-mc/Kconfig | 1 + + drivers/staging/fsl-mc/Makefile | 2 + + drivers/staging/fsl-mc/TODO | 13 + + drivers/staging/fsl-mc/bus/Kconfig | 45 + + drivers/staging/fsl-mc/bus/Makefile | 24 + + drivers/staging/fsl-mc/bus/dpbp.c | 459 ++++ + drivers/staging/fsl-mc/bus/dpcon.c | 407 +++ + drivers/staging/fsl-mc/bus/dpio/Makefile | 9 + + drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 401 +++ + drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 + + drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 ++++ + drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 ++++++ + drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 ++++ + drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 ++ + drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 + +
drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 ++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 + + drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 +++++++++ + drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 ++ + drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 ++ + drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 +++ + drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 + + drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++ + drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 + + drivers/staging/fsl-mc/bus/dpmcp.c | 318 +++ + drivers/staging/fsl-mc/bus/dpmcp.h | 323 +++ + drivers/staging/fsl-mc/bus/dpmng-cmd.h | 47 + + drivers/staging/fsl-mc/bus/dpmng.c | 85 + + drivers/staging/fsl-mc/bus/dprc-cmd.h | 87 + + drivers/staging/fsl-mc/bus/dprc-driver.c | 1084 ++++++++ + drivers/staging/fsl-mc/bus/dprc.c | 1218 +++++++++ + drivers/staging/fsl-mc/bus/mc-allocator.c | 716 +++++ + drivers/staging/fsl-mc/bus/mc-bus.c | 1347 ++++++++++ + drivers/staging/fsl-mc/bus/mc-ioctl.h | 25 + + drivers/staging/fsl-mc/bus/mc-restool.c | 312 +++ + drivers/staging/fsl-mc/bus/mc-sys.c | 677 +++++ + drivers/staging/fsl-mc/include/dpbp-cmd.h | 62 + + drivers/staging/fsl-mc/include/dpbp.h | 438 +++ + drivers/staging/fsl-mc/include/dpcon-cmd.h | 162 ++ + drivers/staging/fsl-mc/include/dpcon.h | 407 +++ + drivers/staging/fsl-mc/include/dpmac-cmd.h | 192 ++ + drivers/staging/fsl-mc/include/dpmac.h | 528 ++++ + drivers/staging/fsl-mc/include/dpmng.h | 80 + + drivers/staging/fsl-mc/include/dprc.h | 990 +++++++ + drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 ++++++ + drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 +++++ + drivers/staging/fsl-mc/include/mc-cmd.h | 133 + + drivers/staging/fsl-mc/include/mc-private.h | 168 ++ + drivers/staging/fsl-mc/include/mc-sys.h | 128 + + drivers/staging/fsl-mc/include/mc.h | 244 ++ + drivers/staging/fsl-mc/include/net.h | 481 ++++ + scripts/Makefile.dtbinst | 51 + + 94 files changed, 33975 insertions(+), 84 deletions(-) + create mode 100644 drivers/net/phy/aquantia.c + create mode 100644 drivers/net/phy/fsl_10gkr.c + create mode 100644 drivers/net/phy/teranetics.c + create mode 100644 drivers/staging/fsl-dpaa2/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c + create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig + create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c + create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h + create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c + create mode 100644 drivers/staging/fsl-mc/Kconfig 
+ create mode 100644 drivers/staging/fsl-mc/Makefile + create mode 100644 drivers/staging/fsl-mc/TODO + create mode 100644 drivers/staging/fsl-mc/bus/Kconfig + create mode 100644 drivers/staging/fsl-mc/bus/Makefile + create mode 100644 drivers/staging/fsl-mc/bus/dpbp.c + create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h + create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c + create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dpmng.c + create mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h + create mode 100644 drivers/staging/fsl-mc/bus/dprc-driver.c + create mode 100644 drivers/staging/fsl-mc/bus/dprc.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-allocator.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-bus.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h + create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c + create mode 100644 drivers/staging/fsl-mc/bus/mc-sys.c + create mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpbp.h + create mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpcon.h + create mode 100644 drivers/staging/fsl-mc/include/dpmac-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/dpmac.h + create mode 100644 drivers/staging/fsl-mc/include/dpmng.h + create mode 100644 drivers/staging/fsl-mc/include/dprc.h + create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h + create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h + create mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h + create mode 100644 drivers/staging/fsl-mc/include/mc-private.h + create mode 100644 drivers/staging/fsl-mc/include/mc-sys.h + create mode 100644 drivers/staging/fsl-mc/include/mc.h + create mode 100644 drivers/staging/fsl-mc/include/net.h + create mode 100644 scripts/Makefile.dtbinst + +diff --git a/MAINTAINERS b/MAINTAINERS +index 1ae7362..63a796c 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -3973,6 +3973,33 @@ F: sound/soc/fsl/fsl* + F: sound/soc/fsl/imx* + F: sound/soc/fsl/mpc8610_hpcd.c + ++FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER ++M: J. 
German Rivera ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-mc/ ++ ++FREESCALE DPAA2 ETH DRIVER ++M: Ioana Radulescu ++M: Bogdan Hamciuc ++M: Cristian Sovaiala ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-dpaa2/ethernet/ ++ ++FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER ++M: Lijun Pan ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-mc/bus/mc-ioctl.h ++F: drivers/staging/fsl-mc/bus/mc-restool.c ++ ++FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER ++M: Alex Marginean ++L: linux-kernel@vger.kernel.org ++S: Maintained ++F: drivers/staging/fsl-dpaa2/mac/ ++ + FREEVXFS FILESYSTEM + M: Christoph Hellwig + W: ftp://ftp.openlinux.org/pub/people/hch/vxfs +diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h +index 75825b6..f58e31a 100644 +--- a/arch/arm64/include/asm/io.h ++++ b/arch/arm64/include/asm/io.h +@@ -249,6 +249,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); + #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) + #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) + #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) ++#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS)) + #define iounmap __iounmap + + #define ARCH_HAS_IOREMAP_WC +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 41a43bf..009f690 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); + #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) + #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) ++#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) + + #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) +diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig +index 2703083..0c1c97d 100644 +--- a/drivers/net/ethernet/freescale/Kconfig ++++ b/drivers/net/ethernet/freescale/Kconfig +@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE + default y + depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ + M523x || M527x || M5272 || M528x || M520x || M532x || \ +- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) ++ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ ++ ARCH_LAYERSCAPE + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from +@@ -58,18 +59,17 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" + + config FSL_PQ_MDIO + tristate "Freescale PQ MDIO" +- depends on FSL_SOC + select PHYLIB + ---help--- + This driver supports the MDIO bus used by the gianfar and UCC drivers. + + config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" +- depends on FSL_SOC + select PHYLIB + select OF_MDIO + ---help--- +- This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 
++ This driver supports the MDIO bus on the Fman 10G Ethernet MACs and ++ on mEMAC (which supports both Clauses 22 and 45) + + config UCC_GETH + tristate "Freescale QE Gigabit Ethernet" +diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c +index ff55fbb..76ff046 100644 +--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c ++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c +@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) + } + #endif + +-static struct of_device_id mpc52xx_fec_match[] = { ++static const struct of_device_id mpc52xx_fec_match[] = { + { .compatible = "fsl,mpc5200b-fec", }, + { .compatible = "fsl,mpc5200-fec", }, + { .compatible = "mpc5200-fec", }, +diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +index e052890..1e647be 100644 +--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c ++++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) + return 0; + } + +-static struct of_device_id mpc52xx_fec_mdio_match[] = { ++static const struct of_device_id mpc52xx_fec_mdio_match[] = { + { .compatible = "fsl,mpc5200b-mdio", }, + { .compatible = "fsl,mpc5200-mdio", }, + { .compatible = "mpc5200b-fec-phy", }, +diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +index c92c3b7..dc0da6c 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c ++++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +@@ -886,7 +886,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { + #endif + }; + +-static struct of_device_id fs_enet_match[]; ++static const struct of_device_id fs_enet_match[]; + static int fs_enet_probe(struct platform_device *ofdev) + { + const struct of_device_id *match; +@@ -1047,7 +1047,7 @@ static int fs_enet_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_match[] = { ++static const struct of_device_id fs_enet_match[] = { + #ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +index 3d3fde6..9ec396b 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_mdio_bb_match[] = { ++static const struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +index ebf5d64..72205b0 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, + + } + +-static struct of_device_id fs_enet_mdio_fec_match[]; ++static const struct of_device_id fs_enet_mdio_fec_match[]; + static int fs_enet_mdio_probe(struct platform_device *ofdev) + { + const struct of_device_id *match; +@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) + return 0; + } + +-static struct of_device_id fs_enet_mdio_fec_match[] = { ++static const struct of_device_id fs_enet_mdio_fec_match[] = { + 
{ + .compatible = "fsl,pq1-fec-mdio", + }, +diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c +index 964c6bf..f94fa63 100644 +--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c ++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c +@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) + + #endif + +-static struct of_device_id fsl_pq_mdio_match[] = { ++static const struct of_device_id fsl_pq_mdio_match[] = { + #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index 4fdf0aa..a4a7396 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -3455,7 +3455,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) + phy_print_status(phydev); + } + +-static struct of_device_id gfar_match[] = ++static const struct of_device_id gfar_match[] = + { + { + .type = "network", +diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c +index bb56800..c7c75de 100644 +--- a/drivers/net/ethernet/freescale/gianfar_ptp.c ++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c +@@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) + return 0; + } + +-static struct of_device_id match_table[] = { ++static const struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, + }; +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index 3cf0478..741a7d4 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) + return 0; + } + +-static struct of_device_id ucc_geth_match[] = { ++static const struct of_device_id ucc_geth_match[] = { + { + .type = "network", + .compatible = "ucc_geth", +diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c +index 6e7db66..7b8fe86 100644 +--- a/drivers/net/ethernet/freescale/xgmac_mdio.c ++++ b/drivers/net/ethernet/freescale/xgmac_mdio.c +@@ -32,31 +32,62 @@ struct tgec_mdio_controller { + __be32 mdio_addr; /* MDIO address */ + } __packed; + ++#define MDIO_STAT_ENC BIT(6) + #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) +-#define MDIO_STAT_BSY (1 << 0) +-#define MDIO_STAT_RD_ER (1 << 1) ++#define MDIO_STAT_BSY BIT(0) ++#define MDIO_STAT_RD_ER BIT(1) + #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) + #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) +-#define MDIO_CTL_PRE_DIS (1 << 10) +-#define MDIO_CTL_SCAN_EN (1 << 11) +-#define MDIO_CTL_POST_INC (1 << 14) +-#define MDIO_CTL_READ (1 << 15) ++#define MDIO_CTL_PRE_DIS BIT(10) ++#define MDIO_CTL_SCAN_EN BIT(11) ++#define MDIO_CTL_POST_INC BIT(14) ++#define MDIO_CTL_READ BIT(15) + + #define MDIO_DATA(x) (x & 0xffff) +-#define MDIO_DATA_BSY (1 << 31) ++#define MDIO_DATA_BSY BIT(31) ++ ++struct mdio_fsl_priv { ++ struct tgec_mdio_controller __iomem *mdio_base; ++ bool is_little_endian; ++}; ++ ++static u32 xgmac_read32(void __iomem *regs, ++ bool is_little_endian) ++{ ++ if (is_little_endian) ++ return ioread32(regs); ++ else ++ return ioread32be(regs); ++} ++ ++static void xgmac_write32(u32 value, ++ void __iomem *regs, ++ bool is_little_endian) ++{ ++ if (is_little_endian) ++ iowrite32(value, regs); ++ else ++ iowrite32be(value, regs); ++} + 
+ /* + * Wait until the MDIO bus is free + */ + static int xgmac_wait_until_free(struct device *dev, +- struct tgec_mdio_controller __iomem *regs) ++ struct tgec_mdio_controller __iomem *regs, ++ bool is_little_endian) + { +- uint32_t status; ++ unsigned int timeout; + + /* Wait till the bus is free */ +- status = spin_event_timeout( +- !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); +- if (!status) { ++ timeout = TIMEOUT; ++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & ++ MDIO_STAT_BSY) && timeout) { ++ cpu_relax(); ++ timeout--; ++ } ++ ++ if (!timeout) { + dev_err(dev, "timeout waiting for bus to be free\n"); + return -ETIMEDOUT; + } +@@ -68,14 +99,20 @@ static int xgmac_wait_until_free(struct device *dev, + * Wait till the MDIO read or write operation is complete + */ + static int xgmac_wait_until_done(struct device *dev, +- struct tgec_mdio_controller __iomem *regs) ++ struct tgec_mdio_controller __iomem *regs, ++ bool is_little_endian) + { +- uint32_t status; ++ unsigned int timeout; + + /* Wait till the MDIO write is complete */ +- status = spin_event_timeout( +- !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); +- if (!status) { ++ timeout = TIMEOUT; ++ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & ++ MDIO_STAT_BSY) && timeout) { ++ cpu_relax(); ++ timeout--; ++ } ++ ++ if (!timeout) { + dev_err(dev, "timeout waiting for operation to complete\n"); + return -ETIMEDOUT; + } +@@ -90,32 +127,47 @@ static int xgmac_wait_until_done(struct device *dev, + */ + static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) + { +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- uint16_t dev_addr = regnum >> 16; ++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; ++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; ++ uint16_t dev_addr; ++ u32 mdio_ctl, mdio_stat; + int ret; ++ bool endian = priv->is_little_endian; ++ ++ mdio_stat = xgmac_read32(®s->mdio_stat, endian); ++ if (regnum & MII_ADDR_C45) { ++ /* Clause 45 (ie 10G) */ ++ dev_addr = (regnum >> 16) & 0x1f; ++ mdio_stat |= MDIO_STAT_ENC; ++ } else { ++ /* Clause 22 (ie 1G) */ ++ dev_addr = regnum & 0x1f; ++ mdio_stat &= ~MDIO_STAT_ENC; ++ } + +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); ++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the port and dev addr */ +- out_be32(®s->mdio_ctl, +- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); ++ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); ++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ +- out_be32(®s->mdio_addr, regnum & 0xffff); ++ if (regnum & MII_ADDR_C45) { ++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); +- if (ret) +- return ret; ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); ++ if (ret) ++ return ret; ++ } + + /* Write the value to the register */ +- out_be32(®s->mdio_data, MDIO_DATA(value)); ++ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); + +- ret = xgmac_wait_until_done(&bus->dev, regs); ++ ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + +@@ -129,74 +181,70 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val + */ + static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) 
+ { +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- uint16_t dev_addr = regnum >> 16; ++ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; ++ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; ++ uint16_t dev_addr; ++ uint32_t mdio_stat; + uint32_t mdio_ctl; + uint16_t value; + int ret; ++ bool endian = priv->is_little_endian; ++ ++ mdio_stat = xgmac_read32(®s->mdio_stat, endian); ++ if (regnum & MII_ADDR_C45) { ++ dev_addr = (regnum >> 16) & 0x1f; ++ mdio_stat |= MDIO_STAT_ENC; ++ } else { ++ dev_addr = regnum & 0x1f; ++ mdio_stat &= ~MDIO_STAT_ENC; ++ } + +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); ++ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); +- out_be32(®s->mdio_ctl, mdio_ctl); ++ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ +- out_be32(®s->mdio_addr, regnum & 0xffff); ++ if (regnum & MII_ADDR_C45) { ++ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + +- ret = xgmac_wait_until_free(&bus->dev, regs); +- if (ret) +- return ret; ++ ret = xgmac_wait_until_free(&bus->dev, regs, endian); ++ if (ret) ++ return ret; ++ } + + /* Initiate the read */ +- out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); ++ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); + +- ret = xgmac_wait_until_done(&bus->dev, regs); ++ ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Return all Fs if nothing was there */ +- if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { ++ if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + dev_err(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + +- value = in_be32(®s->mdio_data) & 0xffff; ++ value = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", value); + + return value; + } + +-/* Reset the MIIM registers, and wait for the bus to free */ +-static int xgmac_mdio_reset(struct mii_bus *bus) +-{ +- struct tgec_mdio_controller __iomem *regs = bus->priv; +- int ret; +- +- mutex_lock(&bus->mdio_lock); +- +- /* Setup the MII Mgmt clock speed */ +- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); +- +- ret = xgmac_wait_until_free(&bus->dev, regs); +- +- mutex_unlock(&bus->mdio_lock); +- +- return ret; +-} +- + static int xgmac_mdio_probe(struct platform_device *pdev) + { + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct resource res; ++ struct mdio_fsl_priv *priv; + int ret; + + ret = of_address_to_resource(np, 0, &res); +@@ -205,25 +253,30 @@ static int xgmac_mdio_probe(struct platform_device *pdev) + return ret; + } + +- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); ++ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale XGMAC MDIO Bus"; + bus->read = xgmac_mdio_read; + bus->write = xgmac_mdio_write; +- bus->reset = xgmac_mdio_reset; +- bus->irq = bus->priv; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + + /* Set the PHY base address */ +- bus->priv = of_iomap(np, 0); +- if (!bus->priv) { ++ priv = bus->priv; ++ priv->mdio_base = of_iomap(np, 0); ++ if (!priv->mdio_base) { + ret = -ENOMEM; + goto err_ioremap; + } + 
++ if (of_get_property(pdev->dev.of_node, ++ "little-endian", NULL)) ++ priv->is_little_endian = true; ++ else ++ priv->is_little_endian = false; ++ + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "cannot register MDIO bus\n"); +@@ -235,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) + return 0; + + err_registration: +- iounmap(bus->priv); ++ iounmap(priv->mdio_base); + + err_ioremap: + mdiobus_free(bus); +@@ -254,10 +307,13 @@ static int xgmac_mdio_remove(struct platform_device *pdev) + return 0; + } + +-static struct of_device_id xgmac_mdio_match[] = { ++static const struct of_device_id xgmac_mdio_match[] = { + { + .compatible = "fsl,fman-xmdio", + }, ++ { ++ .compatible = "fsl,fman-memac-mdio", ++ }, + {}, + }; + MODULE_DEVICE_TABLE(of, xgmac_mdio_match); +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index 75472cf..2973c60 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -14,6 +14,11 @@ if PHYLIB + + comment "MII PHY device drivers" + ++config AQUANTIA_PHY ++ tristate "Drivers for the Aquantia PHYs" ++ ---help--- ++ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 ++ + config AT803X_PHY + tristate "Drivers for Atheros AT803X PHYs" + ---help--- +diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile +index eb3b18b..b5c8f9f 100644 +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -3,6 +3,7 @@ + libphy-objs := phy.o phy_device.o mdio_bus.o + + obj-$(CONFIG_PHYLIB) += libphy.o ++obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o + obj-$(CONFIG_MARVELL_PHY) += marvell.o + obj-$(CONFIG_DAVICOM_PHY) += davicom.o + obj-$(CONFIG_CICADA_PHY) += cicada.o +diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c +new file mode 100644 +index 0000000..d6111af +--- /dev/null ++++ b/drivers/net/phy/aquantia.c +@@ -0,0 +1,201 @@ ++/* ++ * Driver for Aquantia PHY ++ * ++ * Author: Shaohui Xie ++ * ++ * Copyright 2015 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public License ++ * version 2. This program is licensed "as is" without any warranty of any ++ * kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PHY_ID_AQ1202 0x03a1b445 ++#define PHY_ID_AQ2104 0x03a1b460 ++#define PHY_ID_AQR105 0x03a1b4a2 ++#define PHY_ID_AQR405 0x03a1b4b0 ++ ++#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ ++ SUPPORTED_1000baseT_Full | \ ++ SUPPORTED_100baseT_Full | \ ++ PHY_DEFAULT_FEATURES) ++ ++static int aquantia_config_aneg(struct phy_device *phydev) ++{ ++ phydev->supported = PHY_AQUANTIA_FEATURES; ++ phydev->advertising = phydev->supported; ++ ++ return 0; ++} ++ ++static int aquantia_aneg_done(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); ++} ++ ++static int aquantia_config_intr(struct phy_device *phydev) ++{ ++ int err; ++ ++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { ++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); ++ } else { ++ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); ++ if (err < 0) ++ return err; ++ ++ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); ++ } ++ ++ return err; ++} ++ ++static int aquantia_ack_interrupt(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); ++ return (reg < 0) ? reg : 0; ++} ++ ++static int aquantia_read_status(struct phy_device *phydev) ++{ ++ int reg; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ if (reg & MDIO_STAT1_LSTATUS) ++ phydev->link = 1; ++ else ++ phydev->link = 0; ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); ++ mdelay(10); ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); ++ ++ switch (reg) { ++ case 0x9: ++ phydev->speed = SPEED_2500; ++ break; ++ case 0x5: ++ phydev->speed = SPEED_1000; ++ break; ++ case 0x3: ++ phydev->speed = SPEED_100; ++ break; ++ case 0x7: ++ default: ++ phydev->speed = SPEED_10000; ++ break; ++ } ++ phydev->duplex = DUPLEX_FULL; ++ ++ return 0; ++} ++ ++static struct phy_driver aquantia_driver[] = { ++{ ++ .phy_id = PHY_ID_AQ1202, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQ1202", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQ2104, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQ2104", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQR105, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR105", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ ++ .phy_id = PHY_ID_AQR405, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR405", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++}; ++ ++static int __init aquantia_init(void) ++{ ++ return phy_drivers_register(aquantia_driver, ++ ARRAY_SIZE(aquantia_driver)); ++} ++ ++static void __exit aquantia_exit(void) ++{ ++ return phy_drivers_unregister(aquantia_driver, ++ ARRAY_SIZE(aquantia_driver)); ++} ++ 
++module_init(aquantia_init); ++module_exit(aquantia_exit); ++ ++static struct mdio_device_id __maybe_unused aquantia_tbl[] = { ++ { PHY_ID_AQ1202, 0xfffffff0 }, ++ { PHY_ID_AQ2104, 0xfffffff0 }, ++ { PHY_ID_AQR105, 0xfffffff0 }, ++ { PHY_ID_AQR405, 0xfffffff0 }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, aquantia_tbl); ++ ++MODULE_DESCRIPTION("Aquantia PHY driver"); ++MODULE_AUTHOR("Shaohui Xie "); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/net/phy/fsl_10gkr.c b/drivers/net/phy/fsl_10gkr.c +new file mode 100644 +index 0000000..3713726 +--- /dev/null ++++ b/drivers/net/phy/fsl_10gkr.c +@@ -0,0 +1,1467 @@ ++/* Freescale XFI 10GBASE-KR driver. ++ * Author: Shaohui Xie ++ * ++ * Copyright 2014 Freescale Semiconductor, Inc. ++ * ++ * Licensed under the GPL-2 or later. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define FSL_XFI_PCS_PHY_ID 0x7C000012 ++#define FSL_XFI_PCS_PHY_ID2 0x0083e400 ++ ++/* Freescale XFI PCS MMD */ ++#define FSL_XFI_PMD 0x1 ++#define FSL_XFI_PCS 0x3 ++#define FSL_XFI_AN 0x7 ++#define FSL_XFI_VS1 0x1e ++ ++/* Freescale XFI PMD registers */ ++#define FSL_XFI_PMD_CTRL 0x0 ++#define FSL_XFI_KR_PMD_CTRL 0x0096 ++#define FSL_XFI_KR_PMD_STATUS 0x0097 ++#define FSL_XFI_KR_LP_CU 0x0098 ++#define FSL_XFI_KR_LP_STATUS 0x0099 ++#define FSL_XFI_KR_LD_CU 0x009a ++#define FSL_XFI_KR_LD_STATUS 0x009b ++ ++/* PMD define */ ++#define PMD_RESET 0x1 ++#define PMD_STATUS_SUP_STAT 0x4 ++#define PMD_STATUS_FRAME_LOCK 0x2 ++#define TRAIN_EN 0x3 ++#define TRAIN_DISABLE 0x1 ++#define RX_STAT 0x1 ++ ++/* Freescale XFI PCS registers */ ++#define FSL_XFI_PCS_CTRL 0x0 ++#define FSL_XFI_PCS_STATUS 0x1 ++ ++/* Freescale XFI Auto-Negotiation Registers */ ++#define FSL_XFI_AN_CTRL 0x0000 ++#define FSL_XFI_LNK_STATUS 0x0001 ++#define FSL_XFI_AN_AD_1 0x0011 ++#define FSL_XFI_BP_STATUS 0x0030 ++ ++#define XFI_AN_AD1 0x85 ++#define XF_AN_RESTART 0x1200 ++#define XFI_AN_LNK_STAT_UP 0x4 ++ ++/* Freescale XFI Vendor-Specific 1 Registers */ ++#define FSL_XFI_PCS_INTR_EVENT 0x0002 ++#define FSL_XFI_PCS_INTR_MASK 0x0003 ++#define FSL_XFI_AN_INTR_EVENT 0x0004 ++#define FSL_XFI_AN_INTR_MASK 0x0005 ++#define FSL_XFI_LT_INTR_EVENT 0x0006 ++#define FSL_XFI_LT_INTR_MASK 0x0007 ++ ++/* C(-1) */ ++#define BIN_M1 0 ++/* C(1) */ ++#define BIN_LONG 1 ++#define BIN_M1_SEL 6 ++#define BIN_Long_SEL 7 ++#define CDR_SEL_MASK 0x00070000 ++#define BIN_SNAPSHOT_NUM 5 ++#define BIN_M1_THRESHOLD 3 ++#define BIN_LONG_THRESHOLD 2 ++ ++#define PRE_COE_MASK 0x03c00000 ++#define POST_COE_MASK 0x001f0000 ++#define ZERO_COE_MASK 0x00003f00 ++#define PRE_COE_SHIFT 22 ++#define POST_COE_SHIFT 16 ++#define ZERO_COE_SHIFT 8 ++ ++#define PRE_COE_MAX 0x0 ++#define PRE_COE_MIN 0x8 ++#define POST_COE_MAX 0x0 ++#define POST_COE_MIN 0x10 ++#define ZERO_COE_MAX 0x30 ++#define ZERO_COE_MIN 0x0 ++ ++#define TECR0_INIT 0x24200000 ++#define RATIO_PREQ 0x3 ++#define RATIO_PST1Q 0xd ++#define RATIO_EQ 0x20 ++ ++#define GCR1_CTL_SNP_START_MASK 0x00002000 ++#define GCR1_SNP_START_MASK 0x00000040 ++#define RECR1_SNP_DONE_MASK 0x00000004 ++#define RECR1_CTL_SNP_DONE_MASK 0x00000002 ++#define TCSR1_SNP_DATA_MASK 0x0000ffc0 ++#define TCSR1_SNP_DATA_SHIFT 6 ++#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100 ++ ++#define RECR1_GAINK2_MASK 0x0f000000 ++#define RECR1_GAINK2_SHIFT 24 ++#define RECR1_GAINK3_MASK 0x000f0000 ++#define RECR1_GAINK3_SHIFT 16 ++#define RECR1_OFFSET_MASK 0x00003f80 ++#define RECR1_OFFSET_SHIFT 7 ++#define RECR1_BLW_MASK 0x00000f80 ++#define RECR1_BLW_SHIFT 7 ++#define 
EYE_CTRL_SHIFT 12 ++#define BASE_WAND_SHIFT 10 ++ ++#define XGKR_TIMEOUT 1050 ++#define AN_ABILITY_MASK 0x9 ++#define AN_10GKR_MASK 0x8 ++#define LT_10GKR_MASK 0x4 ++#define TRAIN_FAIL 0x8 ++ ++#define INCREMENT 1 ++#define DECREMENT 2 ++#define TIMEOUT_LONG 3 ++#define TIMEOUT_M1 3 ++ ++#define RX_READY_MASK 0x8000 ++#define PRESET_MASK 0x2000 ++#define INIT_MASK 0x1000 ++#define COP1_MASK 0x30 ++#define COP1_SHIFT 4 ++#define COZ_MASK 0xc ++#define COZ_SHIFT 2 ++#define COM1_MASK 0x3 ++#define COM1_SHIFT 0 ++#define REQUEST_MASK 0x3f ++#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \ ++ COP1_MASK | COZ_MASK | COM1_MASK) ++ ++#define FSL_SERDES_INSTANCE1_BASE 0xffe0ea000 ++#define FSL_SERDES_INSTANCE2_BASE 0xffe0eb000 ++#define FSL_LANE_A_BASE 0x800 ++#define FSL_LANE_B_BASE 0x840 ++#define FSL_LANE_C_BASE 0x880 ++#define FSL_LANE_D_BASE 0x8C0 ++#define FSL_LANE_E_BASE 0x900 ++#define FSL_LANE_F_BASE 0x940 ++#define FSL_LANE_G_BASE 0x980 ++#define FSL_LANE_H_BASE 0x9C0 ++#define GCR0_RESET_MASK 0x600000 ++ ++#define NEW_ALGORITHM_TRAIN_TX ++#ifdef NEW_ALGORITHM_TRAIN_TX ++#define FORCE_INC_COP1_NUMBER 0 ++#define FORCE_INC_COM1_NUMBER 1 ++#endif ++ ++enum fsl_xgkr_driver { ++ FSL_XGKR_REV1, ++ FSL_XGKR_REV2, ++ FSL_XGKR_INV ++}; ++ ++static struct phy_driver fsl_xgkr_driver[FSL_XGKR_INV]; ++ ++enum coe_filed { ++ COE_COP1, ++ COE_COZ, ++ COE_COM ++}; ++ ++enum coe_update { ++ COE_NOTUPDATED, ++ COE_UPDATED, ++ COE_MIN, ++ COE_MAX, ++ COE_INV ++}; ++ ++enum serdes_inst { ++ SERDES_1, ++ SERDES_2, ++ SERDES_MAX ++}; ++ ++enum lane_inst { ++ LANE_A, ++ LANE_B, ++ LANE_C, ++ LANE_D, ++ LANE_E, ++ LANE_F, ++ LANE_G, ++ LANE_H, ++ LANE_MAX ++}; ++ ++struct serdes_map { ++ const char *serdes_name; ++ unsigned long serdes_base; ++}; ++ ++struct lane_map { ++ const char *lane_name; ++ unsigned long lane_base; ++}; ++ ++const struct serdes_map s_map[SERDES_MAX] = { ++ {"serdes-1", FSL_SERDES_INSTANCE1_BASE}, ++ {"serdes-2", FSL_SERDES_INSTANCE2_BASE} ++}; ++ ++const struct lane_map l_map[LANE_MAX] = { ++ {"lane-a", FSL_LANE_A_BASE}, ++ {"lane-b", FSL_LANE_B_BASE}, ++ {"lane-c", FSL_LANE_C_BASE}, ++ {"lane-d", FSL_LANE_D_BASE}, ++ {"lane-e", FSL_LANE_E_BASE}, ++ {"lane-f", FSL_LANE_F_BASE}, ++ {"lane-g", FSL_LANE_G_BASE}, ++ {"lane-h", FSL_LANE_H_BASE} ++}; ++ ++struct per_lane_ctrl_status { ++ __be32 gcr0; /* 0x.000 - General Control Register 0 */ ++ __be32 gcr1; /* 0x.004 - General Control Register 1 */ ++ __be32 gcr2; /* 0x.008 - General Control Register 2 */ ++ __be32 resv1; /* 0x.00C - Reserved */ ++ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */ ++ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */ ++ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */ ++ __be32 resv2; /* 0x.01C - Reserved */ ++ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */ ++ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */ ++ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */ ++ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */ ++ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */ ++ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */ ++ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */ ++ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */ ++}; ++ ++struct training_state_machine { ++ bool bin_m1_late_early; ++ bool bin_long_late_early; ++ bool bin_m1_stop; ++ bool bin_long_stop; ++ bool tx_complete; ++ bool an_ok; ++ bool link_up; ++ bool running; ++ bool sent_init; ++ int m1_min_max_cnt; ++ int long_min_max_cnt; 
++#ifdef NEW_ALGORITHM_TRAIN_TX ++ int pre_inc; ++ int post_inc; ++#endif ++}; ++ ++struct fsl_xgkr_inst { ++ void *reg_base; ++ struct mii_bus *bus; ++ struct phy_device *phydev; ++ struct training_state_machine t_s_m; ++ u32 ld_update; ++ u32 ld_status; ++ u32 ratio_preq; ++ u32 ratio_pst1q; ++ u32 adpt_eq; ++}; ++ ++struct fsl_xgkr_wk { ++ struct work_struct xgkr_wk; ++ struct list_head xgkr_list; ++ struct fsl_xgkr_inst *xgkr_inst; ++}; ++ ++LIST_HEAD(fsl_xgkr_list); ++ ++static struct timer_list xgkr_timer; ++static int fire_timer; ++static struct workqueue_struct *xgkr_wq; ++ ++static void init_state_machine(struct training_state_machine *s_m) ++{ ++ s_m->bin_m1_late_early = true; ++ s_m->bin_long_late_early = false; ++ s_m->bin_m1_stop = false; ++ s_m->bin_long_stop = false; ++ s_m->tx_complete = false; ++ s_m->an_ok = false; ++ s_m->link_up = false; ++ s_m->running = false; ++ s_m->sent_init = false; ++ s_m->m1_min_max_cnt = 0; ++ s_m->long_min_max_cnt = 0; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ s_m->pre_inc = FORCE_INC_COM1_NUMBER; ++ s_m->post_inc = FORCE_INC_COP1_NUMBER; ++#endif ++} ++ ++void tune_tecr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base; ++ u32 val; ++ ++ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; ++ ++ val = TECR0_INIT | ++ inst->adpt_eq << ZERO_COE_SHIFT | ++ inst->ratio_preq << PRE_COE_SHIFT | ++ inst->ratio_pst1q << POST_COE_SHIFT; ++ ++ /* reset the lane */ ++ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++ iowrite32be(val, ®_base->tecr0); ++ udelay(1); ++ /* unreset the lane */ ++ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++} ++ ++static void start_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_EN); ++} ++ ++static void stop_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); ++} ++ ++static void reset_gcr0(struct fsl_xgkr_inst *inst) ++{ ++ struct per_lane_ctrl_status *reg_base; ++ ++ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; ++ ++ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, ++ ®_base->gcr0); ++ udelay(1); ++} ++ ++static void reset_lt(struct phy_device *phydev) ++{ ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_PMD_CTRL, PMD_RESET); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_CU, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_STATUS, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU, 0); ++ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS, 0); ++} ++ ++static void start_an(struct phy_device *phydev) ++{ ++ reset_lt(phydev); ++ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_AD_1, XFI_AN_AD1); ++ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_CTRL, XF_AN_RESTART); ++} ++ ++static void ld_coe_status(struct fsl_xgkr_inst *inst) ++{ ++ phy_write_mmd(inst->phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_LD_STATUS, inst->ld_status); ++} ++ ++static void ld_coe_update(struct fsl_xgkr_inst *inst) ++{ ++ phy_write_mmd(inst->phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_LD_CU, inst->ld_update); ++} ++ ++static void init_inst(struct fsl_xgkr_inst *inst, int reset) ++{ ++ if (reset) { ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = 
RATIO_EQ; ++ tune_tecr0(inst); ++ } ++ ++ inst->ld_status &= RX_READY_MASK; ++ ld_coe_status(inst); ++ ++ /* init state machine */ ++ init_state_machine(&inst->t_s_m); ++ ++ inst->ld_update = 0; ++ ld_coe_update(inst); ++ ++ inst->ld_status &= ~RX_READY_MASK; ++ ld_coe_status(inst); ++} ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++static int get_median_gaink2(u32 *reg) ++{ ++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM]; ++ u32 rx_eq_snp; ++ struct per_lane_ctrl_status *reg_base; ++ int timeout; ++ int i, j, tmp, pos; ++ ++ reg_base = (struct per_lane_ctrl_status *)reg; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while (ioread32be(®_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* start snap shot */ ++ iowrite32be((ioread32be(®_base->gcr1) | ++ GCR1_CTL_SNP_START_MASK), ++ ®_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32be(®_base->recr1) & ++ RECR1_CTL_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ rx_eq_snp = ioread32be(®_base->recr1); ++ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >> ++ RECR1_GAINK2_SHIFT; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32be((ioread32be(®_base->gcr1) & ++ ~GCR1_CTL_SNP_START_MASK), ++ ®_base->gcr1); ++ } ++ ++ /* get median of the 5 snap shot */ ++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) { ++ tmp = gaink2_snap_shot[i]; ++ pos = i; ++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) { ++ if (gaink2_snap_shot[j] < tmp) { ++ tmp = gaink2_snap_shot[j]; ++ pos = j; ++ } ++ } ++ ++ gaink2_snap_shot[pos] = gaink2_snap_shot[i]; ++ gaink2_snap_shot[i] = tmp; ++ } ++ ++ return gaink2_snap_shot[2]; ++} ++#endif ++ ++static bool is_bin_early(int bin_sel, void __iomem *reg) ++{ ++ bool early = false; ++ int bin_snap_shot[BIN_SNAPSHOT_NUM]; ++ int i, negative_count = 0; ++ struct per_lane_ctrl_status *reg_base; ++ int timeout; ++ ++ reg_base = (struct per_lane_ctrl_status *)reg; ++ ++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { ++ /* wait RECR1_SNP_DONE_MASK has cleared */ ++ timeout = 100; ++ while ((ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* set TCSR1[CDR_SEL] to BinM1/BinLong */ ++ if (bin_sel == BIN_M1) { ++ iowrite32be((ioread32be(®_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_M1_SEL, ++ ®_base->tcsr1); ++ } else { ++ iowrite32be((ioread32be(®_base->tcsr1) & ++ ~CDR_SEL_MASK) | BIN_Long_SEL, ++ ®_base->tcsr1); ++ } ++ ++ /* start snap shot */ ++ iowrite32be(ioread32be(®_base->gcr1) | GCR1_SNP_START_MASK, ++ ®_base->gcr1); ++ ++ /* wait for SNP done */ ++ timeout = 100; ++ while (!(ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { ++ udelay(1); ++ timeout--; ++ if (timeout == 0) ++ break; ++ } ++ ++ /* read and save the snap shot */ ++ bin_snap_shot[i] = (ioread32be(®_base->tcsr1) & ++ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT; ++ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK) ++ negative_count++; ++ ++ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ ++ iowrite32be(ioread32be(®_base->gcr1) & ~GCR1_SNP_START_MASK, ++ ®_base->gcr1); ++ } ++ ++ if (((bin_sel == BIN_M1) && negative_count > BIN_M1_THRESHOLD) || ++ ((bin_sel == BIN_LONG && negative_count > BIN_LONG_THRESHOLD))) { ++ early = true; ++ } ++ ++ return early; ++} ++ ++static void train_tx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device 
*phydev = inst->phydev; ++ struct training_state_machine *s_m = &inst->t_s_m; ++ bool bin_m1_early, bin_long_early; ++ u32 lp_status, old_ld_update; ++ u32 status_cop1, status_coz, status_com1; ++ u32 req_cop1, req_coz, req_com1, req_preset, req_init; ++ u32 temp; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ u32 median_gaink2; ++#endif ++ ++recheck: ++ if (s_m->bin_long_stop && s_m->bin_m1_stop) { ++ s_m->tx_complete = true; ++ inst->ld_status |= RX_READY_MASK; ++ ld_coe_status(inst); ++ /* tell LP we are ready */ ++ phy_write_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS, RX_STAT); ++ return; ++ } ++ ++ /* We start by checking the current LP status. If we got any responses, ++ * we can clear up the appropriate update request so that the ++ * subsequent code may easily issue new update requests if needed. ++ */ ++ lp_status = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & ++ REQUEST_MASK; ++ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT; ++ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT; ++ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT; ++ ++ old_ld_update = inst->ld_update; ++ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT; ++ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT; ++ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT; ++ req_preset = old_ld_update & PRESET_MASK; ++ req_init = old_ld_update & INIT_MASK; ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.1 ++ * We may clear PRESET when all coefficients show UPDATED or MAX. ++ */ ++ if (req_preset) { ++ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) && ++ (status_coz == COE_UPDATED || status_coz == COE_MAX) && ++ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) { ++ inst->ld_update &= ~PRESET_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * We may clear INITIALIZE when no coefficients show NOT UPDATED. ++ */ ++ if (req_init) { ++ if (status_cop1 != COE_NOTUPDATED && ++ status_coz != COE_NOTUPDATED && ++ status_com1 != COE_NOTUPDATED) { ++ inst->ld_update &= ~INIT_MASK; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.2 ++ * we send initialize to the other side to ensure default settings ++ * for the LP. Naturally, we should do this only once. ++ */ ++ if (!s_m->sent_init) { ++ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) { ++ inst->ld_update |= INIT_MASK; ++ s_m->sent_init = true; ++ } ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We set coefficient requests to HOLD when we get the information ++ * about any updates On clearing our prior response, we also update ++ * our internal status. 
++ */ ++ if (status_cop1 != COE_NOTUPDATED) { ++ if (req_cop1) { ++ inst->ld_update &= ~COP1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (s_m->post_inc) { ++ if (req_cop1 == INCREMENT && ++ status_cop1 == COE_MAX) { ++ s_m->post_inc = 0; ++ s_m->bin_long_stop = true; ++ s_m->bin_m1_stop = true; ++ } else { ++ s_m->post_inc -= 1; ++ } ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) || ++ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) { ++ s_m->long_min_max_cnt++; ++ if (s_m->long_min_max_cnt >= TIMEOUT_LONG) { ++ s_m->bin_long_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (status_coz != COE_NOTUPDATED) { ++ if (req_coz) ++ inst->ld_update &= ~COZ_MASK; ++ } ++ ++ if (status_com1 != COE_NOTUPDATED) { ++ if (req_com1) { ++ inst->ld_update &= ~COM1_MASK; ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (s_m->pre_inc) { ++ if (req_com1 == INCREMENT && ++ status_com1 == COE_MAX) ++ s_m->pre_inc = 0; ++ else ++ s_m->pre_inc -= 1; ++ ++ ld_coe_update(inst); ++ goto recheck; ++ } ++#endif ++ /* Stop If we have reached the limit for a parameter. */ ++ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) || ++ (req_com1 == INCREMENT && status_com1 == COE_MAX)) { ++ s_m->m1_min_max_cnt++; ++ if (s_m->m1_min_max_cnt >= TIMEOUT_M1) { ++ s_m->bin_m1_stop = true; ++ ld_coe_update(inst); ++ goto recheck; ++ } ++ } ++ } ++ } ++ ++ if (old_ld_update != inst->ld_update) { ++ ld_coe_update(inst); ++ /* Redo these status checks and updates until we have no more ++ * changes, to speed up the overall process. ++ */ ++ goto recheck; ++ } ++ ++ /* Do nothing if we have pending request. */ ++ if ((req_coz || req_com1 || req_cop1)) ++ return; ++ else if (lp_status) ++ /* No pending request but LP status was not reverted to ++ * not updated. ++ */ ++ return; ++ ++#ifdef NEW_ALGORITHM_TRAIN_TX ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (s_m->pre_inc) { ++ inst->ld_update = INCREMENT << COM1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ ++ if (status_cop1 != COE_MAX) { ++ median_gaink2 = get_median_gaink2(inst->reg_base); ++ if (median_gaink2 == 0xf) { ++ s_m->post_inc = 1; ++ } else { ++ /* Gaink2 median lower than "F" */ ++ s_m->bin_m1_stop = true; ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ } else { ++ /* C1 MAX */ ++ s_m->bin_m1_stop = true; ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ if (s_m->post_inc) { ++ inst->ld_update = INCREMENT << COP1_SHIFT; ++ ld_coe_update(inst); ++ return; ++ } ++ } ++#endif ++ ++ /* snapshot and select bin */ ++ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base); ++ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base); ++ ++ if (!s_m->bin_m1_stop && !s_m->bin_m1_late_early && bin_m1_early) { ++ s_m->bin_m1_stop = true; ++ goto recheck; ++ } ++ ++ if (!s_m->bin_long_stop && ++ s_m->bin_long_late_early && !bin_long_early) { ++ s_m->bin_long_stop = true; ++ goto recheck; ++ } ++ ++ /* IEEE802.3-2008, 72.6.10.2.3.3 ++ * We only request coefficient updates when no PRESET/INITIALIZE is ++ * pending! We also only request coefficient updates when the ++ * corresponding status is NOT UPDATED and nothing is pending. ++ */ ++ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { ++ if (!s_m->bin_long_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Avoid BinM1Late by requesting an ++ * immediate decrement. 
++ */ ++ if (!bin_m1_early) { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ ld_coe_update(inst); ++ s_m->bin_m1_late_early = bin_m1_early; ++ return; ++ } ++ } ++ ++ /* BinLong correction means changing COP1 */ ++ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) { ++ /* Locate BinLong transition point (if any) ++ * while avoiding BinM1Late. ++ */ ++ if (bin_long_early) { ++ /* request increment c(1) */ ++ temp = INCREMENT << COP1_SHIFT; ++ inst->ld_update |= temp; ++ } else { ++ /* request decrement c(1) */ ++ temp = DECREMENT << COP1_SHIFT; ++ inst->ld_update |= temp; ++ } ++ ++ ld_coe_update(inst); ++ s_m->bin_long_late_early = bin_long_early; ++ } ++ /* We try to finish BinLong before we do BinM1 */ ++ return; ++ } ++ ++ if (!s_m->bin_m1_stop) { ++ /* BinM1 correction means changing COM1 */ ++ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { ++ /* Locate BinM1 transition point (if any) */ ++ if (bin_m1_early) { ++ /* request increment c(-1) */ ++ temp = INCREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ } else { ++ /* request decrement c(-1) */ ++ temp = DECREMENT << COM1_SHIFT; ++ inst->ld_update |= temp; ++ } ++ ++ ld_coe_update(inst); ++ s_m->bin_m1_late_early = bin_m1_early; ++ } ++ } ++ } ++} ++ ++static int check_an_link(struct phy_device *phydev) ++{ ++ int val; ++ int timeout = 100; ++ ++ while (timeout--) { ++ val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ if (val & XFI_AN_LNK_STAT_UP) ++ return 1; ++ usleep_range(100, 500); ++ } ++ ++ return 0; ++} ++ ++static int is_link_training_fail(struct phy_device *phydev) ++{ ++ int val; ++ ++ val = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS); ++ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) { ++ /* check LNK_STAT for sure */ ++ if (check_an_link(phydev)) ++ return 0; ++ return 1; ++ } ++ return 1; ++} ++ ++static int check_rx(struct phy_device *phydev) ++{ ++ return phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & ++ RX_READY_MASK; ++} ++ ++/* Coefficient values have hardware restrictions */ ++static int is_ld_valid(u32 *ld_coe) ++{ ++ u32 ratio_pst1q = *ld_coe; ++ u32 adpt_eq = *(ld_coe + 1); ++ u32 ratio_preq = *(ld_coe + 2); ++ ++ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48) ++ return 0; ++ ++ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >= ++ ((adpt_eq - ratio_pst1q - ratio_preq) * 17)) ++ return 0; ++ ++ if (ratio_preq > ratio_pst1q) ++ return 0; ++ ++ if (ratio_preq > 8) ++ return 0; ++ ++ if (adpt_eq < 26) ++ return 0; ++ ++ if (ratio_pst1q > 16) ++ return 0; ++ ++ return 1; ++} ++ ++#define VAL_INVALID 0xff ++ ++static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5, ++ 0x7, 0x9, 0xb, 0xc, VAL_INVALID}; ++static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, ++ 0x7, 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID}; ++ ++static int is_value_allowed(const u32 *val_table, u32 val) ++{ ++ int i; ++ ++ for (i = 0;; i++) { ++ if (*(val_table + i) == VAL_INVALID) ++ return 0; ++ if (*(val_table + i) == val) ++ return 1; ++ } ++} ++ ++static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request) ++{ ++ u32 ld_limit[3], ld_coe[3], step[3]; ++ ++ ld_coe[0] = inst->ratio_pst1q; ++ ld_coe[1] = inst->adpt_eq; ++ ld_coe[2] = inst->ratio_preq; ++ ++ /* Information specific to the Freescale SerDes for 10GBase-KR: ++ * Incrementing C(+1) means *decrementing* RATIO_PST1Q ++ * Incrementing C(0) means incrementing ADPT_EQ ++ * Incrementing C(-1) means *decrementing* RATIO_PREQ ++ */ ++ step[0] = -1; ++ step[1] = 1; ++ step[2] = -1; ++ ++ switch 
(request) { ++ case INCREMENT: ++ ld_limit[0] = POST_COE_MAX; ++ ld_limit[1] = ZERO_COE_MAX; ++ ld_limit[2] = PRE_COE_MAX; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] += step[field]; ++ else ++ /* MAX */ ++ return 2; ++ break; ++ case DECREMENT: ++ ld_limit[0] = POST_COE_MIN; ++ ld_limit[1] = ZERO_COE_MIN; ++ ld_limit[2] = PRE_COE_MIN; ++ if (ld_coe[field] != ld_limit[field]) ++ ld_coe[field] -= step[field]; ++ else ++ /* MIN */ ++ return 1; ++ break; ++ default: ++ break; ++ } ++ ++ if (is_ld_valid(ld_coe)) { ++ /* accept new ld */ ++ inst->ratio_pst1q = ld_coe[0]; ++ inst->adpt_eq = ld_coe[1]; ++ inst->ratio_preq = ld_coe[2]; ++ /* only some values for preq and pst1q can be used. ++ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc. ++ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10. ++ */ ++ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) { ++ dev_dbg(&inst->phydev->dev, ++ "preq skipped value: %d.\n", ld_coe[2]); ++ return 0; ++ } ++ ++ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) { ++ dev_dbg(&inst->phydev->dev, ++ "pst1q skipped value: %d.\n", ld_coe[0]); ++ return 0; ++ } ++ ++ tune_tecr0(inst); ++ } else { ++ if (request == DECREMENT) ++ /* MIN */ ++ return 1; ++ if (request == INCREMENT) ++ /* MAX */ ++ return 2; ++ } ++ ++ return 0; ++} ++ ++static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld) ++{ ++ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX}; ++ u32 mask, val; ++ ++ switch (field) { ++ case COE_COP1: ++ mask = COP1_MASK; ++ val = ld_coe[new_ld] << COP1_SHIFT; ++ break; ++ case COE_COZ: ++ mask = COZ_MASK; ++ val = ld_coe[new_ld] << COZ_SHIFT; ++ break; ++ case COE_COM: ++ mask = COM1_MASK; ++ val = ld_coe[new_ld] << COM1_SHIFT; ++ break; ++ default: ++ return; ++ break; ++ } ++ ++ inst->ld_status &= ~mask; ++ inst->ld_status |= val; ++} ++ ++static void check_request(struct fsl_xgkr_inst *inst, int request) ++{ ++ int cop1_req, coz_req, com_req; ++ int old_status, new_ld_sta; ++ ++ cop1_req = (request & COP1_MASK) >> COP1_SHIFT; ++ coz_req = (request & COZ_MASK) >> COZ_SHIFT; ++ com_req = (request & COM1_MASK) >> COM1_SHIFT; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED! ++ */ ++ old_status = inst->ld_status; ++ ++ if (cop1_req && !(inst->ld_status & COP1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req); ++ min_max_updated(inst, COE_COP1, new_ld_sta); ++ } ++ ++ if (coz_req && !(inst->ld_status & COZ_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COZ, coz_req); ++ min_max_updated(inst, COE_COZ, new_ld_sta); ++ } ++ ++ if (com_req && !(inst->ld_status & COM1_MASK)) { ++ new_ld_sta = inc_dec(inst, COE_COM, com_req); ++ min_max_updated(inst, COE_COM, new_ld_sta); ++ } ++ ++ if (old_status != inst->ld_status) ++ ld_coe_status(inst); ++ ++} ++ ++static void preset(struct fsl_xgkr_inst *inst) ++{ ++ /* These are all MAX values from the IEEE802.3 perspective! 
*/ ++ inst->ratio_pst1q = POST_COE_MAX; ++ inst->adpt_eq = ZERO_COE_MAX; ++ inst->ratio_preq = PRE_COE_MAX; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_MAX << COP1_SHIFT | ++ COE_MAX << COZ_SHIFT | ++ COE_MAX << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void initialize(struct fsl_xgkr_inst *inst) ++{ ++ inst->ratio_preq = RATIO_PREQ; ++ inst->ratio_pst1q = RATIO_PST1Q; ++ inst->adpt_eq = RATIO_EQ; ++ ++ tune_tecr0(inst); ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ inst->ld_status |= COE_UPDATED << COP1_SHIFT | ++ COE_UPDATED << COZ_SHIFT | ++ COE_UPDATED << COM1_SHIFT; ++ ld_coe_status(inst); ++} ++ ++static void train_rx(struct fsl_xgkr_inst *inst) ++{ ++ struct phy_device *phydev = inst->phydev; ++ int request, old_ld_status; ++ ++ /* get request from LP */ ++ request = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU) & ++ (LD_ALL_MASK); ++ old_ld_status = inst->ld_status; ++ ++ /* IEEE802.3-2008, 72.6.10.2.5 ++ * Ensure we always go to NOT UDPATED for status reporting in ++ * response to HOLD requests. ++ * IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * ... but only if PRESET/INITIALIZE are not active to ensure ++ * we keep status until they are released! ++ */ ++ if (!(request & (PRESET_MASK | INIT_MASK))) { ++ if (!(request & COP1_MASK)) ++ inst->ld_status &= ~COP1_MASK; ++ ++ if (!(request & COZ_MASK)) ++ inst->ld_status &= ~COZ_MASK; ++ ++ if (!(request & COM1_MASK)) ++ inst->ld_status &= ~COM1_MASK; ++ ++ if (old_ld_status != inst->ld_status) ++ ld_coe_status(inst); ++ ++ } ++ ++ /* As soon as the LP shows ready, no need to do any more updates. */ ++ if (check_rx(phydev)) { ++ /* LP receiver is ready */ ++ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) { ++ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); ++ ld_coe_status(inst); ++ } ++ } else { ++ /* IEEE802.3-2008, 72.6.10.2.3.1/2 ++ * only act on PRESET/INITIALIZE if all status is NOT UPDATED. ++ */ ++ if (request & (PRESET_MASK | INIT_MASK)) { ++ if (!(inst->ld_status & ++ (COP1_MASK | COZ_MASK | COM1_MASK))) { ++ if (request & PRESET_MASK) ++ preset(inst); ++ ++ if (request & INIT_MASK) ++ initialize(inst); ++ } ++ } ++ ++ /* LP Coefficient are not in HOLD */ ++ if (request & REQUEST_MASK) ++ check_request(inst, request & REQUEST_MASK); ++ } ++} ++ ++static void xgkr_wq_state_machine(struct work_struct *work) ++{ ++ struct fsl_xgkr_wk *wk = container_of(work, ++ struct fsl_xgkr_wk, xgkr_wk); ++ struct fsl_xgkr_inst *inst = wk->xgkr_inst; ++ struct training_state_machine *s_m = &inst->t_s_m; ++ struct phy_device *phydev = inst->phydev; ++ int val = 0, i; ++ int an_state, lt_state; ++ unsigned long dead_line; ++ int rx_ok, tx_ok; ++ ++ if (s_m->link_up) { ++ /* check abnormal link down events when link is up, for ex. ++ * the cable is pulled out or link partner is down. 
++ */ ++ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ if (!(an_state & XFI_AN_LNK_STAT_UP)) { ++ dev_info(&phydev->dev, ++ "Detect hotplug, restart training!\n"); ++ init_inst(inst, 1); ++ start_an(phydev); ++ } ++ s_m->running = false; ++ return; ++ } ++ ++ if (!s_m->an_ok) { ++ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_BP_STATUS); ++ if (!(an_state & AN_10GKR_MASK)) { ++ s_m->running = false; ++ return; ++ } else ++ s_m->an_ok = true; ++ } ++ ++ dev_info(&phydev->dev, "is training.\n"); ++ ++ start_lt(phydev); ++ for (i = 0; i < 2;) { ++ /* i < 1 also works, but start one more try immediately when ++ * failed can adjust our training frequency to match other ++ * devices. This can help the link being established more ++ * quickly. ++ */ ++ dead_line = jiffies + msecs_to_jiffies(500); ++ while (time_before(jiffies, dead_line)) { ++ val = phy_read_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS); ++ if (val & TRAIN_FAIL) { ++ /* LT failed already, reset lane to avoid ++ * it run into hanging, then start LT again. ++ */ ++ reset_gcr0(inst); ++ start_lt(phydev); ++ } else if (val & PMD_STATUS_SUP_STAT && ++ val & PMD_STATUS_FRAME_LOCK) ++ break; ++ usleep_range(100, 500); ++ } ++ ++ if (!(val & PMD_STATUS_FRAME_LOCK && ++ val & PMD_STATUS_SUP_STAT)) { ++ i++; ++ continue; ++ } ++ ++ /* init process */ ++ rx_ok = tx_ok = false; ++ /* the LT should be finished in 500ms, failed or OK. */ ++ dead_line = jiffies + msecs_to_jiffies(500); ++ ++ while (time_before(jiffies, dead_line)) { ++ /* check if the LT is already failed */ ++ lt_state = phy_read_mmd(phydev, FSL_XFI_PMD, ++ FSL_XFI_KR_PMD_STATUS); ++ if (lt_state & TRAIN_FAIL) { ++ reset_gcr0(inst); ++ break; ++ } ++ ++ rx_ok = check_rx(phydev); ++ tx_ok = s_m->tx_complete; ++ ++ if (rx_ok && tx_ok) ++ break; ++ ++ if (!rx_ok) ++ train_rx(inst); ++ ++ if (!tx_ok) ++ train_tx(inst); ++ usleep_range(100, 500); ++ } ++ ++ i++; ++ /* check LT result */ ++ if (is_link_training_fail(phydev)) { ++ /* reset state machine */ ++ init_inst(inst, 0); ++ continue; ++ } else { ++ stop_lt(phydev); ++ s_m->running = false; ++ s_m->link_up = true; ++ dev_info(&phydev->dev, "LT training is SUCCEEDED!\n"); ++ break; ++ } ++ } ++ ++ if (!s_m->link_up) { ++ /* reset state machine */ ++ init_inst(inst, 0); ++ } ++} ++ ++static void xgkr_timer_handle(unsigned long arg) ++{ ++ struct list_head *pos; ++ struct fsl_xgkr_wk *wk; ++ struct fsl_xgkr_inst *xgkr_inst; ++ struct phy_device *phydev; ++ struct training_state_machine *s_m; ++ ++ list_for_each(pos, &fsl_xgkr_list) { ++ wk = list_entry(pos, struct fsl_xgkr_wk, xgkr_list); ++ xgkr_inst = wk->xgkr_inst; ++ phydev = xgkr_inst->phydev; ++ s_m = &xgkr_inst->t_s_m; ++ ++ if (!s_m->running && (!s_m->an_ok || s_m->link_up)) { ++ s_m->running = true; ++ queue_work(xgkr_wq, (struct work_struct *)wk); ++ } ++ } ++ ++ if (!list_empty(&fsl_xgkr_list)) ++ mod_timer(&xgkr_timer, ++ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); ++} ++ ++static int fsl_xgkr_bind_serdes(const char *lane_name, ++ struct phy_device *phydev) ++{ ++ unsigned long serdes_base; ++ unsigned long lane_base; ++ int i; ++ ++ for (i = 0; i < SERDES_MAX; i++) { ++ if (strstr(lane_name, s_map[i].serdes_name)) { ++ serdes_base = s_map[i].serdes_base; ++ break; ++ } ++ } ++ ++ if (i == SERDES_MAX) ++ goto serdes_err; ++ ++ for (i = 0; i < LANE_MAX; i++) { ++ if (strstr(lane_name, l_map[i].lane_name)) { ++ lane_base = l_map[i].lane_base; ++ break; ++ } ++ } ++ ++ if (i == LANE_MAX) ++ goto lane_err; ++ ++ phydev->priv = 
ioremap(serdes_base + lane_base, ++ sizeof(struct per_lane_ctrl_status)); ++ if (!phydev->priv) ++ return -ENOMEM; ++ ++ return 0; ++ ++serdes_err: ++ dev_err(&phydev->dev, "Unknown SerDes name"); ++ return -EINVAL; ++lane_err: ++ dev_err(&phydev->dev, "Unknown Lane name"); ++ return -EINVAL; ++} ++ ++static int fsl_xgkr_probe(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_inst *xgkr_inst; ++ struct fsl_xgkr_wk *xgkr_wk; ++ struct device_node *child; ++ const char *lane_name; ++ int len; ++ ++ child = phydev->dev.of_node; ++ ++ /* if there is lane-instance property, 10G-KR need to run */ ++ lane_name = of_get_property(child, "lane-instance", &len); ++ if (!lane_name || (fsl_xgkr_bind_serdes(lane_name, phydev))) ++ return 0; ++ ++ xgkr_inst = kzalloc(sizeof(struct fsl_xgkr_inst), GFP_KERNEL); ++ if (!xgkr_inst) ++ goto mem_err1; ++ ++ xgkr_inst->reg_base = phydev->priv; ++ ++ xgkr_inst->bus = phydev->bus; ++ ++ xgkr_inst->phydev = phydev; ++ ++ init_inst(xgkr_inst, 1); ++ ++ xgkr_wk = kzalloc(sizeof(struct fsl_xgkr_wk), GFP_KERNEL); ++ if (!xgkr_wk) ++ goto mem_err2; ++ ++ xgkr_wk->xgkr_inst = xgkr_inst; ++ phydev->priv = xgkr_wk; ++ ++ list_add(&xgkr_wk->xgkr_list, &fsl_xgkr_list); ++ ++ if (!fire_timer) { ++ setup_timer(&xgkr_timer, xgkr_timer_handle, ++ (unsigned long)&fsl_xgkr_list); ++ mod_timer(&xgkr_timer, ++ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); ++ fire_timer = 1; ++ xgkr_wq = create_workqueue("fsl_xgkr"); ++ } ++ INIT_WORK((struct work_struct *)xgkr_wk, xgkr_wq_state_machine); ++ ++ /* start auto-negotiation to detect link partner */ ++ start_an(phydev); ++ ++ return 0; ++mem_err2: ++ kfree(xgkr_inst); ++mem_err1: ++ dev_err(&phydev->dev, "failed to allocate memory!\n"); ++ return -ENOMEM; ++} ++ ++static int fsl_xgkr_config_init(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int fsl_xgkr_config_aneg(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static void fsl_xgkr_remove(struct phy_device *phydev) ++{ ++ struct fsl_xgkr_wk *wk = (struct fsl_xgkr_wk *)phydev->priv; ++ struct fsl_xgkr_inst *xgkr_inst = wk->xgkr_inst; ++ struct list_head *this, *next; ++ struct fsl_xgkr_wk *tmp; ++ ++ list_for_each_safe(this, next, &fsl_xgkr_list) { ++ tmp = list_entry(this, struct fsl_xgkr_wk, xgkr_list); ++ if (tmp == wk) { ++ cancel_work_sync((struct work_struct *)wk); ++ list_del(this); ++ } ++ } ++ ++ if (list_empty(&fsl_xgkr_list)) ++ del_timer(&xgkr_timer); ++ ++ if (xgkr_inst->reg_base) ++ iounmap(xgkr_inst->reg_base); ++ ++ kfree(xgkr_inst); ++ kfree(wk); ++} ++ ++static int fsl_xgkr_read_status(struct phy_device *phydev) ++{ ++ int val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); ++ ++ phydev->speed = SPEED_10000; ++ phydev->duplex = 1; ++ ++ if (val & XFI_AN_LNK_STAT_UP) ++ phydev->link = 1; ++ else ++ phydev->link = 0; ++ ++ return 0; ++} ++ ++static int fsl_xgkr_match_phy_device(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID; ++} ++ ++static int fsl_xgkr_match_phy_device2(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID2; ++} ++ ++static struct phy_driver fsl_xgkr_driver[] = { ++ { ++ .phy_id = FSL_XFI_PCS_PHY_ID, ++ .name = "Freescale 10G KR Rev1", ++ .phy_id_mask = 0xffffffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = fsl_xgkr_probe, ++ .config_init = &fsl_xgkr_config_init, ++ .config_aneg = &fsl_xgkr_config_aneg, ++ .read_status = &fsl_xgkr_read_status, ++ .match_phy_device = fsl_xgkr_match_phy_device, ++ .remove = 
fsl_xgkr_remove, ++ .driver = { .owner = THIS_MODULE,}, ++ }, ++ { ++ .phy_id = FSL_XFI_PCS_PHY_ID2, ++ .name = "Freescale 10G KR Rev2", ++ .phy_id_mask = 0xffffffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = fsl_xgkr_probe, ++ .config_init = &fsl_xgkr_config_init, ++ .config_aneg = &fsl_xgkr_config_aneg, ++ .read_status = &fsl_xgkr_read_status, ++ .match_phy_device = fsl_xgkr_match_phy_device2, ++ .remove = fsl_xgkr_remove, ++ .driver = { .owner = THIS_MODULE,}, ++ }, ++}; ++ ++static int __init fsl_xgkr_init(void) ++{ ++ return phy_drivers_register(fsl_xgkr_driver, ++ ARRAY_SIZE(fsl_xgkr_driver)); ++} ++ ++static void __exit fsl_xgkr_exit(void) ++{ ++ phy_drivers_unregister(fsl_xgkr_driver, ++ ARRAY_SIZE(fsl_xgkr_driver)); ++} ++ ++module_init(fsl_xgkr_init); ++module_exit(fsl_xgkr_exit); ++ ++static struct mdio_device_id __maybe_unused freescale_tbl[] = { ++ { FSL_XFI_PCS_PHY_ID, 0xffffffff }, ++ { FSL_XFI_PCS_PHY_ID2, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, freescale_tbl); +diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c +new file mode 100644 +index 0000000..91e1bec +--- /dev/null ++++ b/drivers/net/phy/teranetics.c +@@ -0,0 +1,135 @@ ++/* ++ * Driver for Teranetics PHY ++ * ++ * Author: Shaohui Xie ++ * ++ * Copyright 2015 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public License ++ * version 2. This program is licensed "as is" without any warranty of any ++ * kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++MODULE_DESCRIPTION("Teranetics PHY driver"); ++MODULE_AUTHOR("Shaohui Xie "); ++MODULE_LICENSE("GPL v2"); ++ ++#define PHY_ID_TN2020 0x00a19410 ++#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 ++#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 ++#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 ++#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 ++#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 ++ ++#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ ++ MDIO_PHYXS_LNSTAT_SYNC1 | \ ++ MDIO_PHYXS_LNSTAT_SYNC2 | \ ++ MDIO_PHYXS_LNSTAT_SYNC3 | \ ++ MDIO_PHYXS_LNSTAT_ALIGN) ++ ++static int teranetics_config_init(struct phy_device *phydev) ++{ ++ phydev->supported = SUPPORTED_10000baseT_Full; ++ phydev->advertising = SUPPORTED_10000baseT_Full; ++ ++ return 0; ++} ++ ++static int teranetics_soft_reset(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int teranetics_aneg_done(struct phy_device *phydev) ++{ ++ int reg; ++ ++ /* auto negotiation state can only be checked when using copper ++ * port, if using fiber port, just lie it's done. ++ */ ++ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); ++ } ++ ++ return 1; ++} ++ ++static int teranetics_config_aneg(struct phy_device *phydev) ++{ ++ return 0; ++} ++ ++static int teranetics_read_status(struct phy_device *phydev) ++{ ++ int reg; ++ ++ phydev->link = 1; ++ ++ phydev->speed = SPEED_10000; ++ phydev->duplex = DUPLEX_FULL; ++ ++ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { ++ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); ++ if (reg < 0 || ++ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { ++ phydev->link = 0; ++ return 0; ++ } ++ ++ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); ++ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) ++ phydev->link = 0; ++ } ++ ++ return 0; ++} ++ ++static int teranetics_match_phy_device(struct phy_device *phydev) ++{ ++ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; ++} ++ ++static struct phy_driver teranetics_driver[] = { ++{ ++ .phy_id = PHY_ID_TN2020, ++ .phy_id_mask = 0xffffffff, ++ .name = "Teranetics TN2020", ++ .soft_reset = teranetics_soft_reset, ++ .aneg_done = teranetics_aneg_done, ++ .config_init = teranetics_config_init, ++ .config_aneg = teranetics_config_aneg, ++ .read_status = teranetics_read_status, ++ .match_phy_device = teranetics_match_phy_device, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++}; ++ ++static int __init teranetics_init(void) ++{ ++ return phy_drivers_register(teranetics_driver, ++ ARRAY_SIZE(teranetics_driver)); ++} ++ ++static void __exit teranetics_exit(void) ++{ ++ return phy_drivers_unregister(teranetics_driver, ++ ARRAY_SIZE(teranetics_driver)); ++} ++ ++module_init(teranetics_init); ++module_exit(teranetics_exit); ++ ++static struct mdio_device_id __maybe_unused teranetics_tbl[] = { ++ { PHY_ID_TN2020, 0xffffffff }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(mdio, teranetics_tbl); +diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig +index 4690ae9..43ff2b5 100644 +--- a/drivers/staging/Kconfig ++++ b/drivers/staging/Kconfig +@@ -108,4 +108,8 @@ source "drivers/staging/skein/Kconfig" + + source "drivers/staging/unisys/Kconfig" + ++source "drivers/staging/fsl-mc/Kconfig" ++ ++source "drivers/staging/fsl-dpaa2/Kconfig" ++ + endif # STAGING +diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile +index c780a0e..a9bd303 100644 +--- a/drivers/staging/Makefile ++++ b/drivers/staging/Makefile +@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ + obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ + obj-$(CONFIG_CRYPTO_SKEIN) += skein/ + obj-$(CONFIG_UNISYSSPAR) += unisys/ ++obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ ++obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ +diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig +new file mode 100644 +index 0000000..3fe47bc +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Kconfig +@@ -0,0 +1,12 @@ ++# ++# Freescale device configuration ++# ++ ++config FSL_DPAA2 ++ bool "Freescale DPAA2 devices" ++ depends on FSL_MC_BUS ++ ---help--- ++ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. ++# TODO move DPIO driver in-here? ++source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" ++source "drivers/staging/fsl-dpaa2/mac/Kconfig" +diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile +new file mode 100644 +index 0000000..bc687a1 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/Makefile +@@ -0,0 +1,6 @@ ++# ++# Makefile for the Freescale network device drivers. 
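(Illustrative aside, not part of the patch: the FSL_DPAA2 Kconfig entry above and the ethernet/mac Kconfigs that follow chain together, so a build that wants the DPAA2 Ethernet driver has to satisfy the whole dependency line. A minimal config fragment might look like the sketch below; it assumes the FSL_MC_BUS and FSL_MC_DPIO symbols provided by the fsl-mc bus Kconfig elsewhere in this patch, and FSL_DPAA2_MAC from the mac/ Kconfig. Tristate symbols could equally be =m.)

# illustrative config fragment only; symbol names as referenced by the Kconfig files in this patch
CONFIG_FSL_MC_BUS=y
CONFIG_FSL_MC_DPIO=y
CONFIG_FSL_DPAA2=y
CONFIG_FSL_DPAA2_ETH=y
CONFIG_FSL_DPAA2_MAC=y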
++# ++ ++obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ ++obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/Kconfig b/drivers/staging/fsl-dpaa2/ethernet/Kconfig +new file mode 100644 +index 0000000..df91da2 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig +@@ -0,0 +1,36 @@ ++# ++# Freescale DPAA Ethernet driver configuration ++# ++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++menuconfig FSL_DPAA2_ETH ++ tristate "Freescale DPAA2 Ethernet" ++ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO ++ select FSL_DPAA2_MAC ++ default y ++ ---help--- ++ Freescale Data Path Acceleration Architecture Ethernet ++ driver, using the Freescale MC bus driver. ++ ++if FSL_DPAA2_ETH ++ ++config FSL_DPAA2_ETH_USE_ERR_QUEUE ++ bool "Enable Rx error queue" ++ default n ++ ---help--- ++ Allow Rx error frames to be enqueued on an error queue ++ and processed by the driver (by default they are dropped ++ in hardware). ++ This may impact performance, recommended for debugging ++ purposes only. ++ ++config FSL_DPAA2_ETH_DEBUGFS ++ depends on DEBUG_FS && FSL_QBMAN_DEBUG ++ bool "Enable debugfs support" ++ default n ++ ---help--- ++ Enable advanced statistics through debugfs interface. ++endif +diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile +new file mode 100644 +index 0000000..74bff15 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile +@@ -0,0 +1,21 @@ ++# ++# Makefile for the Freescale DPAA Ethernet controllers ++# ++# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++ccflags-y += -DVERSION=\"\" ++ ++obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o ++ ++fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o ++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o ++ ++#Needed by the tracing framework ++CFLAGS_dpaa2-eth.o := -I$(src) ++ ++ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y) ++ GCOV_PROFILE := y ++endif +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c +new file mode 100644 +index 0000000..c397983 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c +@@ -0,0 +1,317 @@ ++ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include "dpaa2-eth.h" ++#include "dpaa2-eth-debugfs.h" ++ ++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" ++ ++static struct dentry *dpaa2_dbg_root; ++ ++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct rtnl_link_stats64 *stats; ++ struct dpaa2_eth_drv_stats *extras; ++ int i; ++ ++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", ++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", ++ "Tx SG", "Enq busy"); ++ ++ for_each_online_cpu(i) { ++ stats = per_cpu_ptr(priv->percpu_stats, i); ++ extras = per_cpu_ptr(priv->percpu_extras, i); ++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", ++ i, ++ stats->rx_packets, ++ stats->rx_errors, ++ extras->rx_sg_frames, ++ stats->tx_packets, ++ stats->tx_errors, ++ extras->tx_conf_frames, ++ extras->tx_sg_frames, ++ extras->tx_portal_busy); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_cpu_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, "single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_cpu_ops = { ++ .open = dpaa2_dbg_cpu_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static char *fq_type_to_str(struct dpaa2_eth_fq *fq) ++{ ++ switch (fq->type) { ++ case DPAA2_RX_FQ: ++ return "Rx"; ++ case DPAA2_TX_CONF_FQ: ++ return "Tx conf"; ++ case DPAA2_RX_ERR_FQ: ++ return "Rx err"; ++ default: ++ return "N/A"; ++ } ++} ++ ++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct dpaa2_eth_fq *fq; ++ u32 fcnt, bcnt; ++ int i, err; ++ ++ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s\n", ++ "VFQID", "CPU", "Type", "Frames", "Pending frames"); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); ++ if (err) ++ fcnt = 0; ++ ++ seq_printf(file, "%5d%16d%16s%16llu%16u\n", ++ fq->fqid, ++ fq->target_cpu, ++ fq_type_to_str(fq), ++ fq->stats.frames, ++ fcnt); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_fqs_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, 
"single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_fq_ops = { ++ .open = dpaa2_dbg_fqs_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); ++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", ++ "CHID", "CPU", "Deq busy", "Frames", "CDANs", ++ "Avg frm/CDAN"); ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", ++ ch->ch_id, ++ ch->nctx.desired_cpu, ++ ch->stats.dequeue_portal_busy, ++ ch->stats.frames, ++ ch->stats.cdan, ++ ch->stats.frames / ch->stats.cdan); ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) ++{ ++ int err; ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; ++ ++ err = single_open(file, dpaa2_dbg_ch_show, priv); ++ if (err < 0) ++ netdev_err(priv->net_dev, "single_open() failed\n"); ++ ++ return err; ++} ++ ++static const struct file_operations dpaa2_dbg_ch_ops = { ++ .open = dpaa2_dbg_ch_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *offset) ++{ ++ struct dpaa2_eth_priv *priv = file->private_data; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct dpaa2_eth_fq *fq; ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for_each_online_cpu(i) { ++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); ++ memset(percpu_stats, 0, sizeof(*percpu_stats)); ++ ++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i); ++ memset(percpu_extras, 0, sizeof(*percpu_extras)); ++ } ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ memset(&fq->stats, 0, sizeof(fq->stats)); ++ } ++ ++ for_each_cpu(i, &priv->dpio_cpumask) { ++ ch = priv->channel[i]; ++ memset(&ch->stats, 0, sizeof(ch->stats)); ++ } ++ ++ return count; ++} ++ ++static const struct file_operations dpaa2_dbg_reset_ops = { ++ .open = simple_open, ++ .write = dpaa2_dbg_reset_write, ++}; ++ ++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) ++{ ++ if (!dpaa2_dbg_root) ++ return; ++ ++ /* Create a directory for the interface */ ++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, ++ dpaa2_dbg_root); ++ if (!priv->dbg.dir) { ++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); ++ return; ++ } ++ ++ /* per-cpu stats file */ ++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_cpu_ops); ++ if (!priv->dbg.cpu_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_cpu_stats; ++ } ++ ++ /* per-fq stats file */ ++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_fq_ops); ++ if (!priv->dbg.fq_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_fq_stats; ++ } ++ ++ /* per-fq stats file */ ++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_ch_ops); ++ if (!priv->dbg.fq_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_ch_stats; ++ } ++ ++ /* reset stats */ ++ priv->dbg.reset_stats = 
debugfs_create_file("reset_stats", S_IWUSR, ++ priv->dbg.dir, priv, ++ &dpaa2_dbg_reset_ops); ++ if (!priv->dbg.reset_stats) { ++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); ++ goto err_reset_stats; ++ } ++ ++ return; ++ ++err_reset_stats: ++ debugfs_remove(priv->dbg.ch_stats); ++err_ch_stats: ++ debugfs_remove(priv->dbg.fq_stats); ++err_fq_stats: ++ debugfs_remove(priv->dbg.cpu_stats); ++err_cpu_stats: ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) ++{ ++ debugfs_remove(priv->dbg.reset_stats); ++ debugfs_remove(priv->dbg.fq_stats); ++ debugfs_remove(priv->dbg.ch_stats); ++ debugfs_remove(priv->dbg.cpu_stats); ++ debugfs_remove(priv->dbg.dir); ++} ++ ++void dpaa2_eth_dbg_init(void) ++{ ++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); ++ if (!dpaa2_dbg_root) { ++ pr_err("DPAA2-ETH: debugfs create failed\n"); ++ return; ++ } ++ ++ pr_info("DPAA2-ETH: debugfs created\n"); ++} ++ ++void __exit dpaa2_eth_dbg_exit(void) ++{ ++ debugfs_remove(dpaa2_dbg_root); ++} ++ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h +new file mode 100644 +index 0000000..7ba706c +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h +@@ -0,0 +1,61 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef DPAA2_ETH_DEBUGFS_H ++#define DPAA2_ETH_DEBUGFS_H ++ ++#include ++#include "dpaa2-eth.h" ++ ++extern struct dpaa2_eth_priv *priv; ++ ++struct dpaa2_debugfs { ++ struct dentry *dir; ++ struct dentry *fq_stats; ++ struct dentry *ch_stats; ++ struct dentry *cpu_stats; ++ struct dentry *reset_stats; ++}; ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++void dpaa2_eth_dbg_init(void); ++void dpaa2_eth_dbg_exit(void); ++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); ++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); ++#else ++static inline void dpaa2_eth_dbg_init(void) {} ++static inline void dpaa2_eth_dbg_exit(void) {} ++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} ++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} ++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ ++ ++#endif /* DPAA2_ETH_DEBUGFS_H */ ++ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +new file mode 100644 +index 0000000..3b040e8 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h +@@ -0,0 +1,185 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM dpaa2_eth ++ ++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _DPAA2_ETH_TRACE_H ++ ++#include ++#include ++#include "dpaa2-eth.h" ++#include ++ ++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" ++/* trace_printk format for raw buffer event class */ ++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" ++ ++/* This is used to declare a class of events. ++ * individual events of this type will be defined below. 
++ */ ++ ++/* Store details about a frame descriptor */ ++DECLARE_EVENT_CLASS(dpaa2_eth_fd, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, fd), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(u64, fd_addr) ++ __field(u32, fd_len) ++ __field(u16, fd_offset) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->fd_addr = dpaa2_fd_get_addr(fd); ++ __entry->fd_len = dpaa2_fd_get_len(fd); ++ __entry->fd_offset = dpaa2_fd_get_offset(fd); ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_FMT, ++ __get_str(name), ++ __entry->fd_addr, ++ __entry->fd_len, ++ __entry->fd_offset) ++); ++ ++/* Now declare events of the above type. Format is: ++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class ++ */ ++ ++/* Tx (egress) fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Rx fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Tx confirmation fd */ ++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ++ TP_PROTO(struct net_device *netdev, ++ const struct dpaa2_fd *fd), ++ ++ TP_ARGS(netdev, fd) ++); ++ ++/* Log data about raw buffers. Useful for tracing DPBP content. */ ++TRACE_EVENT(dpaa2_eth_buf_seed, ++ /* Trace function prototype */ ++ TP_PROTO(struct net_device *netdev, ++ /* virtual address and size */ ++ void *vaddr, ++ size_t size, ++ /* dma map address and size */ ++ dma_addr_t dma_addr, ++ size_t map_size, ++ /* buffer pool id, if relevant */ ++ u16 bpid), ++ ++ /* Repeat argument list here */ ++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), ++ ++ /* A structure containing the relevant information we want ++ * to record. Declare name and type for each normal element, ++ * name, type and size for arrays. Use __string for variable ++ * length strings. ++ */ ++ TP_STRUCT__entry( ++ __field(void *, vaddr) ++ __field(size_t, size) ++ __field(dma_addr_t, dma_addr) ++ __field(size_t, map_size) ++ __field(u16, bpid) ++ __string(name, netdev->name) ++ ), ++ ++ /* The function that assigns values to the above declared ++ * fields ++ */ ++ TP_fast_assign( ++ __entry->vaddr = vaddr; ++ __entry->size = size; ++ __entry->dma_addr = dma_addr; ++ __entry->map_size = map_size; ++ __entry->bpid = bpid; ++ __assign_str(name, netdev->name); ++ ), ++ ++ /* This is what gets printed when the trace event is ++ * triggered. ++ */ ++ TP_printk(TR_BUF_FMT, ++ __get_str(name), ++ __entry->vaddr, ++ __entry->size, ++ &__entry->dma_addr, ++ __entry->map_size, ++ __entry->bpid) ++); ++ ++/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). ++ * The syntax is the same as for DECLARE_EVENT_CLASS(). ++ */ ++ ++#endif /* _DPAA2_ETH_TRACE_H */ ++ ++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH . 
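(Illustrative aside, not taken from the patch: the comments above describe the DECLARE_EVENT_CLASS/DEFINE_EVENT split used by this trace header. Adding a further event of the dpaa2_eth_fd class, for example a hypothetical dpaa2_rx_err_fd for frames dequeued from the Rx error queue, would only need one more DEFINE_EVENT stanza placed next to the three existing ones inside the include guard; the driver code would then call the generated trace_dpaa2_rx_err_fd() helper just as dpaa2-eth.c calls trace_dpaa2_rx_fd().)

/* Hypothetical event name, shown only to illustrate the DEFINE_EVENT pattern */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_err_fd,
	TP_PROTO(struct net_device *netdev,
		 const struct dpaa2_fd *fd),

	TP_ARGS(netdev, fd)
);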
++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE dpaa2-eth-trace ++#include +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +new file mode 100644 +index 0000000..27d1a91 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +@@ -0,0 +1,2836 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++#include "dpaa2-eth.h" ++ ++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files ++ * using trace events only need to #include ++ */ ++#define CREATE_TRACE_POINTS ++#include "dpaa2-eth-trace.h" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); ++ ++/* Oldest DPAA2 objects version we are compatible with */ ++#define DPAA2_SUPPORTED_DPNI_VERSION 6 ++#define DPAA2_SUPPORTED_DPBP_VERSION 2 ++#define DPAA2_SUPPORTED_DPCON_VERSION 2 ++ ++static void validate_rx_csum(struct dpaa2_eth_priv *priv, ++ u32 fd_status, ++ struct sk_buff *skb) ++{ ++ skb_checksum_none_assert(skb); ++ ++ /* HW checksum validation is disabled, nothing to do here */ ++ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) ++ return; ++ ++ /* Read checksum validation bits */ ++ if (!((fd_status & DPAA2_FAS_L3CV) && ++ (fd_status & DPAA2_FAS_L4CV))) ++ return; ++ ++ /* Inform the stack there's no need to compute L3/L4 csum anymore */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++} ++ ++/* Free a received FD. ++ * Not to be used for Tx conf FDs or on any other paths. 
++ */ ++static void free_rx_fd(struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ void *vaddr) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ struct dpaa2_sg_entry *sgt; ++ void *sg_vaddr; ++ int i; ++ ++ /* If single buffer frame, just free the data buffer */ ++ if (fd_format == dpaa2_fd_single) ++ goto free_buf; ++ ++ /* For S/G frames, we first need to free all SG entries */ ++ sgt = vaddr + dpaa2_fd_get_offset(fd); ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ dpaa2_sg_le_to_cpu(&sgt[i]); ++ ++ addr = dpaa2_sg_get_addr(&sgt[i]); ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ sg_vaddr = phys_to_virt(addr); ++ put_page(virt_to_head_page(sg_vaddr)); ++ ++ if (dpaa2_sg_is_final(&sgt[i])) ++ break; ++ } ++ ++free_buf: ++ put_page(virt_to_head_page(vaddr)); ++} ++ ++/* Build a linear skb based on a single-buffer frame descriptor */ ++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ void *fd_vaddr) ++{ ++ struct sk_buff *skb = NULL; ++ u16 fd_offset = dpaa2_fd_get_offset(fd); ++ u32 fd_length = dpaa2_fd_get_len(fd); ++ ++ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ skb_reserve(skb, fd_offset); ++ skb_put(skb, fd_length); ++ ++ ch->buf_count--; ++ ++ return skb; ++} ++ ++/* Build a non linear (fragmented) skb based on a S/G table */ ++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ struct dpaa2_sg_entry *sgt) ++{ ++ struct sk_buff *skb = NULL; ++ struct device *dev = priv->net_dev->dev.parent; ++ void *sg_vaddr; ++ dma_addr_t sg_addr; ++ u16 sg_offset; ++ u32 sg_length; ++ struct page *page, *head_page; ++ int page_offset; ++ int i; ++ ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { ++ struct dpaa2_sg_entry *sge = &sgt[i]; ++ ++ dpaa2_sg_le_to_cpu(sge); ++ ++ /* NOTE: We only support SG entries in dpaa2_sg_single format, ++ * but this is the only format we may receive from HW anyway ++ */ ++ ++ /* Get the address and length from the S/G entry */ ++ sg_addr = dpaa2_sg_get_addr(sge); ++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ ++ sg_vaddr = phys_to_virt(sg_addr); ++ sg_length = dpaa2_sg_get_len(sge); ++ ++ if (i == 0) { ++ /* We build the skb around the first data buffer */ ++ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ sg_offset = dpaa2_sg_get_offset(sge); ++ skb_reserve(skb, sg_offset); ++ skb_put(skb, sg_length); ++ } else { ++ /* Rest of the data buffers are stored as skb frags */ ++ page = virt_to_page(sg_vaddr); ++ head_page = virt_to_head_page(sg_vaddr); ++ ++ /* Offset in page (which may be compound). ++ * Data in subsequent SG entries is stored from the ++ * beginning of the buffer, so we don't need to add the ++ * sg_offset. 
++ */ ++ page_offset = ((unsigned long)sg_vaddr & ++ (PAGE_SIZE - 1)) + ++ (page_address(page) - page_address(head_page)); ++ ++ skb_add_rx_frag(skb, i - 1, head_page, page_offset, ++ sg_length, DPAA2_ETH_RX_BUF_SIZE); ++ } ++ ++ if (dpaa2_sg_is_final(sge)) ++ break; ++ } ++ ++ /* Count all data buffers + SG table buffer */ ++ ch->buf_count -= i + 2; ++ ++ return skb; ++} ++ ++/* Main Rx frame processing routine */ ++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi) ++{ ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ u8 fd_format = dpaa2_fd_get_format(fd); ++ void *vaddr; ++ struct sk_buff *skb; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpaa2_fas *fas; ++ u32 status = 0; ++ ++ /* Tracing point */ ++ trace_dpaa2_rx_fd(priv->net_dev, fd); ++ ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ vaddr = phys_to_virt(addr); ++ ++ prefetch(vaddr + priv->buf_layout.private_data_size); ++ prefetch(vaddr + dpaa2_fd_get_offset(fd)); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ if (fd_format == dpaa2_fd_single) { ++ skb = build_linear_skb(priv, ch, fd, vaddr); ++ } else if (fd_format == dpaa2_fd_sg) { ++ struct dpaa2_sg_entry *sgt = ++ vaddr + dpaa2_fd_get_offset(fd); ++ skb = build_frag_skb(priv, ch, sgt); ++ put_page(virt_to_head_page(vaddr)); ++ percpu_extras->rx_sg_frames++; ++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); ++ } else { ++ /* We don't support any other format */ ++ goto err_frame_format; ++ } ++ ++ if (unlikely(!skb)) ++ goto err_build_skb; ++ ++ prefetch(skb->data); ++ ++ /* Get the timestamp value */ ++ if (priv->ts_rx_en) { ++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); ++ u64 *ns = (u64 *)(vaddr + ++ priv->buf_layout.private_data_size + ++ sizeof(struct dpaa2_fas)); ++ ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); ++ shhwtstamps->hwtstamp = ns_to_ktime(*ns); ++ } ++ ++ /* Check if we need to validate the L4 csum */ ++ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) { ++ fas = (struct dpaa2_fas *) ++ (vaddr + priv->buf_layout.private_data_size); ++ status = le32_to_cpu(fas->status); ++ validate_rx_csum(priv, status, skb); ++ } ++ ++ skb->protocol = eth_type_trans(skb, priv->net_dev); ++ ++ percpu_stats->rx_packets++; ++ percpu_stats->rx_bytes += skb->len; ++ ++ if (priv->net_dev->features & NETIF_F_GRO) ++ napi_gro_receive(napi, skb); ++ else ++ netif_receive_skb(skb); ++ ++ return; ++err_frame_format: ++err_build_skb: ++ free_rx_fd(priv, fd, vaddr); ++ percpu_stats->rx_dropped++; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++/* Processing of Rx frames received on the error FQ ++ * We check and print the error bits and then free the frame ++ */ ++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t addr = dpaa2_fd_get_addr(fd); ++ void *vaddr; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_fas *fas; ++ u32 status = 0; ++ ++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); ++ vaddr = phys_to_virt(addr); ++ ++ if (fd->simple.frc & DPAA2_FD_FRC_FASV) { ++ fas = (struct dpaa2_fas *) ++ (vaddr + 
priv->buf_layout.private_data_size); ++ status = le32_to_cpu(fas->status); ++ if (net_ratelimit()) ++ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n", ++ status & DPAA2_ETH_RX_ERR_MASK); ++ } ++ free_rx_fd(priv, fd, vaddr); ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_stats->rx_errors++; ++} ++#endif ++ ++/* Consume all frames pull-dequeued into the store. This is the simplest way to ++ * make sure we don't accidentally issue another volatile dequeue which would ++ * overwrite (leak) frames already in the store. ++ * ++ * Observance of NAPI budget is not our concern, leaving that to the caller. ++ */ ++static int consume_frames(struct dpaa2_eth_channel *ch) ++{ ++ struct dpaa2_eth_priv *priv = ch->priv; ++ struct dpaa2_eth_fq *fq; ++ struct dpaa2_dq *dq; ++ const struct dpaa2_fd *fd; ++ int cleaned = 0; ++ int is_last; ++ ++ do { ++ dq = dpaa2_io_store_next(ch->store, &is_last); ++ if (unlikely(!dq)) { ++ /* If we're here, we *must* have placed a ++ * volatile dequeue comnmand, so keep reading through ++ * the store until we get some sort of valid response ++ * token (either a valid frame or an "empty dequeue") ++ */ ++ continue; ++ } ++ ++ fd = dpaa2_dq_fd(dq); ++ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); ++ fq->stats.frames++; ++ ++ fq->consume(priv, ch, fd, &ch->napi); ++ cleaned++; ++ } while (!is_last); ++ ++ return cleaned; ++} ++ ++/* Create a frame descriptor based on a fragmented skb */ ++static int build_sg_fd(struct dpaa2_eth_priv *priv, ++ struct sk_buff *skb, ++ struct dpaa2_fd *fd) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ void *sgt_buf = NULL; ++ dma_addr_t addr; ++ int nr_frags = skb_shinfo(skb)->nr_frags; ++ struct dpaa2_sg_entry *sgt; ++ int i, j, err; ++ int sgt_buf_size; ++ struct scatterlist *scl, *crt_scl; ++ int num_sg; ++ int num_dma_bufs; ++ struct dpaa2_eth_swa *swa; ++ ++ /* Create and map scatterlist. ++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have ++ * to go beyond nr_frags+1. ++ * Note: We don't support chained scatterlists ++ */ ++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) ++ return -EINVAL; ++ ++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); ++ if (unlikely(!scl)) ++ return -ENOMEM; ++ ++ sg_init_table(scl, nr_frags + 1); ++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len); ++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++ if (unlikely(!num_dma_bufs)) { ++ err = -ENOMEM; ++ goto dma_map_sg_failed; ++ } ++ ++ /* Prepare the HW SGT structure */ ++ sgt_buf_size = priv->tx_data_offset + ++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); ++ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); ++ if (unlikely(!sgt_buf)) { ++ err = -ENOMEM; ++ goto sgt_buf_alloc_failed; ++ } ++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); ++ ++ /* PTA from egress side is passed as is to the confirmation side so ++ * we need to clear some fields here in order to find consistent values ++ * on TX confirmation. We are clearing FAS (Frame Annotation Status) ++ * field here. ++ */ ++ memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); ++ ++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); ++ ++ /* Fill in the HW SGT structure. 
++ * ++ * sgt_buf is zeroed out, so the following fields are implicit ++ * in all sgt entries: ++ * - offset is 0 ++ * - format is 'dpaa2_sg_single' ++ */ ++ for_each_sg(scl, crt_scl, num_dma_bufs, i) { ++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); ++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); ++ } ++ dpaa2_sg_set_final(&sgt[i - 1], true); ++ ++ /* Store the skb backpointer in the SGT buffer. ++ * Fit the scatterlist and the number of buffers alongside the ++ * skb backpointer in the SWA. We'll need all of them on Tx Conf. ++ */ ++ swa = (struct dpaa2_eth_swa *)sgt_buf; ++ swa->skb = skb; ++ swa->scl = scl; ++ swa->num_sg = num_sg; ++ swa->num_dma_bufs = num_dma_bufs; ++ ++ /* Hardware expects the SG table to be in little endian format */ ++ for (j = 0; j < i; j++) ++ dpaa2_sg_cpu_to_le(&sgt[j]); ++ ++ /* Separately map the SGT buffer */ ++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) { ++ err = -ENOMEM; ++ goto dma_map_single_failed; ++ } ++ dpaa2_fd_set_offset(fd, priv->tx_data_offset); ++ dpaa2_fd_set_format(fd, dpaa2_fd_sg); ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_len(fd, skb->len); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | ++ DPAA2_FD_CTRL_PTV1; ++ ++ return 0; ++ ++dma_map_single_failed: ++ kfree(sgt_buf); ++sgt_buf_alloc_failed: ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++dma_map_sg_failed: ++ kfree(scl); ++ return err; ++} ++ ++/* Create a frame descriptor based on a linear skb */ ++static int build_single_fd(struct dpaa2_eth_priv *priv, ++ struct sk_buff *skb, ++ struct dpaa2_fd *fd) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u8 *buffer_start; ++ struct sk_buff **skbh; ++ dma_addr_t addr; ++ ++ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - ++ DPAA2_ETH_TX_BUF_ALIGN, ++ DPAA2_ETH_TX_BUF_ALIGN); ++ ++ /* PTA from egress side is passed as is to the confirmation side so ++ * we need to clear some fields here in order to find consistent values ++ * on TX confirmation. We are clearing FAS (Frame Annotation Status) ++ * field here. ++ */ ++ memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); ++ ++ /* Store a backpointer to the skb at the beginning of the buffer ++ * (in the private data area) such that we can release it ++ * on Tx confirm ++ */ ++ skbh = (struct sk_buff **)buffer_start; ++ *skbh = skb; ++ ++ addr = dma_map_single(dev, buffer_start, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_TO_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ return -ENOMEM; ++ ++ dpaa2_fd_set_addr(fd, addr); ++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); ++ dpaa2_fd_set_len(fd, skb->len); ++ dpaa2_fd_set_format(fd, dpaa2_fd_single); ++ ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | ++ DPAA2_FD_CTRL_PTV1; ++ ++ return 0; ++} ++ ++/* FD freeing routine on the Tx path ++ * ++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb ++ * back-pointed to is also freed. ++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of ++ * dpaa2_eth_tx(). ++ * Optionally, return the frame annotation status word (FAS), which needs ++ * to be checked if we're on the confirmation path. 
++ */ ++static void free_tx_fd(const struct dpaa2_eth_priv *priv, ++ const struct dpaa2_fd *fd, ++ u32 *status) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ dma_addr_t fd_addr; ++ struct sk_buff **skbh, *skb; ++ unsigned char *buffer_start; ++ int unmap_size; ++ struct scatterlist *scl; ++ int num_sg, num_dma_bufs; ++ struct dpaa2_eth_swa *swa; ++ bool fd_single; ++ struct dpaa2_fas *fas; ++ ++ fd_addr = dpaa2_fd_get_addr(fd); ++ skbh = phys_to_virt(fd_addr); ++ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single); ++ ++ if (fd_single) { ++ skb = *skbh; ++ buffer_start = (unsigned char *)skbh; ++ /* Accessing the skb buffer is safe before dma unmap, because ++ * we didn't map the actual skb shell. ++ */ ++ dma_unmap_single(dev, fd_addr, ++ skb_tail_pointer(skb) - buffer_start, ++ DMA_TO_DEVICE); ++ } else { ++ swa = (struct dpaa2_eth_swa *)skbh; ++ skb = swa->skb; ++ scl = swa->scl; ++ num_sg = swa->num_sg; ++ num_dma_bufs = swa->num_dma_bufs; ++ ++ /* Unmap the scatterlist */ ++ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); ++ kfree(scl); ++ ++ /* Unmap the SGT buffer */ ++ unmap_size = priv->tx_data_offset + ++ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); ++ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); ++ } ++ ++ /* Get the timestamp value */ ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { ++ struct skb_shared_hwtstamps shhwtstamps; ++ u64 *ns; ++ ++ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); ++ ++ ns = (u64 *)((void *)skbh + ++ priv->buf_layout.private_data_size + ++ sizeof(struct dpaa2_fas)); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ shhwtstamps.hwtstamp = ns_to_ktime(*ns); ++ skb_tstamp_tx(skb, &shhwtstamps); ++ } ++ ++ /* Read the status from the Frame Annotation after we unmap the first ++ * buffer but before we free it. The caller function is responsible ++ * for checking the status value. ++ */ ++ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { ++ fas = (struct dpaa2_fas *) ++ ((void *)skbh + priv->buf_layout.private_data_size); ++ *status = le32_to_cpu(fas->status); ++ } ++ ++ /* Free SGT buffer kmalloc'ed on tx */ ++ if (!fd_single) ++ kfree(skbh); ++ ++ /* Move on with skb release */ ++ dev_kfree_skb(skb); ++} ++ ++static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpaa2_fd fd; ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ u16 queue_mapping, flow_id; ++ int err, i; ++ ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ ++ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { ++ struct sk_buff *ns; ++ ++ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); ++ if (unlikely(!ns)) { ++ percpu_stats->tx_dropped++; ++ goto err_alloc_headroom; ++ } ++ dev_kfree_skb(skb); ++ skb = ns; ++ } ++ ++ /* We'll be holding a back-reference to the skb until Tx Confirmation; ++ * we don't want that overwritten by a concurrent Tx with a cloned skb. 
++ */ ++ skb = skb_unshare(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) { ++ /* skb_unshare() has already freed the skb */ ++ percpu_stats->tx_dropped++; ++ return NETDEV_TX_OK; ++ } ++ ++ /* Setup the FD fields */ ++ memset(&fd, 0, sizeof(fd)); ++ ++ if (skb_is_nonlinear(skb)) { ++ err = build_sg_fd(priv, skb, &fd); ++ percpu_extras->tx_sg_frames++; ++ percpu_extras->tx_sg_bytes += skb->len; ++ } else { ++ err = build_single_fd(priv, skb, &fd); ++ } ++ ++ if (unlikely(err)) { ++ percpu_stats->tx_dropped++; ++ goto err_build_fd; ++ } ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_fd(net_dev, &fd); ++ ++ /* TxConf FQ selection primarily based on cpu affinity; this is ++ * non-migratable context, so it's safe to call smp_processor_id(). ++ */ ++ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; ++ flow_id = priv->fq[queue_mapping].flowid; ++ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { ++ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, ++ flow_id, &fd); ++ if (err != -EBUSY) ++ break; ++ } ++ percpu_extras->tx_portal_busy += i; ++ if (unlikely(err < 0)) { ++ percpu_stats->tx_errors++; ++ /* Clean up everything, including freeing the skb */ ++ free_tx_fd(priv, &fd, NULL); ++ } else { ++ percpu_stats->tx_packets++; ++ percpu_stats->tx_bytes += skb->len; ++ } ++ ++ return NETDEV_TX_OK; ++ ++err_build_fd: ++err_alloc_headroom: ++ dev_kfree_skb(skb); ++ ++ return NETDEV_TX_OK; ++} ++ ++/* Tx confirmation frame processing routine */ ++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ const struct dpaa2_fd *fd, ++ struct napi_struct *napi __always_unused) ++{ ++ struct rtnl_link_stats64 *percpu_stats; ++ struct dpaa2_eth_drv_stats *percpu_extras; ++ u32 status = 0; ++ ++ /* Tracing point */ ++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); ++ ++ percpu_extras = this_cpu_ptr(priv->percpu_extras); ++ percpu_extras->tx_conf_frames++; ++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); ++ ++ free_tx_fd(priv, fd, &status); ++ ++ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { ++ percpu_stats = this_cpu_ptr(priv->percpu_stats); ++ /* Tx-conf logically pertains to the egress path. */ ++ percpu_stats->tx_errors++; ++ } ++} ++ ++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) ++{ ++ int err; ++ ++ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token, ++ enable); ++ if (err) { ++ netdev_err(priv->net_dev, ++ "dpni_set_l3_chksum_validation() failed\n"); ++ return err; ++ } ++ ++ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token, ++ enable); ++ if (err) { ++ netdev_err(priv->net_dev, ++ "dpni_set_l4_chksum_validation failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) ++{ ++ struct dpaa2_eth_fq *fq; ++ struct dpni_tx_flow_cfg tx_flow_cfg; ++ int err; ++ int i; ++ ++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); ++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN | ++ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; ++ tx_flow_cfg.l3_chksum_gen = enable; ++ tx_flow_cfg.l4_chksum_gen = enable; ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ if (fq->type != DPAA2_TX_CONF_FQ) ++ continue; ++ ++ /* The Tx flowid is kept in the corresponding TxConf FQ. 
*/ ++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, ++ &fq->flowid, &tx_flow_cfg); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n"); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++/* Perform a single release command to add buffers ++ * to the specified buffer pool ++ */ ++static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; ++ void *buf; ++ dma_addr_t addr; ++ int i; ++ ++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { ++ /* Allocate buffer visible to WRIOP + skb shared info + ++ * alignment padding ++ */ ++ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); ++ if (unlikely(!buf)) ++ goto err_alloc; ++ ++ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); ++ ++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ if (unlikely(dma_mapping_error(dev, addr))) ++ goto err_map; ++ ++ buf_array[i] = addr; ++ ++ /* tracing point */ ++ trace_dpaa2_eth_buf_seed(priv->net_dev, ++ buf, DPAA2_ETH_BUF_RAW_SIZE, ++ addr, DPAA2_ETH_RX_BUF_SIZE, ++ bpid); ++ } ++ ++release_bufs: ++ /* In case the portal is busy, retry until successful. ++ * The buffer release function would only fail if the QBMan portal ++ * was busy, which implies portal contention (i.e. more CPUs than ++ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, ++ * there is little we can realistically do, short of giving up - ++ * in which case we'd risk depleting the buffer pool and never again ++ * receiving the Rx interrupt which would kick-start the refill logic. ++ * So just keep retrying, at the risk of being moved to ksoftirqd. ++ */ ++ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) ++ cpu_relax(); ++ return i; ++ ++err_map: ++ put_page(virt_to_head_page(buf)); ++err_alloc: ++ if (i) ++ goto release_bufs; ++ ++ return 0; ++} ++ ++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) ++{ ++ int i, j; ++ int new_count; ++ ++ /* This is the lazy seeding of Rx buffer pools. ++ * dpaa2_add_bufs() is also used on the Rx hotpath and calls ++ * napi_alloc_frag(). The trouble with that is that it in turn ends up ++ * calling this_cpu_ptr(), which mandates execution in atomic context. ++ * Rather than splitting up the code, do a one-off preempt disable. ++ */ ++ preempt_disable(); ++ for (j = 0; j < priv->num_channels; j++) { ++ for (i = 0; i < DPAA2_ETH_NUM_BUFS; ++ i += DPAA2_ETH_BUFS_PER_CMD) { ++ new_count = add_bufs(priv, bpid); ++ priv->channel[j]->buf_count += new_count; ++ ++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { ++ preempt_enable(); ++ return -ENOMEM; ++ } ++ } ++ } ++ preempt_enable(); ++ ++ return 0; ++} ++ ++/** ++ * Drain the specified number of buffers from the DPNI's private buffer pool. 
++ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD ++ */ ++static void drain_bufs(struct dpaa2_eth_priv *priv, int count) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; ++ void *vaddr; ++ int ret, i; ++ ++ do { ++ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid, ++ buf_array, count); ++ if (ret < 0) { ++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); ++ return; ++ } ++ for (i = 0; i < ret; i++) { ++ /* Same logic as on regular Rx path */ ++ dma_unmap_single(dev, buf_array[i], ++ DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ vaddr = phys_to_virt(buf_array[i]); ++ put_page(virt_to_head_page(vaddr)); ++ } ++ } while (ret); ++} ++ ++static void drain_pool(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); ++ drain_bufs(priv, 1); ++ ++ for (i = 0; i < priv->num_channels; i++) ++ priv->channel[i]->buf_count = 0; ++} ++ ++/* Function is called from softirq context only, so we don't need to guard ++ * the access to percpu count ++ */ ++static int refill_pool(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch, ++ u16 bpid) ++{ ++ int new_count; ++ ++ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) ++ return 0; ++ ++ do { ++ new_count = add_bufs(priv, bpid); ++ if (unlikely(!new_count)) { ++ /* Out of memory; abort for now, we'll try later on */ ++ break; ++ } ++ ch->buf_count += new_count; ++ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); ++ ++ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int pull_channel(struct dpaa2_eth_channel *ch) ++{ ++ int err; ++ int dequeues = -1; ++ ++ /* Retry while portal is busy */ ++ do { ++ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); ++ dequeues++; ++ cpu_relax(); ++ } while (err == -EBUSY); ++ ++ ch->stats.dequeue_portal_busy += dequeues; ++ if (unlikely(err)) ++ ch->stats.pull_err++; ++ ++ return err; ++} ++ ++/* NAPI poll routine ++ * ++ * Frames are dequeued from the QMan channel associated with this NAPI context. ++ * Rx, Tx confirmation and (if configured) Rx error frames all count ++ * towards the NAPI budget.
++ */ ++static int dpaa2_eth_poll(struct napi_struct *napi, int budget) ++{ ++ struct dpaa2_eth_channel *ch; ++ int cleaned = 0, store_cleaned; ++ struct dpaa2_eth_priv *priv; ++ int err; ++ ++ ch = container_of(napi, struct dpaa2_eth_channel, napi); ++ priv = ch->priv; ++ ++ while (cleaned < budget) { ++ err = pull_channel(ch); ++ if (unlikely(err)) ++ break; ++ ++ /* Refill pool if appropriate */ ++ refill_pool(priv, ch, priv->dpbp_attrs.bpid); ++ ++ store_cleaned = consume_frames(ch); ++ cleaned += store_cleaned; ++ ++ /* If we have enough budget left for a full store, ++ * try a new pull dequeue, otherwise we're done here ++ */ ++ if (store_cleaned == 0 || ++ cleaned > budget - DPAA2_ETH_STORE_SIZE) ++ break; ++ } ++ ++ if (cleaned < budget) { ++ napi_complete_done(napi, cleaned); ++ /* Re-enable data available notifications */ ++ do { ++ err = dpaa2_io_service_rearm(NULL, &ch->nctx); ++ cpu_relax(); ++ } while (err == -EBUSY); ++ } ++ ++ ch->stats.frames += cleaned; ++ ++ return cleaned; ++} ++ ++static void enable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_enable(&ch->napi); ++ } ++} ++ ++static void disable_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ napi_disable(&ch->napi); ++ } ++} ++ ++static int link_state_update(struct dpaa2_eth_priv *priv) ++{ ++ struct dpni_link_state state; ++ int err; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (unlikely(err)) { ++ netdev_err(priv->net_dev, ++ "dpni_get_link_state() failed\n"); ++ return err; ++ } ++ ++ /* Check link state; speed / duplex changes are not treated yet */ ++ if (priv->link_state.up == state.up) ++ return 0; ++ ++ priv->link_state = state; ++ if (state.up) { ++ netif_carrier_on(priv->net_dev); ++ netif_tx_start_all_queues(priv->net_dev); ++ } else { ++ netif_tx_stop_all_queues(priv->net_dev); ++ netif_carrier_off(priv->net_dev); ++ } ++ ++ netdev_info(priv->net_dev, "Link Event: state %s", ++ state.up ? "up" : "down"); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_open(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = seed_pool(priv, priv->dpbp_attrs.bpid); ++ if (err) { ++ /* Not much to do; the buffer pool, though not filled up, ++ * may still contain some buffers which would enable us ++ * to limp on. ++ */ ++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", ++ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid); ++ } ++ ++ /* We'll only start the txqs when the link is actually ready; make sure ++ * we don't race against the link up notification, which may come ++ * immediately after dpni_enable(); ++ */ ++ netif_tx_stop_all_queues(net_dev); ++ enable_ch_napi(priv); ++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will ++ * return true and cause 'ip link show' to report the LOWER_UP flag, ++ * even though the link notification wasn't even received. ++ */ ++ netif_carrier_off(net_dev); ++ ++ err = dpni_enable(priv->mc_io, 0, priv->mc_token); ++ if (err < 0) { ++ netdev_err(net_dev, "dpni_enable() failed\n"); ++ goto enable_err; ++ } ++ ++ /* If the DPMAC object has already processed the link up interrupt, ++ * we have to learn the link state ourselves.
++ */ ++ err = link_state_update(priv); ++ if (err < 0) { ++ netdev_err(net_dev, "Can't update link state\n"); ++ goto link_state_err; ++ } ++ ++ return 0; ++ ++link_state_err: ++enable_err: ++ disable_ch_napi(priv); ++ drain_pool(priv); ++ return err; ++} ++ ++/* The DPIO store must be empty when we call this, ++ * at the end of every NAPI cycle. ++ */ ++static u32 drain_channel(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *ch) ++{ ++ u32 drained = 0, total = 0; ++ ++ do { ++ pull_channel(ch); ++ drained = consume_frames(ch); ++ total += drained; ++ } while (drained); ++ ++ return total; ++} ++ ++static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *ch; ++ int i; ++ u32 drained = 0; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ drained += drain_channel(priv, ch); ++ } ++ ++ return drained; ++} ++ ++static int dpaa2_eth_stop(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int dpni_enabled; ++ int retries = 10; ++ u32 drained; ++ ++ netif_tx_stop_all_queues(net_dev); ++ netif_carrier_off(net_dev); ++ ++ /* Loop while dpni_disable() attempts to drain the egress FQs ++ * and confirm them back to us. ++ */ ++ do { ++ dpni_disable(priv->mc_io, 0, priv->mc_token); ++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); ++ if (dpni_enabled) ++ /* Allow the MC some slack */ ++ msleep(100); ++ } while (dpni_enabled && --retries); ++ if (!retries) { ++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); ++ /* Must go on and disable NAPI nonetheless, so we don't crash at ++ * the next "ifconfig up" ++ */ ++ } ++ ++ /* Wait for NAPI to complete on every core and disable it. ++ * In particular, this will also prevent NAPI from being rescheduled if ++ * a new CDAN is serviced, effectively discarding the CDAN. We therefore ++ * don't even need to disarm the channels, except perhaps for the case ++ * of a huge coalescing value. 
++ */ ++ disable_ch_napi(priv); ++ ++ /* Manually drain the Rx and TxConf queues */ ++ drained = drain_ingress_frames(priv); ++ if (drained) ++ netdev_dbg(net_dev, "Drained %d frames.\n", drained); ++ ++ /* Empty the buffer pool */ ++ drain_pool(priv); ++ ++ return 0; ++} ++ ++static int dpaa2_eth_init(struct net_device *net_dev) ++{ ++ u64 supported = 0; ++ u64 not_supported = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u32 options = priv->dpni_attrs.options; ++ ++ /* Capabilities listing */ ++ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; ++ ++ if (options & DPNI_OPT_UNICAST_FILTER) ++ supported |= IFF_UNICAST_FLT; ++ else ++ not_supported |= IFF_UNICAST_FLT; ++ ++ if (options & DPNI_OPT_MULTICAST_FILTER) ++ supported |= IFF_MULTICAST; ++ else ++ not_supported |= IFF_MULTICAST; ++ ++ net_dev->priv_flags |= supported; ++ net_dev->priv_flags &= ~not_supported; ++ ++ /* Features */ ++ net_dev->features = NETIF_F_RXCSUM | ++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | ++ NETIF_F_SG | NETIF_F_HIGHDMA | ++ NETIF_F_LLTX; ++ net_dev->hw_features = net_dev->features; ++ ++ return 0; ++} ++ ++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; ++ int err; ++ ++ err = eth_mac_addr(net_dev, addr); ++ if (err < 0) { ++ dev_err(dev, "eth_mac_addr() failed with error %d\n", err); ++ return err; ++ } ++ ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/** Fill in counters maintained by the GPP driver. These may be different from ++ * the hardware counters obtained by ethtool. ++ */ ++static struct rtnl_link_stats64 ++*dpaa2_eth_get_stats(struct net_device *net_dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct rtnl_link_stats64 *percpu_stats; ++ u64 *cpustats; ++ u64 *netstats = (u64 *)stats; ++ int i, j; ++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); ++ ++ for_each_possible_cpu(i) { ++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); ++ cpustats = (u64 *)percpu_stats; ++ for (j = 0; j < num; j++) ++ netstats[j] += cpustats[j]; ++ } ++ ++ return stats; ++} ++ ++static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) { ++ netdev_err(net_dev, "Invalid MTU %d. Valid range is: 68..%d\n", ++ mtu, DPAA2_ETH_MAX_MTU); ++ return -EINVAL; ++ } ++ ++ /* Set the maximum Rx frame length to match the transmit side; ++ * account for L2 headers when computing the MFL ++ */ ++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, ++ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); ++ if (err) { ++ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); ++ return err; ++ } ++ ++ net_dev->mtu = mtu; ++ return 0; ++} ++ ++/* Copy mac unicast addresses from @net_dev to @priv. ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
++ */ ++static void add_uc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_uc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add ucast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++/* Copy mac multicast addresses from @net_dev to @priv ++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. ++ */ ++static void add_mc_hw_addr(const struct net_device *net_dev, ++ struct dpaa2_eth_priv *priv) ++{ ++ struct netdev_hw_addr *ha; ++ int err; ++ ++ netdev_for_each_mc_addr(ha, net_dev) { ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, ++ ha->addr); ++ if (err) ++ netdev_warn(priv->net_dev, ++ "Could not add mcast MAC %pM to the filtering table (err %d)\n", ++ ha->addr, err); ++ } ++} ++ ++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int uc_count = netdev_uc_count(net_dev); ++ int mc_count = netdev_mc_count(net_dev); ++ u8 max_uc = priv->dpni_attrs.max_unicast_filters; ++ u8 max_mc = priv->dpni_attrs.max_multicast_filters; ++ u32 options = priv->dpni_attrs.options; ++ u16 mc_token = priv->mc_token; ++ struct fsl_mc_io *mc_io = priv->mc_io; ++ int err; ++ ++ /* Basic sanity checks; these probably indicate a misconfiguration */ ++ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) ++ netdev_info(net_dev, ++ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n", ++ max_uc); ++ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) ++ netdev_info(net_dev, ++ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n", ++ max_mc); ++ ++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ ++ if (uc_count > max_uc) { ++ netdev_info(net_dev, ++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ uc_count, max_uc); ++ goto force_promisc; ++ } ++ if (mc_count > max_mc) { ++ netdev_info(net_dev, ++ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n", ++ mc_count, max_mc); ++ goto force_mc_promisc; ++ } ++ ++ /* Adjust promisc settings due to flag combinations */ ++ if (net_dev->flags & IFF_PROMISC) ++ goto force_promisc; ++ if (net_dev->flags & IFF_ALLMULTI) { ++ /* First, rebuild unicast filtering table. This should be done ++ * in promisc mode, in order to avoid frame loss while we ++ * progressively add entries to the table. ++ * We don't know whether we had been in promisc already, and ++ * making an MC call to find it is expensive; so set uc promisc ++ * nonetheless. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc\n"); ++ ++ /* Actual uc table reconstruction. */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc filters\n"); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Finally, clear uc promisc and set mc promisc as requested. */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear uc promisc\n"); ++ goto force_mc_promisc; ++ } ++ ++ /* Neither unicast, nor multicast promisc will be on... eventually. ++ * For now, rebuild mac filtering tables while forcing both of them on. 
++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); ++ ++ /* Actual mac filtering tables reconstruction */ ++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mac filters\n"); ++ add_mc_hw_addr(net_dev, priv); ++ add_uc_hw_addr(net_dev, priv); ++ ++ /* Now we can clear both ucast and mcast promisc, without risking ++ * to drop legitimate frames anymore. ++ */ ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear ucast promisc\n"); ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); ++ if (err) ++ netdev_warn(net_dev, "Can't clear mcast promisc\n"); ++ ++ return; ++ ++force_promisc: ++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set ucast promisc\n"); ++force_mc_promisc: ++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); ++ if (err) ++ netdev_warn(net_dev, "Can't set mcast promisc\n"); ++} ++ ++static int dpaa2_eth_set_features(struct net_device *net_dev, ++ netdev_features_t features) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ netdev_features_t changed = features ^ net_dev->features; ++ bool enable; ++ int err; ++ ++ if (changed & NETIF_F_RXCSUM) { ++ enable = !!(features & NETIF_F_RXCSUM); ++ err = set_rx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { ++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); ++ err = set_tx_csum(priv, enable); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(dev); ++ struct hwtstamp_config config; ++ ++ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) ++ return -EFAULT; ++ ++ switch (config.tx_type) { ++ case HWTSTAMP_TX_OFF: ++ priv->ts_tx_en = false; ++ break; ++ case HWTSTAMP_TX_ON: ++ priv->ts_tx_en = true; ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { ++ priv->ts_rx_en = false; ++ } else { ++ priv->ts_rx_en = true; ++ /* TS is set for all frame types, not only those requested */ ++ config.rx_filter = HWTSTAMP_FILTER_ALL; ++ } ++ ++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
++ -EFAULT : 0; ++} ++ ++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ++{ ++ if (cmd == SIOCSHWTSTAMP) ++ return dpaa2_eth_ts_ioctl(dev, rq, cmd); ++ ++ return -EINVAL; ++} ++ ++static const struct net_device_ops dpaa2_eth_ops = { ++ .ndo_open = dpaa2_eth_open, ++ .ndo_start_xmit = dpaa2_eth_tx, ++ .ndo_stop = dpaa2_eth_stop, ++ .ndo_init = dpaa2_eth_init, ++ .ndo_set_mac_address = dpaa2_eth_set_addr, ++ .ndo_get_stats64 = dpaa2_eth_get_stats, ++ .ndo_change_mtu = dpaa2_eth_change_mtu, ++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, ++ .ndo_set_features = dpaa2_eth_set_features, ++ .ndo_do_ioctl = dpaa2_eth_ioctl, ++}; ++ ++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_eth_channel *ch; ++ ++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); ++ ++ /* Update NAPI statistics */ ++ ch->stats.cdan++; ++ ++ napi_schedule_irqoff(&ch->napi); ++} ++ ++/* Verify that the FLIB API version of various MC objects is supported ++ * by our driver ++ */ ++static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) ++{ ++ char *name = ls_dev->obj_desc.type; ++ struct device *dev = &ls_dev->dev; ++ u16 supported_version, flib_version; ++ ++ if (strcmp(name, "dpni") == 0) { ++ flib_version = DPNI_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPNI_VERSION; ++ } else if (strcmp(name, "dpbp") == 0) { ++ flib_version = DPBP_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPBP_VERSION; ++ } else if (strcmp(name, "dpcon") == 0) { ++ flib_version = DPCON_VER_MAJOR; ++ supported_version = DPAA2_SUPPORTED_DPCON_VERSION; ++ } else { ++ dev_err(dev, "invalid object type (%s)\n", name); ++ return -EINVAL; ++ } ++ ++ /* Check that the FLIB-defined version matches the one reported by MC */ ++ if (mc_version != flib_version) { ++ dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", ++ name, mc_version, flib_version); ++ return -EINVAL; ++ } ++ ++ /* ... 
and that we actually support it */ ++ if (mc_version < supported_version) { ++ dev_err(dev, "Unsupported %s FLIB version (%d)\n", ++ name, mc_version); ++ return -EINVAL; ++ } ++ dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); ++ ++ return 0; ++} ++ ++/* Allocate and configure a DPCON object */ ++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) ++{ ++ struct fsl_mc_device *dpcon; ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpcon_attr attrs; ++ int err; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), ++ FSL_MC_POOL_DPCON, &dpcon); ++ if (err) { ++ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); ++ return NULL; ++ } ++ ++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(dpcon, attrs.version.major); ++ if (err) ++ goto err_dpcon_ver; ++ ++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ return dpcon; ++ ++err_enable: ++err_dpcon_ver: ++err_get_attr: ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++err_open: ++ fsl_mc_object_free(dpcon); ++ ++ return NULL; ++} ++ ++static void free_dpcon(struct dpaa2_eth_priv *priv, ++ struct fsl_mc_device *dpcon) ++{ ++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); ++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); ++ fsl_mc_object_free(dpcon); ++} ++ ++static struct dpaa2_eth_channel * ++alloc_channel(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_attr attr; ++ struct device *dev = priv->net_dev->dev.parent; ++ int err; ++ ++ channel = kzalloc(sizeof(*channel), GFP_ATOMIC); ++ if (!channel) ++ return NULL; ++ ++ channel->dpcon = setup_dpcon(priv); ++ if (!channel->dpcon) ++ goto err_setup; ++ ++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, ++ &attr); ++ if (err) { ++ dev_err(dev, "dpcon_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ channel->dpcon_id = attr.id; ++ channel->ch_id = attr.qbman_ch_id; ++ channel->priv = priv; ++ ++ return channel; ++ ++err_get_attr: ++ free_dpcon(priv, channel->dpcon); ++err_setup: ++ kfree(channel); ++ return NULL; ++} ++ ++static void free_channel(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_channel *channel) ++{ ++ free_dpcon(priv, channel->dpcon); ++ kfree(channel); ++} ++ ++/* DPIO setup: allocate and configure QBMan channels, setup core affinity ++ * and register data availability notifications ++ */ ++static int setup_dpio(struct dpaa2_eth_priv *priv) ++{ ++ struct dpaa2_io_notification_ctx *nctx; ++ struct dpaa2_eth_channel *channel; ++ struct dpcon_notification_cfg dpcon_notif_cfg; ++ struct device *dev = priv->net_dev->dev.parent; ++ int i, err; ++ ++ /* Don't allocate more channels than strictly necessary and assign ++ * them to cores starting from the first one available in ++ * cpu_online_mask. ++ * If the number of channels is lower than the number of cores, ++ * there will be no rx/tx conf processing on the last cores in the mask. 
++ */ ++ cpumask_clear(&priv->dpio_cpumask); ++ for_each_online_cpu(i) { ++ /* Try to allocate a channel */ ++ channel = alloc_channel(priv); ++ if (!channel) ++ goto err_alloc_ch; ++ ++ priv->channel[priv->num_channels] = channel; ++ ++ nctx = &channel->nctx; ++ nctx->is_cdan = 1; ++ nctx->cb = cdan_cb; ++ nctx->id = channel->ch_id; ++ nctx->desired_cpu = i; ++ ++ /* Register the new context */ ++ err = dpaa2_io_service_register(NULL, nctx); ++ if (err) { ++ dev_info(dev, "No affine DPIO for core %d\n", i); ++ /* This core doesn't have an affine DPIO, but there's ++ * a chance another one does, so keep trying ++ */ ++ free_channel(priv, channel); ++ continue; ++ } ++ ++ /* Register DPCON notification with MC */ ++ dpcon_notif_cfg.dpio_id = nctx->dpio_id; ++ dpcon_notif_cfg.priority = 0; ++ dpcon_notif_cfg.user_ctx = nctx->qman64; ++ err = dpcon_set_notification(priv->mc_io, 0, ++ channel->dpcon->mc_handle, ++ &dpcon_notif_cfg); ++ if (err) { ++ dev_err(dev, "dpcon_set_notification failed()\n"); ++ goto err_set_cdan; ++ } ++ ++ /* If we managed to allocate a channel and also found an affine ++ * DPIO for this core, add it to the final mask ++ */ ++ cpumask_set_cpu(i, &priv->dpio_cpumask); ++ priv->num_channels++; ++ ++ if (priv->num_channels == dpaa2_eth_max_channels(priv)) ++ break; ++ } ++ ++ /* Tx confirmation queues can only be serviced by cpus ++ * with an affine DPIO/channel ++ */ ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ return 0; ++ ++err_set_cdan: ++ dpaa2_io_service_deregister(NULL, nctx); ++ free_channel(priv, channel); ++err_alloc_ch: ++ if (cpumask_empty(&priv->dpio_cpumask)) { ++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); ++ return -ENODEV; ++ } ++ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); ++ ++ return 0; ++} ++ ++static void free_dpio(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ /* deregister CDAN notifications and free channels */ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ dpaa2_io_service_deregister(NULL, &ch->nctx); ++ free_channel(priv, ch); ++ } ++} ++ ++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, ++ int cpu) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ if (priv->channel[i]->nctx.desired_cpu == cpu) ++ return priv->channel[i]; ++ ++ /* We should never get here. Issue a warning and return ++ * the first channel, because it's still better than nothing ++ */ ++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); ++ ++ return priv->channel[0]; ++} ++ ++static void set_fq_affinity(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpaa2_eth_fq *fq; ++ int rx_cpu, txc_cpu; ++ int i; ++ ++ /* For each FQ, pick one channel/CPU to deliver frames to. ++ * This may well change at runtime, either through irqbalance or ++ * through direct user intervention. 
++ */ ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ switch (fq->type) { ++ case DPAA2_RX_FQ: ++ case DPAA2_RX_ERR_FQ: ++ fq->target_cpu = rx_cpu; ++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); ++ if (rx_cpu >= nr_cpu_ids) ++ rx_cpu = cpumask_first(&priv->dpio_cpumask); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ fq->target_cpu = txc_cpu; ++ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); ++ if (txc_cpu >= nr_cpu_ids) ++ txc_cpu = cpumask_first(&priv->txconf_cpumask); ++ break; ++ default: ++ dev_err(dev, "Unknown FQ type: %d\n", fq->type); ++ } ++ fq->channel = get_affine_channel(priv, fq->target_cpu); ++ } ++} ++ ++static void setup_fqs(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ /* We have one TxConf FQ per Tx flow */ ++ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; ++ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; ++ } ++ ++ /* The number of Rx queues (Rx distribution width) may be different from ++ * the number of cores. ++ * We only support one traffic class for now. ++ */ ++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { ++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; ++ priv->fq[priv->num_fqs++].flowid = (u16)i; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ /* We have exactly one Rx error queue per DPNI */ ++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; ++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; ++#endif ++ ++ /* For each FQ, decide on which core to process incoming frames */ ++ set_fq_affinity(priv); ++} ++ ++/* Allocate and configure one buffer pool for each interface */ ++static int setup_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ int err; ++ struct fsl_mc_device *dpbp_dev; ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, ++ &dpbp_dev); ++ if (err) { ++ dev_err(dev, "DPBP device allocation failed\n"); ++ return err; ++ } ++ ++ priv->dpbp_dev = dpbp_dev; ++ ++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, ++ &dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_enable() failed\n"); ++ goto err_enable; ++ } ++ ++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, ++ &priv->dpbp_attrs); ++ if (err) { ++ dev_err(dev, "dpbp_get_attributes() failed\n"); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); ++ if (err) ++ goto err_dpbp_ver; ++ ++ return 0; ++ ++err_dpbp_ver: ++err_get_attr: ++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_enable: ++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); ++err_open: ++ fsl_mc_object_free(dpbp_dev); ++ ++ return err; ++} ++ ++static void free_dpbp(struct dpaa2_eth_priv *priv) ++{ ++ drain_pool(priv); ++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); ++ fsl_mc_object_free(priv->dpbp_dev); ++} ++ ++/* Configure the DPNI object this interface is associated with */ ++static int setup_dpni(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_eth_priv *priv; ++ struct net_device *net_dev; ++ void *dma_mem; ++ int 
err; ++ ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++ priv->dpni_id = ls_dev->obj_desc.id; ++ ++ /* get a handle for the DPNI object */ ++ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_open() failed\n"); ++ goto err_open; ++ } ++ ++ ls_dev->mc_io = priv->mc_io; ++ ls_dev->mc_handle = priv->mc_token; ++ ++ /* Map a memory region which will be used by MC to pass us an ++ * attribute structure ++ */ ++ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ goto err_alloc; ++ ++ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem, ++ DPAA2_EXT_CFG_SIZE, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) { ++ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n"); ++ goto err_dma_map; ++ } ++ ++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, ++ &priv->dpni_attrs); ++ ++ /* We'll check the return code after unmapping, as we need to ++ * do this anyway ++ */ ++ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, ++ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); ++ ++ if (err) { ++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); ++ goto err_get_attr; ++ } ++ ++ err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); ++ if (err) ++ goto err_dpni_ver; ++ ++ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); ++ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_extract_extended_cfg() failed\n"); ++ goto err_extract; ++ } ++ ++ /* Configure our buffers' layout */ ++ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | ++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | ++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | ++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; ++ priv->buf_layout.pass_parser_result = true; ++ priv->buf_layout.pass_frame_status = true; ++ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; ++ /* HW erratum mandates data alignment in multiples of 256 */ ++ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; ++ ++ /* rx buffer */ ++ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* tx buffer: remove Rx-only options */ ++ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | ++ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); ++ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* tx-confirm: same options as tx */ ++ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; ++ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; ++ priv->buf_layout.pass_timestamp = 1; ++ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token, ++ &priv->buf_layout); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed"); ++ goto err_buf_layout; ++ } ++ /* Now that we've set our tx buffer layout, retrieve the minimum ++ * required tx data offset. ++ */ ++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, ++ &priv->tx_data_offset); ++ if (err) { ++ dev_err(dev, "dpni_get_tx_data_offset() failed\n"); ++ goto err_data_offset; ++ } ++ ++ if ((priv->tx_data_offset % 64) != 0) ++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", ++ priv->tx_data_offset); ++ ++ /* Accommodate SWA space. 
*/ ++ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; ++ ++ /* allocate classification rule space */ ++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * ++ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL); ++ if (!priv->cls_rule) ++ goto err_cls_rule; ++ ++ kfree(dma_mem); ++ ++ return 0; ++ ++err_cls_rule: ++err_data_offset: ++err_buf_layout: ++err_extract: ++err_dpni_ver: ++err_get_attr: ++err_dma_map: ++ kfree(dma_mem); ++err_alloc: ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++err_open: ++ return err; ++} ++ ++static void free_dpni(struct dpaa2_eth_priv *priv) ++{ ++ int err; ++ ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) ++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", ++ err); ++ ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++} ++ ++static int setup_rx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_queue_attr rx_queue_attr; ++ struct dpni_queue_cfg queue_cfg; ++ int err; ++ ++ memset(&queue_cfg, 0, sizeof(queue_cfg)); ++ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | ++ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; ++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ queue_cfg.dest_cfg.priority = 1; ++ queue_cfg.user_ctx = (u64)fq; ++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH; ++ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, ++ &queue_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_flow() failed\n"); ++ return err; ++ } ++ ++ /* Get the actual FQID that was assigned by MC */ ++ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, ++ &rx_queue_attr); ++ if (err) { ++ dev_err(dev, "dpni_get_rx_flow() failed\n"); ++ return err; ++ } ++ fq->fqid = rx_queue_attr.fqid; ++ ++ return 0; ++} ++ ++static int setup_tx_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_tx_flow_cfg tx_flow_cfg; ++ struct dpni_tx_conf_cfg tx_conf_cfg; ++ struct dpni_tx_conf_attr tx_conf_attr; ++ int err; ++ ++ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); ++ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; ++ tx_flow_cfg.use_common_tx_conf_queue = 0; ++ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, ++ &fq->flowid, &tx_flow_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_flow() failed\n"); ++ return err; ++ } ++ ++ tx_conf_cfg.errors_only = 0; ++ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | ++ DPNI_QUEUE_OPT_DEST; ++ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq; ++ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0; ++ ++ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, ++ &tx_conf_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_conf() failed\n"); ++ return err; ++ } ++ ++ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, ++ &tx_conf_attr); ++ if (err) { ++ dev_err(dev, "dpni_get_tx_conf() failed\n"); ++ return err; ++ } ++ ++ fq->fqid = tx_conf_attr.queue_attr.fqid; ++ ++ return 0; ++} ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, ++ struct dpaa2_eth_fq *fq) ++{ ++ struct dpni_queue_attr queue_attr; ++ struct dpni_queue_cfg queue_cfg; ++ int err; ++ ++ /* Configure the Rx error queue to generate CDANs, ++ * just like the Rx queues ++ */ ++ queue_cfg.options = 
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; ++ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; ++ queue_cfg.dest_cfg.priority = 1; ++ queue_cfg.user_ctx = (u64)fq; ++ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; ++ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n"); ++ return err; ++ } ++ ++ /* Get the FQID */ ++ err = dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, ++ &queue_attr); ++ if (err) { ++ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); ++ return err; ++ } ++ fq->fqid = queue_attr.fqid; ++ ++ return 0; ++} ++#endif ++ ++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, ++ * frame queues and channels ++ */ ++static int bind_dpni(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ struct dpni_pools_cfg pools_params; ++ struct dpni_error_cfg err_cfg; ++ int err = 0; ++ int i; ++ ++ pools_params.num_dpbp = 1; ++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; ++ pools_params.pools[0].backup_pool = 0; ++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; ++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); ++ if (err) { ++ dev_err(dev, "dpni_set_pools() failed\n"); ++ return err; ++ } ++ ++ check_fs_support(net_dev); ++ ++ /* have the interface implicitly distribute traffic based on supported ++ * header fields ++ */ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); ++ if (err) ++ return err; ++ } ++ ++ /* Configure handling of error frames */ ++ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK; ++ err_cfg.set_frame_annotation = 1; ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; ++#else ++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; ++#endif ++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, ++ &err_cfg); ++ if (err) { ++ dev_err(dev, "dpni_set_errors_behavior failed\n"); ++ return err; ++ } ++ ++ /* Configure Rx and Tx conf queues to generate CDANs */ ++ for (i = 0; i < priv->num_fqs; i++) { ++ switch (priv->fq[i].type) { ++ case DPAA2_RX_FQ: ++ err = setup_rx_flow(priv, &priv->fq[i]); ++ break; ++ case DPAA2_TX_CONF_FQ: ++ err = setup_tx_flow(priv, &priv->fq[i]); ++ break; ++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE ++ case DPAA2_RX_ERR_FQ: ++ err = setup_rx_err_flow(priv, &priv->fq[i]); ++ break; ++#endif ++ default: ++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); ++ return -EINVAL; ++ } ++ if (err) ++ return err; ++ } ++ ++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid); ++ if (err) { ++ dev_err(dev, "dpni_get_qdid() failed\n"); ++ return err; ++ } ++ ++ return 0; ++} ++ ++/* Allocate rings for storing incoming frame descriptors */ ++static int alloc_rings(struct dpaa2_eth_priv *priv) ++{ ++ struct net_device *net_dev = priv->net_dev; ++ struct device *dev = net_dev->dev.parent; ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ priv->channel[i]->store = ++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); ++ if (!priv->channel[i]->store) { ++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); ++ goto err_ring; ++ } ++ } ++ ++ return 0; ++ ++err_ring: ++ for (i = 0; i < priv->num_channels; i++) { ++ if (!priv->channel[i]->store) ++ break; ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++ } ++ ++ return -ENOMEM; ++} ++ ++static void 
free_rings(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ ++ for (i = 0; i < priv->num_channels; i++) ++ dpaa2_io_store_destroy(priv->channel[i]->store); ++} ++ ++static int netdev_init(struct net_device *net_dev) ++{ ++ int err; ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ u8 mac_addr[ETH_ALEN]; ++ u8 bcast_addr[ETH_ALEN]; ++ ++ net_dev->netdev_ops = &dpaa2_eth_ops; ++ ++ /* If the DPNI attributes contain an all-0 mac_addr, ++ * set a random hardware address ++ */ ++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ mac_addr); ++ if (err) { ++ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err); ++ return err; ++ } ++ if (is_zero_ether_addr(mac_addr)) { ++ /* Fills in net_dev->dev_addr, as required by ++ * register_netdevice() ++ */ ++ eth_hw_addr_random(net_dev); ++ /* Make the user aware, without cluttering the boot log */ ++ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random"); ++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, ++ net_dev->dev_addr); ++ if (err) { ++ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err); ++ return err; ++ } ++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all ++ * practical purposes, this will be our "permanent" mac address, ++ * at least until the next reboot. This move will also permit ++ * register_netdevice() to properly fill up net_dev->perm_addr. ++ */ ++ net_dev->addr_assign_type = NET_ADDR_PERM; ++ } else { ++ /* NET_ADDR_PERM is default, all we have to do is ++ * fill in the device addr. ++ */ ++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); ++ } ++ ++ /* Explicitly add the broadcast address to the MAC filtering table; ++ * the MC won't do that for us. ++ */ ++ eth_broadcast_addr(bcast_addr); ++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); ++ if (err) { ++ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); ++ /* Won't return an error; at least, we'd have egress traffic */ ++ } ++ ++ /* Reserve enough space to align buffer as per hardware requirement; ++ * NOTE: priv->tx_data_offset MUST be initialized at this point. 
++ */ ++ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); ++ ++ /* Our .ndo_init will be called herein */ ++ err = register_netdev(net_dev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev() = %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int poll_link_state(void *arg) ++{ ++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; ++ int err; ++ ++ while (!kthread_should_stop()) { ++ err = link_state_update(priv); ++ if (unlikely(err)) ++ return err; ++ ++ msleep(DPAA2_ETH_LINK_STATE_REFRESH); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) ++{ ++ u8 irq_index = DPNI_IRQ_INDEX; ++ u32 status, clear = 0; ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); ++ struct net_device *net_dev = dev_get_drvdata(dev); ++ int err; ++ ++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ irq_index, &status); ++ if (unlikely(err)) { ++ netdev_err(net_dev, "Can't get irq status (err %d)", err); ++ clear = 0xffffffff; ++ goto out; ++ } ++ ++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { ++ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; ++ link_state_update(netdev_priv(net_dev)); ++ } ++ ++out: ++ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, ++ irq_index, clear); ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *ls_dev) ++{ ++ int err = 0; ++ struct fsl_mc_device_irq *irq; ++ u8 irq_index = DPNI_IRQ_INDEX; ++ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; ++ ++ err = fsl_mc_allocate_irqs(ls_dev); ++ if (err) { ++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); ++ return err; ++ } ++ ++ irq = ls_dev->irqs[0]; ++ err = devm_request_threaded_irq(&ls_dev->dev, irq->irq_number, ++ dpni_irq0_handler, ++ dpni_irq0_handler_thread, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&ls_dev->dev), &ls_dev->dev); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); ++ goto free_mc_irq; ++ } ++ ++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ irq_index, mask); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); ++ goto free_irq; ++ } ++ ++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ irq_index, 1); ++ if (err < 0) { ++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); ++free_mc_irq: ++ fsl_mc_free_irqs(ls_dev); ++ ++ return err; ++} ++ ++static void add_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ ++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, ++ NAPI_POLL_WEIGHT); ++ } ++} ++ ++static void del_ch_napi(struct dpaa2_eth_priv *priv) ++{ ++ int i; ++ struct dpaa2_eth_channel *ch; ++ ++ for (i = 0; i < priv->num_channels; i++) { ++ ch = priv->channel[i]; ++ netif_napi_del(&ch->napi); ++ } ++} ++ ++/* SysFS support */ ++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ /* No MC API for getting the shaping config. We're stateful. 
*/ ++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; ++ ++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); ++} ++ ++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ int err, items; ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpni_tx_shaping_cfg scfg; ++ ++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); ++ if (items != 2) { ++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); ++ return -EINVAL; ++ } ++ /* Size restriction as per MC API documentation */ ++ if (scfg.max_burst_size > 64000) { ++ pr_err("max_burst_size must be <= 64000, thanks.\n"); ++ return -EINVAL; ++ } ++ ++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); ++ if (err) { ++ dev_err(dev, "dpni_set_tx_shaping() failed\n"); ++ return -EPERM; ++ } ++ /* If successful, save the current configuration for future inquiries */ ++ priv->shaping_cfg = scfg; ++ ++ return count; ++} ++ ++static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ ++ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); ++} ++ ++static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); ++ struct dpaa2_eth_fq *fq; ++ bool running = netif_running(priv->net_dev); ++ int i, err; ++ ++ err = cpulist_parse(buf, &priv->txconf_cpumask); ++ if (err) ++ return err; ++ ++ /* Only accept CPUs that have an affine DPIO */ ++ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { ++ netdev_info(priv->net_dev, ++ "cpumask must be a subset of 0x%lx\n", ++ *cpumask_bits(&priv->dpio_cpumask)); ++ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, ++ &priv->txconf_cpumask); ++ } ++ ++ /* Rewiring the TxConf FQs requires interface shutdown. ++ */ ++ if (running) { ++ err = dpaa2_eth_stop(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ /* Set the new TxConf FQ affinities */ ++ set_fq_affinity(priv); ++ ++ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit ++ * link up notification is received. Give the polling thread enough time ++ * to detect the link state change, or else we'll end up with the ++ * transmission side forever shut down. 
++ */ ++ if (priv->do_link_poll) ++ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); ++ ++ for (i = 0; i < priv->num_fqs; i++) { ++ fq = &priv->fq[i]; ++ if (fq->type != DPAA2_TX_CONF_FQ) ++ continue; ++ setup_tx_flow(priv, fq); ++ } ++ ++ if (running) { ++ err = dpaa2_eth_open(priv->net_dev); ++ if (err) ++ return -ENODEV; ++ } ++ ++ return count; ++} ++ ++static struct device_attribute dpaa2_eth_attrs[] = { ++ __ATTR(txconf_cpumask, ++ S_IRUSR | S_IWUSR, ++ dpaa2_eth_show_txconf_cpumask, ++ dpaa2_eth_write_txconf_cpumask), ++ ++ __ATTR(tx_shaping, ++ S_IRUSR | S_IWUSR, ++ dpaa2_eth_show_tx_shaping, ++ dpaa2_eth_write_tx_shaping), ++}; ++ ++void dpaa2_eth_sysfs_init(struct device *dev) ++{ ++ int i, err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { ++ err = device_create_file(dev, &dpaa2_eth_attrs[i]); ++ if (err) { ++ dev_err(dev, "ERROR creating sysfs file\n"); ++ goto undo; ++ } ++ } ++ return; ++ ++undo: ++ while (i > 0) ++ device_remove_file(dev, &dpaa2_eth_attrs[--i]); ++} ++ ++void dpaa2_eth_sysfs_remove(struct device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) ++ device_remove_file(dev, &dpaa2_eth_attrs[i]); ++} ++ ++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev = NULL; ++ struct dpaa2_eth_priv *priv = NULL; ++ int err = 0; ++ ++ dev = &dpni_dev->dev; ++ ++ /* Net device */ ++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); ++ if (!net_dev) { ++ dev_err(dev, "alloc_etherdev_mq() failed\n"); ++ return -ENOMEM; ++ } ++ ++ SET_NETDEV_DEV(net_dev, dev); ++ dev_set_drvdata(dev, net_dev); ++ ++ priv = netdev_priv(net_dev); ++ priv->net_dev = net_dev; ++ ++ /* Obtain a MC portal */ ++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &priv->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_portal_alloc; ++ } ++ ++ /* MC objects initialization and configuration */ ++ err = setup_dpni(dpni_dev); ++ if (err) ++ goto err_dpni_setup; ++ ++ err = setup_dpio(priv); ++ if (err) ++ goto err_dpio_setup; ++ ++ setup_fqs(priv); ++ ++ err = setup_dpbp(priv); ++ if (err) ++ goto err_dpbp_setup; ++ ++ err = bind_dpni(priv); ++ if (err) ++ goto err_bind; ++ ++ /* Add a NAPI context for each channel */ ++ add_ch_napi(priv); ++ ++ /* Percpu statistics */ ++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); ++ if (!priv->percpu_stats) { ++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_stats; ++ } ++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); ++ if (!priv->percpu_extras) { ++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); ++ err = -ENOMEM; ++ goto err_alloc_percpu_extras; ++ } ++ ++ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); ++ if (!dev_valid_name(net_dev->name)) { ++ dev_warn(&net_dev->dev, ++ "netdevice name \"%s\" cannot be used, reverting to default..\n", ++ net_dev->name); ++ dev_alloc_name(net_dev, "eth%d"); ++ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); ++ } ++ ++ err = netdev_init(net_dev); ++ if (err) ++ goto err_netdev_init; ++ ++ /* Configure checksum offload based on current interface flags */ ++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); ++ if (err) ++ goto err_csum; ++ ++ err = set_tx_csum(priv, !!(net_dev->features & ++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); ++ if (err) ++ goto err_csum; ++ ++ err = alloc_rings(priv); ++ if (err) ++ goto err_alloc_rings; ++ ++ 
net_dev->ethtool_ops = &dpaa2_ethtool_ops; ++ ++ err = setup_irqs(dpni_dev); ++ if (err) { ++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); ++ priv->poll_thread = kthread_run(poll_link_state, priv, ++ "%s_poll_link", net_dev->name); ++ if (IS_ERR(priv->poll_thread)) { ++ netdev_err(net_dev, "Error starting polling thread\n"); ++ goto err_poll_thread; ++ } ++ priv->do_link_poll = true; ++ } ++ ++ dpaa2_eth_sysfs_init(&net_dev->dev); ++ dpaa2_dbg_add(priv); ++ ++ dev_info(dev, "Probed interface %s\n", net_dev->name); ++ return 0; ++ ++err_poll_thread: ++ free_rings(priv); ++err_alloc_rings: ++err_csum: ++ unregister_netdev(net_dev); ++err_netdev_init: ++ free_percpu(priv->percpu_extras); ++err_alloc_percpu_extras: ++ free_percpu(priv->percpu_stats); ++err_alloc_percpu_stats: ++ del_ch_napi(priv); ++err_bind: ++ free_dpbp(priv); ++err_dpbp_setup: ++ free_dpio(priv); ++err_dpio_setup: ++ kfree(priv->cls_rule); ++ dpni_close(priv->mc_io, 0, priv->mc_token); ++err_dpni_setup: ++ fsl_mc_portal_free(priv->mc_io); ++err_portal_alloc: ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return err; ++} ++ ++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct net_device *net_dev; ++ struct dpaa2_eth_priv *priv; ++ ++ dev = &ls_dev->dev; ++ net_dev = dev_get_drvdata(dev); ++ priv = netdev_priv(net_dev); ++ ++ dpaa2_dbg_remove(priv); ++ dpaa2_eth_sysfs_remove(&net_dev->dev); ++ ++ unregister_netdev(net_dev); ++ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); ++ ++ free_dpio(priv); ++ free_rings(priv); ++ del_ch_napi(priv); ++ free_dpbp(priv); ++ free_dpni(priv); ++ ++ fsl_mc_portal_free(priv->mc_io); ++ ++ free_percpu(priv->percpu_stats); ++ free_percpu(priv->percpu_extras); ++ ++ if (priv->do_link_poll) ++ kthread_stop(priv->poll_thread); ++ else ++ fsl_mc_free_irqs(ls_dev); ++ ++ kfree(priv->cls_rule); ++ ++ dev_set_drvdata(dev, NULL); ++ free_netdev(net_dev); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpni", ++ .ver_major = DPNI_VER_MAJOR, ++ .ver_minor = DPNI_VER_MINOR ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_eth_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_eth_probe, ++ .remove = dpaa2_eth_remove, ++ .match_id_table = dpaa2_eth_match_id_table ++}; ++ ++static int __init dpaa2_eth_driver_init(void) ++{ ++ int err; ++ ++ dpaa2_eth_dbg_init(); ++ ++ err = fsl_mc_driver_register(&dpaa2_eth_driver); ++ if (err) { ++ dpaa2_eth_dbg_exit(); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void __exit dpaa2_eth_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dpaa2_eth_driver); ++ dpaa2_eth_dbg_exit(); ++} ++ ++module_init(dpaa2_eth_driver_init); ++module_exit(dpaa2_eth_driver_exit); +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +new file mode 100644 +index 0000000..7274fbe +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +@@ -0,0 +1,377 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef __DPAA2_ETH_H ++#define __DPAA2_ETH_H ++ ++#include ++#include ++#include "../../fsl-mc/include/fsl_dpaa2_io.h" ++#include "../../fsl-mc/include/fsl_dpaa2_fd.h" ++#include "../../fsl-mc/include/dpbp.h" ++#include "../../fsl-mc/include/dpbp-cmd.h" ++#include "../../fsl-mc/include/dpcon.h" ++#include "../../fsl-mc/include/dpcon-cmd.h" ++#include "../../fsl-mc/include/dpmng.h" ++#include "dpni.h" ++#include "dpni-cmd.h" ++ ++#include "dpaa2-eth-trace.h" ++#include "dpaa2-eth-debugfs.h" ++ ++#define DPAA2_ETH_STORE_SIZE 16 ++ ++/* Maximum number of scatter-gather entries in an ingress frame, ++ * considering the maximum receive frame size is 64K ++ */ ++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) ++ ++/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced ++ * Max Frame Length (currently 10k). ++ */ ++#define DPAA2_ETH_MFL (10 * 1024) ++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) ++/* Convert L3 MTU to L2 MFL */ ++#define DPAA2_ETH_L2_MAX_FRM(mtu) (mtu + VLAN_ETH_HLEN) ++ ++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo ++ * frames in the Rx queues (length of the current frame is not ++ * taken into account when making the taildrop decision) ++ */ ++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) ++ ++/* Buffer quota per queue. Must be large enough such that for minimum sized ++ * frames taildrop kicks in before the bpool gets depleted, so we compute ++ * how many 64B frames fit inside the taildrop threshold and add a margin ++ * to accommodate the buffer refill delay. ++ */ ++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) ++#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) ++#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE ++ ++/* Maximum number of buffers that can be acquired/released through a single ++ * QBMan command ++ */ ++#define DPAA2_ETH_BUFS_PER_CMD 7 ++ ++/* Hardware requires alignment for ingress/egress buffer addresses ++ * and ingress buffer lengths. 
++ */ ++#define DPAA2_ETH_RX_BUF_SIZE 2048 ++#define DPAA2_ETH_TX_BUF_ALIGN 64 ++#define DPAA2_ETH_RX_BUF_ALIGN 256 ++#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ ++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) ++ ++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress ++ * buffers large enough to allow building an skb around them and also account ++ * for alignment restrictions ++ */ ++#define DPAA2_ETH_BUF_RAW_SIZE \ ++ (DPAA2_ETH_RX_BUF_SIZE + \ ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ ++ DPAA2_ETH_RX_BUF_ALIGN) ++ ++/* PTP nominal frequency 1MHz */ ++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 ++ ++/* We are accommodating a skb backpointer and some S/G info ++ * in the frame's software annotation. The hardware ++ * options are either 0 or 64, so we choose the latter. ++ */ ++#define DPAA2_ETH_SWA_SIZE 64 ++ ++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ ++struct dpaa2_eth_swa { ++ struct sk_buff *skb; ++ struct scatterlist *scl; ++ int num_sg; ++ int num_dma_bufs; ++}; ++ ++/* Annotation valid bits in FD FRC */ ++#define DPAA2_FD_FRC_FASV 0x8000 ++#define DPAA2_FD_FRC_FAEADV 0x4000 ++#define DPAA2_FD_FRC_FAPRV 0x2000 ++#define DPAA2_FD_FRC_FAIADV 0x1000 ++#define DPAA2_FD_FRC_FASWOV 0x0800 ++#define DPAA2_FD_FRC_FAICFDV 0x0400 ++ ++/* Annotation bits in FD CTRL */ ++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ ++#define DPAA2_FD_CTRL_PTA 0x00800000 ++#define DPAA2_FD_CTRL_PTV1 0x00400000 ++ ++/* Frame annotation status */ ++struct dpaa2_fas { ++ u8 reserved; ++ u8 ppid; ++ __le16 ifpid; ++ __le32 status; ++} __packed; ++ ++/* Error and status bits in the frame annotation status word */ ++/* Debug frame, otherwise supposed to be discarded */ ++#define DPAA2_FAS_DISC 0x80000000 ++/* MACSEC frame */ ++#define DPAA2_FAS_MS 0x40000000 ++#define DPAA2_FAS_PTP 0x08000000 ++/* Ethernet multicast frame */ ++#define DPAA2_FAS_MC 0x04000000 ++/* Ethernet broadcast frame */ ++#define DPAA2_FAS_BC 0x02000000 ++#define DPAA2_FAS_KSE 0x00040000 ++#define DPAA2_FAS_EOFHE 0x00020000 ++#define DPAA2_FAS_MNLE 0x00010000 ++#define DPAA2_FAS_TIDE 0x00008000 ++#define DPAA2_FAS_PIEE 0x00004000 ++/* Frame length error */ ++#define DPAA2_FAS_FLE 0x00002000 ++/* Frame physical error */ ++#define DPAA2_FAS_FPE 0x00001000 ++#define DPAA2_FAS_PTE 0x00000080 ++#define DPAA2_FAS_ISP 0x00000040 ++#define DPAA2_FAS_PHE 0x00000020 ++#define DPAA2_FAS_BLE 0x00000010 ++/* L3 csum validation performed */ ++#define DPAA2_FAS_L3CV 0x00000008 ++/* L3 csum error */ ++#define DPAA2_FAS_L3CE 0x00000004 ++/* L4 csum validation performed */ ++#define DPAA2_FAS_L4CV 0x00000002 ++/* L4 csum error */ ++#define DPAA2_FAS_L4CE 0x00000001 ++/* Possible errors on the ingress path */ ++#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \ ++ DPAA2_FAS_EOFHE | \ ++ DPAA2_FAS_MNLE | \ ++ DPAA2_FAS_TIDE | \ ++ DPAA2_FAS_PIEE | \ ++ DPAA2_FAS_FLE | \ ++ DPAA2_FAS_FPE | \ ++ DPAA2_FAS_PTE | \ ++ DPAA2_FAS_ISP | \ ++ DPAA2_FAS_PHE | \ ++ DPAA2_FAS_BLE | \ ++ DPAA2_FAS_L3CE | \ ++ DPAA2_FAS_L4CE) ++/* Tx errors */ ++#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \ ++ DPAA2_FAS_EOFHE | \ ++ DPAA2_FAS_MNLE | \ ++ DPAA2_FAS_TIDE) ++ ++/* Time in milliseconds between link state updates */ ++#define DPAA2_ETH_LINK_STATE_REFRESH 1000 ++ ++/* Driver statistics, other than those in struct rtnl_link_stats64. ++ * These are usually collected per-CPU and aggregated by ethtool. 
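++ * Every field must be a __u64, since ethtool sums the per-CPU copies by
++ * walking this structure as a flat array of 64-bit counters.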
++ */ ++struct dpaa2_eth_drv_stats { ++ __u64 tx_conf_frames; ++ __u64 tx_conf_bytes; ++ __u64 tx_sg_frames; ++ __u64 tx_sg_bytes; ++ __u64 rx_sg_frames; ++ __u64 rx_sg_bytes; ++ /* Enqueues retried due to portal busy */ ++ __u64 tx_portal_busy; ++}; ++ ++/* Per-FQ statistics */ ++struct dpaa2_eth_fq_stats { ++ /* Number of frames received on this queue */ ++ __u64 frames; ++}; ++ ++/* Per-channel statistics */ ++struct dpaa2_eth_ch_stats { ++ /* Volatile dequeues retried due to portal busy */ ++ __u64 dequeue_portal_busy; ++ /* Number of CDANs; useful to estimate avg NAPI len */ ++ __u64 cdan; ++ /* Number of frames received on queues from this channel */ ++ __u64 frames; ++ /* Pull errors */ ++ __u64 pull_err; ++}; ++ ++/* Maximum number of queues associated with a DPNI */ ++#define DPAA2_ETH_MAX_RX_QUEUES 16 ++#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS ++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 ++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ ++ DPAA2_ETH_MAX_TX_QUEUES + \ ++ DPAA2_ETH_MAX_RX_ERR_QUEUES) ++ ++#define DPAA2_ETH_MAX_DPCONS NR_CPUS ++ ++enum dpaa2_eth_fq_type { ++ DPAA2_RX_FQ = 0, ++ DPAA2_TX_CONF_FQ, ++ DPAA2_RX_ERR_FQ ++}; ++ ++struct dpaa2_eth_priv; ++ ++struct dpaa2_eth_fq { ++ u32 fqid; ++ u16 flowid; ++ int target_cpu; ++ struct dpaa2_eth_channel *channel; ++ enum dpaa2_eth_fq_type type; ++ ++ void (*consume)(struct dpaa2_eth_priv *, ++ struct dpaa2_eth_channel *, ++ const struct dpaa2_fd *, ++ struct napi_struct *); ++ struct dpaa2_eth_fq_stats stats; ++}; ++ ++struct dpaa2_eth_channel { ++ struct dpaa2_io_notification_ctx nctx; ++ struct fsl_mc_device *dpcon; ++ int dpcon_id; ++ int ch_id; ++ int dpio_id; ++ struct napi_struct napi; ++ struct dpaa2_io_store *store; ++ struct dpaa2_eth_priv *priv; ++ int buf_count; ++ struct dpaa2_eth_ch_stats stats; ++}; ++ ++struct dpaa2_eth_cls_rule { ++ struct ethtool_rx_flow_spec fs; ++ bool in_use; ++}; ++ ++/* Driver private data */ ++struct dpaa2_eth_priv { ++ struct net_device *net_dev; ++ ++ u8 num_fqs; ++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; ++ ++ u8 num_channels; ++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; ++ ++ int dpni_id; ++ struct dpni_attr dpni_attrs; ++ struct dpni_extended_cfg dpni_ext_cfg; ++ /* Insofar as the MC is concerned, we're using one layout on all 3 types ++ * of buffers (Rx, Tx, Tx-Conf). ++ */ ++ struct dpni_buffer_layout buf_layout; ++ u16 tx_data_offset; ++ ++ struct fsl_mc_device *dpbp_dev; ++ struct dpbp_attr dpbp_attrs; ++ ++ u16 tx_qdid; ++ struct fsl_mc_io *mc_io; ++ /* SysFS-controlled affinity mask for TxConf FQs */ ++ struct cpumask txconf_cpumask; ++ /* Cores which have an affine DPIO/DPCON. ++ * This is the cpu set on which Rx frames are processed; ++ * Tx confirmation frames are processed on a subset of this, ++ * depending on user settings. 
++ */ ++ struct cpumask dpio_cpumask; ++ ++ /* Standard statistics */ ++ struct rtnl_link_stats64 __percpu *percpu_stats; ++ /* Extra stats, in addition to the ones known by the kernel */ ++ struct dpaa2_eth_drv_stats __percpu *percpu_extras; ++ ++ u16 mc_token; ++ ++ struct dpni_link_state link_state; ++ bool do_link_poll; ++ struct task_struct *poll_thread; ++ ++ /* enabled ethtool hashing bits */ ++ u64 rx_hash_fields; ++ ++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS ++ struct dpaa2_debugfs dbg; ++#endif ++ ++ /* array of classification rules */ ++ struct dpaa2_eth_cls_rule *cls_rule; ++ ++ struct dpni_tx_shaping_cfg shaping_cfg; ++ ++ bool ts_tx_en; /* Tx timestamping enabled */ ++ bool ts_rx_en; /* Rx timestamping enabled */ ++}; ++ ++/* default Rx hash options, set during probing */ ++#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ ++ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ ++ | RXH_L4_B_2_3) ++ ++#define dpaa2_eth_hash_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) ++ ++#define dpaa2_eth_fs_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) ++ ++#define DPAA2_CLASSIFIER_ENTRY_COUNT 16 ++ ++/* Required by struct dpni_attr::ext_cfg_iova */ ++#define DPAA2_EXT_CFG_SIZE 256 ++ ++extern const struct ethtool_ops dpaa2_ethtool_ops; ++ ++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); ++ ++static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) ++{ ++ if (!dpaa2_eth_hash_enabled(priv)) ++ return 1; ++ ++ return priv->dpni_ext_cfg.tc_cfg[0].max_dist; ++} ++ ++static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) ++{ ++ /* Ideally, we want a number of channels large enough ++ * to accommodate both the Rx distribution size ++ * and the max number of Tx confirmation queues ++ */ ++ return max_t(int, dpaa2_eth_queue_count(priv), ++ priv->dpni_attrs.max_senders); ++} ++ ++void check_fs_support(struct net_device *); ++ ++#endif /* __DPAA2_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +new file mode 100644 +index 0000000..fdab07f +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +@@ -0,0 +1,861 @@ ++/* Copyright 2014-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "dpni.h" /* DPNI_LINK_OPT_* */ ++#include "dpaa2-eth.h" ++ ++/* size of DMA memory used to pass configuration to classifier, in bytes */ ++#define DPAA2_CLASSIFIER_DMA_SIZE 256 ++ ++/* To be kept in sync with 'enum dpni_counter' */ ++char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { ++ "rx frames", ++ "rx bytes", ++ /* rx frames filtered/policed */ ++ "rx filtered frames", ++ /* rx frames dropped with errors */ ++ "rx discarded frames", ++ "rx mcast frames", ++ "rx mcast bytes", ++ "rx bcast frames", ++ "rx bcast bytes", ++ "tx frames", ++ "tx bytes", ++ /* tx frames dropped with errors */ ++ "tx discarded frames", ++}; ++ ++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) ++ ++/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ ++char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { ++ /* per-cpu stats */ ++ ++ "tx conf frames", ++ "tx conf bytes", ++ "tx sg frames", ++ "tx sg bytes", ++ "rx sg frames", ++ "rx sg bytes", ++ /* how many times we had to retry the enqueue command */ ++ "enqueue portal busy", ++ ++ /* Channel stats */ ++ /* How many times we had to retry the volatile dequeue command */ ++ "dequeue portal busy", ++ "channel pull errors", ++ /* Number of notifications received */ ++ "cdan", ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ /* FQ stats */ ++ "rx pending frames", ++ "rx pending bytes", ++ "tx conf pending frames", ++ "tx conf pending bytes", ++ "buffer count" ++#endif ++}; ++ ++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) ++ ++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, ++ struct ethtool_drvinfo *drvinfo) ++{ ++ struct mc_version mc_ver; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ char fw_version[ETHTOOL_FWVERS_LEN]; ++ char version[32]; ++ int err; ++ ++ err = mc_get_version(priv->mc_io, 0, &mc_ver); ++ if (err) { ++ strlcpy(drvinfo->fw_version, "Error retrieving MC version", ++ sizeof(drvinfo->fw_version)); ++ } else { ++ scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", ++ mc_ver.major, mc_ver.minor, mc_ver.revision); ++ strlcpy(drvinfo->fw_version, fw_version, ++ sizeof(drvinfo->fw_version)); ++ } ++ ++ scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, ++ DPNI_VER_MINOR); ++ strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); ++ ++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), ++ sizeof(drvinfo->bus_info)); ++} ++ ++static int dpaa2_eth_get_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpni_link_state state = {0}; ++ int err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ goto out; ++ } ++ ++ /* At the moment, we have no way of interrogating the DPMAC ++ * from the DPNI side - and for that matter there may exist ++ * no DPMAC at all. 
So for now we just don't report anything ++ * beyond the DPNI attributes. ++ */ ++ if (state.options & DPNI_LINK_OPT_AUTONEG) ++ cmd->autoneg = AUTONEG_ENABLE; ++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) ++ cmd->duplex = DUPLEX_FULL; ++ ethtool_cmd_speed_set(cmd, state.rate); ++ ++out: ++ return err; ++} ++ ++static int dpaa2_eth_set_settings(struct net_device *net_dev, ++ struct ethtool_cmd *cmd) ++{ ++ struct dpni_link_cfg cfg = {0}; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err = 0; ++ ++ netdev_dbg(net_dev, "Setting link parameters..."); ++ ++ /* Due to a temporary firmware limitation, the DPNI must be down ++ * in order to be able to change link settings. Taking steps to let ++ * the user know that. ++ */ ++ if (netif_running(net_dev)) { ++ netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); ++ return -EACCES; ++ } ++ ++ cfg.rate = ethtool_cmd_speed(cmd); ++ if (cmd->autoneg == AUTONEG_ENABLE) ++ cfg.options |= DPNI_LINK_OPT_AUTONEG; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; ++ if (cmd->duplex == DUPLEX_HALF) ++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) ++ /* ethtool will be loud enough if we return an error; no point ++ * in putting our own error message on the console by default ++ */ ++ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); ++ ++ return err; ++} ++ ++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, ++ u8 *data) ++{ ++ u8 *p = data; ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { ++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } ++ break; ++ } ++} ++ ++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ ++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++/** Fill in hardware counters, as returned by the MC firmware. 
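++ * Also append the driver's per-CPU extras and per-channel counters,
++ * aggregated in software.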
++ */ ++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ int i; /* Current index in the data array */ ++ int j, k, err; ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ u32 fcnt, bcnt; ++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; ++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; ++ u32 buf_cnt; ++#endif ++ u64 cdan = 0; ++ u64 portal_busy = 0, pull_err = 0; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpaa2_eth_drv_stats *extras; ++ struct dpaa2_eth_ch_stats *ch_stats; ++ ++ memset(data, 0, ++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); ++ ++ /* Print standard counters, from DPNI statistics */ ++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { ++ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i, ++ data + i); ++ if (err != 0) ++ netdev_warn(net_dev, "Err %d getting DPNI counter %d", ++ err, i); ++ } ++ ++ /* Print per-cpu extra stats */ ++ for_each_online_cpu(k) { ++ extras = per_cpu_ptr(priv->percpu_extras, k); ++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) ++ *((__u64 *)data + i + j) += *((__u64 *)extras + j); ++ } ++ i += j; ++ ++ /* We may be using fewer DPIOs than actual CPUs */ ++ for_each_cpu(j, &priv->dpio_cpumask) { ++ ch_stats = &priv->channel[j]->stats; ++ cdan += ch_stats->cdan; ++ portal_busy += ch_stats->dequeue_portal_busy; ++ pull_err += ch_stats->pull_err; ++ } ++ ++ *(data + i++) = portal_busy; ++ *(data + i++) = pull_err; ++ *(data + i++) = cdan; ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++ for (j = 0; j < priv->num_fqs; j++) { ++ /* Print FQ instantaneous counts */ ++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, ++ &fcnt, &bcnt); ++ if (err) { ++ netdev_warn(net_dev, "FQ query error %d", err); ++ return; ++ } ++ ++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { ++ fcnt_tx_total += fcnt; ++ bcnt_tx_total += bcnt; ++ } else { ++ fcnt_rx_total += fcnt; ++ bcnt_rx_total += bcnt; ++ } ++ } ++ *(data + i++) = fcnt_rx_total; ++ *(data + i++) = bcnt_rx_total; ++ *(data + i++) = fcnt_tx_total; ++ *(data + i++) = bcnt_tx_total; ++ ++ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt); ++ if (err) { ++ netdev_warn(net_dev, "Buffer count query error %d\n", err); ++ return; ++ } ++ *(data + i++) = buf_cnt; ++#endif ++} ++ ++static const struct dpaa2_eth_hash_fields { ++ u64 rxnfc_field; ++ enum net_prot cls_prot; ++ int cls_field; ++ int size; ++} hash_fields[] = { ++ { ++ /* L2 header */ ++ .rxnfc_field = RXH_L2DA, ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_DA, ++ .size = 6, ++ }, { ++ /* VLAN header */ ++ .rxnfc_field = RXH_VLAN, ++ .cls_prot = NET_PROT_VLAN, ++ .cls_field = NH_FLD_VLAN_TCI, ++ .size = 2, ++ }, { ++ /* IP header */ ++ .rxnfc_field = RXH_IP_SRC, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_SRC, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_IP_DST, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_DST, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_L3_PROTO, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_PROTO, ++ .size = 1, ++ }, { ++ /* Using UDP ports, this is functionally equivalent to raw ++ * byte pairs from L4 header. 
++ */ ++ .rxnfc_field = RXH_L4_B_0_1, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_SRC, ++ .size = 2, ++ }, { ++ .rxnfc_field = RXH_L4_B_2_3, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_DST, ++ .size = 2, ++ }, ++}; ++ ++static int cls_is_enabled(struct net_device *net_dev, u64 flag) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ return !!(priv->rx_hash_fields & flag); ++} ++ ++static int cls_key_off(struct net_device *net_dev, u64 flag) ++{ ++ int i, off = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ if (hash_fields[i].rxnfc_field & flag) ++ return off; ++ if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) ++ off += hash_fields[i].size; ++ } ++ ++ return -1; ++} ++ ++static u8 cls_key_size(struct net_device *net_dev) ++{ ++ u8 i, size = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) ++ continue; ++ size += hash_fields[i].size; ++ } ++ ++ return size; ++} ++ ++static u8 cls_max_key_size(struct net_device *net_dev) ++{ ++ u8 i, size = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) ++ size += hash_fields[i].size; ++ ++ return size; ++} ++ ++void check_fs_support(struct net_device *net_dev) ++{ ++ u8 key_size = cls_max_key_size(net_dev); ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && ++ priv->dpni_attrs.max_dist_key_size < key_size) { ++ dev_err(&net_dev->dev, ++ "max_dist_key_size = %d, expected %d. Steering is disabled\n", ++ priv->dpni_attrs.max_dist_key_size, ++ key_size); ++ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; ++ } ++} ++ ++/* Set RX hash options ++ * flags is a combination of RXH_ bits ++ */ ++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) ++{ ++ struct device *dev = net_dev->dev.parent; ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpkg_profile_cfg cls_cfg; ++ struct dpni_rx_tc_dist_cfg dist_cfg; ++ u8 *dma_mem; ++ u64 enabled_flags = 0; ++ int i; ++ int err = 0; ++ ++ if (!dpaa2_eth_hash_enabled(priv)) { ++ dev_err(dev, "Hashing support is not enabled\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (flags & ~DPAA2_RXH_SUPPORTED) { ++ /* RXH_DISCARD is not supported */ ++ dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ ++ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { ++ struct dpkg_extract *key = ++ &cls_cfg.extracts[cls_cfg.num_extracts]; ++ ++ if (!(flags & hash_fields[i].rxnfc_field)) ++ continue; ++ ++ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { ++ dev_err(dev, "error adding key extraction rule, too many rules?\n"); ++ return -E2BIG; ++ } ++ ++ key->type = DPKG_EXTRACT_FROM_HDR; ++ key->extract.from_hdr.prot = hash_fields[i].cls_prot; ++ key->extract.from_hdr.type = DPKG_FULL_FIELD; ++ key->extract.from_hdr.field = hash_fields[i].cls_field; ++ cls_cfg.num_extracts++; ++ ++ enabled_flags |= hash_fields[i].rxnfc_field; ++ } ++ ++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_prepare_key_cfg error %d", err); ++ return err; ++ } ++ ++ memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ ++ /* Prepare for setting the rx dist */ ++ dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if 
(dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ kfree(dma_mem); ++ return -ENOMEM; ++ } ++ ++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); ++ if (dpaa2_eth_fs_enabled(priv)) { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; ++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; ++ } else { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ kfree(dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); ++ return err; ++ } ++ ++ priv->rx_hash_fields = enabled_flags; ++ ++ return 0; ++} ++ ++static int prep_cls_rule(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs, ++ void *key) ++{ ++ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; ++ struct ethhdr *eth_h, *eth_m; ++ struct ethtool_flow_ext *ext_h, *ext_m; ++ const u8 key_size = cls_key_size(net_dev); ++ void *msk = key + key_size; ++ ++ memset(key, 0, key_size * 2); ++ ++ /* This code is a major mess, it has to be cleaned up after the ++ * classification mask issue is fixed and key format will be made static ++ */ ++ ++ switch (fs->flow_type & 0xff) { ++ case TCP_V4_FLOW: ++ l4ip4_h = &fs->h_u.tcp_ip4_spec; ++ l4ip4_m = &fs->m_u.tcp_ip4_spec; ++ /* TODO: ethertype to match IPv4 and protocol to match TCP */ ++ goto l4ip4; ++ ++ case UDP_V4_FLOW: ++ l4ip4_h = &fs->h_u.udp_ip4_spec; ++ l4ip4_m = &fs->m_u.udp_ip4_spec; ++ goto l4ip4; ++ ++ case SCTP_V4_FLOW: ++ l4ip4_h = &fs->h_u.sctp_ip4_spec; ++ l4ip4_m = &fs->m_u.sctp_ip4_spec; ++ ++l4ip4: ++ if (l4ip4_m->tos) { ++ netdev_err(net_dev, ++ "ToS is not supported for IPv4 L4\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { ++ netdev_err(net_dev, "IP SRC not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { ++ netdev_err(net_dev, "IP DST not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { ++ netdev_err(net_dev, "PSRC not supported, ignored\n"); ++ return -EOPNOTSUPP; ++ } ++ if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { ++ netdev_err(net_dev, "PDST not supported, ignored\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_IP_SRC)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) ++ = l4ip4_h->ip4src; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) ++ = l4ip4_m->ip4src; ++ } ++ if (cls_is_enabled(net_dev, RXH_IP_DST)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) ++ = l4ip4_h->ip4dst; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) ++ = l4ip4_m->ip4dst; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) ++ = l4ip4_h->psrc; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) ++ = l4ip4_m->psrc; ++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { ++ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) ++ = l4ip4_h->pdst; ++ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) ++ = l4ip4_m->pdst; ++ } ++ break; ++ ++ case ETHER_FLOW: ++ eth_h = &fs->h_u.ether_spec; ++ eth_m = &fs->m_u.ether_spec; ++ ++ if (eth_m->h_proto) { ++ netdev_err(net_dev, "Ethertype is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (!is_zero_ether_addr(eth_m->h_source)) { ++ netdev_err(net_dev, "ETH SRC is not supported!\n"); ++ return -EOPNOTSUPP; 
++ } ++ ++ if (cls_is_enabled(net_dev, RXH_L2DA)) { ++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), ++ eth_h->h_dest); ++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), ++ eth_m->h_dest); ++ } else { ++ if (!is_zero_ether_addr(eth_m->h_dest)) { ++ netdev_err(net_dev, ++ "ETH DST is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ } ++ break; ++ ++ default: ++ /* TODO: IP user flow, AH, ESP */ ++ return -EOPNOTSUPP; ++ } ++ ++ if (fs->flow_type & FLOW_EXT) { ++ /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ ++ return -EOPNOTSUPP; ++ } ++ ++ if (fs->flow_type & FLOW_MAC_EXT) { ++ ext_h = &fs->h_ext; ++ ext_m = &fs->m_ext; ++ ++ if (cls_is_enabled(net_dev, RXH_L2DA)) { ++ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), ++ ext_h->h_dest); ++ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), ++ ext_m->h_dest); ++ } else { ++ if (!is_zero_ether_addr(ext_m->h_dest)) { ++ netdev_err(net_dev, ++ "ETH DST is not supported!\n"); ++ return -EOPNOTSUPP; ++ } ++ } ++ } ++ return 0; ++} ++ ++static int do_cls(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs, ++ bool add) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; ++ struct dpni_rule_cfg rule_cfg; ++ void *dma_mem; ++ int err = 0; ++ ++ if (!dpaa2_eth_fs_enabled(priv)) { ++ netdev_err(net_dev, "dev does not support steering!\n"); ++ /* dev doesn't support steering */ ++ return -EOPNOTSUPP; ++ } ++ ++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC && ++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || ++ fs->location >= rule_cnt) ++ return -EINVAL; ++ ++ memset(&rule_cfg, 0, sizeof(rule_cfg)); ++ rule_cfg.key_size = cls_key_size(net_dev); ++ ++ /* allocate twice the key size, for the actual key and for mask */ ++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = prep_cls_rule(net_dev, fs, dma_mem); ++ if (err) ++ goto err_free_mem; ++ ++ rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ rule_cfg.key_size * 2, ++ DMA_TO_DEVICE); ++ ++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; ++ ++ if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { ++ int i; ++ u8 *mask = dma_mem + rule_cfg.key_size; ++ ++ /* check that nothing is masked out, otherwise it won't work */ ++ for (i = 0; i < rule_cfg.key_size; i++) { ++ if (mask[i] == 0xff) ++ continue; ++ netdev_err(net_dev, "dev does not support masking!\n"); ++ err = -EOPNOTSUPP; ++ goto err_free_mem; ++ } ++ rule_cfg.mask_iova = 0; ++ } ++ ++ /* No way to control rule order in firmware */ ++ if (add) ++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, ++ &rule_cfg, (u16)fs->ring_cookie); ++ else ++ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, ++ &rule_cfg); ++ ++ dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, ++ rule_cfg.key_size * 2, DMA_TO_DEVICE); ++ if (err) { ++ netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); ++ goto err_free_mem; ++ } ++ ++ priv->cls_rule[fs->location].fs = *fs; ++ priv->cls_rule[fs->location].in_use = true; ++ ++err_free_mem: ++ kfree(dma_mem); ++ ++ return err; ++} ++ ++static int add_cls(struct net_device *net_dev, ++ struct ethtool_rx_flow_spec *fs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, fs, true); ++ if (err) ++ return err; ++ ++ priv->cls_rule[fs->location].in_use = true; ++ priv->cls_rule[fs->location].fs = *fs; ++ ++ return 0; ++} ++ ++static int del_cls(struct net_device 
*net_dev, int location) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int err; ++ ++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); ++ if (err) ++ return err; ++ ++ priv->cls_rule[location].in_use = false; ++ ++ return 0; ++} ++ ++static void clear_cls(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int i, err; ++ ++ for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { ++ if (!priv->cls_rule[i].in_use) ++ continue; ++ ++ err = del_cls(net_dev, i); ++ if (err) ++ netdev_warn(net_dev, ++ "err trying to delete classification entry %d\n", ++ i); ++ } ++} ++ ++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc) ++{ ++ int err = 0; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_SRXFH: ++ /* first off clear ALL classification rules, chaging key ++ * composition will break them anyway ++ */ ++ clear_cls(net_dev); ++ /* we purposely ignore cmd->flow_type for now, because the ++ * classifier only supports a single set of fields for all ++ * protocols ++ */ ++ err = dpaa2_eth_set_hash(net_dev, rxnfc->data); ++ break; ++ case ETHTOOL_SRXCLSRLINS: ++ err = add_cls(net_dev, &rxnfc->fs); ++ break; ++ ++ case ETHTOOL_SRXCLSRLDEL: ++ err = del_cls(net_dev, rxnfc->fs.location); ++ break; ++ ++ default: ++ err = -EOPNOTSUPP; ++ } ++ ++ return err; ++} ++ ++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, ++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; ++ int i, j; ++ ++ switch (rxnfc->cmd) { ++ case ETHTOOL_GRXFH: ++ /* we purposely ignore cmd->flow_type for now, because the ++ * classifier only supports a single set of fields for all ++ * protocols ++ */ ++ rxnfc->data = priv->rx_hash_fields; ++ break; ++ ++ case ETHTOOL_GRXRINGS: ++ rxnfc->data = dpaa2_eth_queue_count(priv); ++ break; ++ ++ case ETHTOOL_GRXCLSRLCNT: ++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) ++ if (priv->cls_rule[i].in_use) ++ rxnfc->rule_cnt++; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ case ETHTOOL_GRXCLSRULE: ++ if (!priv->cls_rule[rxnfc->fs.location].in_use) ++ return -EINVAL; ++ ++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; ++ break; ++ ++ case ETHTOOL_GRXCLSRLALL: ++ for (i = 0, j = 0; i < rule_cnt; i++) { ++ if (!priv->cls_rule[i].in_use) ++ continue; ++ if (j == rxnfc->rule_cnt) ++ return -EMSGSIZE; ++ rule_locs[j++] = i; ++ } ++ rxnfc->rule_cnt = j; ++ rxnfc->data = rule_cnt; ++ break; ++ ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++const struct ethtool_ops dpaa2_ethtool_ops = { ++ .get_drvinfo = dpaa2_eth_get_drvinfo, ++ .get_link = ethtool_op_get_link, ++ .get_settings = dpaa2_eth_get_settings, ++ .set_settings = dpaa2_eth_set_settings, ++ .get_sset_count = dpaa2_eth_get_sset_count, ++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, ++ .get_strings = dpaa2_eth_get_strings, ++ .get_rxnfc = dpaa2_eth_get_rxnfc, ++ .set_rxnfc = dpaa2_eth_set_rxnfc, ++}; +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h +new file mode 100644 +index 0000000..92ec12b +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h +@@ -0,0 +1,175 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPKG_H_ ++#define __FSL_DPKG_H_ ++ ++#include ++#include "../../fsl-mc/include/net.h" ++ ++/* Data Path Key Generator API ++ * Contains initialization APIs and runtime APIs for the Key Generator ++ */ ++ ++/** Key Generator properties */ ++ ++/** ++ * Number of masks per key extraction ++ */ ++#define DPKG_NUM_OF_MASKS 4 ++/** ++ * Number of extractions per key profile ++ */ ++#define DPKG_MAX_NUM_OF_EXTRACTS 10 ++ ++/** ++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types ++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset ++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field ++ * @DPKG_FULL_FIELD: Extract a full field ++ */ ++enum dpkg_extract_from_hdr_type { ++ DPKG_FROM_HDR = 0, ++ DPKG_FROM_FIELD = 1, ++ DPKG_FULL_FIELD = 2 ++}; ++ ++/** ++ * enum dpkg_extract_type - Enumeration for selecting extraction type ++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header ++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header ++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; ++ * e.g. 
can be used to extract header existence; ++ * please refer to 'Parse Result definition' section in the parser BG ++ */ ++enum dpkg_extract_type { ++ DPKG_EXTRACT_FROM_HDR = 0, ++ DPKG_EXTRACT_FROM_DATA = 1, ++ DPKG_EXTRACT_FROM_PARSE = 3 ++}; ++ ++/** ++ * struct dpkg_mask - A structure for defining a single extraction mask ++ * @mask: Byte mask for the extracted content ++ * @offset: Offset within the extracted content ++ */ ++struct dpkg_mask { ++ uint8_t mask; ++ uint8_t offset; ++}; ++ ++/** ++ * struct dpkg_extract - A structure for defining a single extraction ++ * @type: Determines how the union below is interpreted: ++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; ++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; ++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' ++ * @extract: Selects extraction method ++ * @num_of_byte_masks: Defines the number of valid entries in the array below; ++ * This is also the number of bytes to be used as masks ++ * @masks: Masks parameters ++ */ ++struct dpkg_extract { ++ enum dpkg_extract_type type; ++ /** ++ * union extract - Selects extraction method ++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' ++ */ ++ union { ++ /** ++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' ++ * @prot: Any of the supported headers ++ * @type: Defines the type of header extraction: ++ * DPKG_FROM_HDR: use size & offset below; ++ * DPKG_FROM_FIELD: use field, size and offset below; ++ * DPKG_FULL_FIELD: use field below ++ * @field: One of the supported fields (NH_FLD_) ++ * ++ * @size: Size in bytes ++ * @offset: Byte offset ++ * @hdr_index: Clear for cases not listed below; ++ * Used for protocols that may have more than a single ++ * header, 0 indicates an outer header; ++ * Supported protocols (possible values): ++ * NET_PROT_VLAN (0, HDR_INDEX_LAST); ++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); ++ * NET_PROT_IP(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv4(0, HDR_INDEX_LAST); ++ * NET_PROT_IPv6(0, HDR_INDEX_LAST); ++ */ ++ ++ struct { ++ enum net_prot prot; ++ enum dpkg_extract_from_hdr_type type; ++ uint32_t field; ++ uint8_t size; ++ uint8_t offset; ++ uint8_t hdr_index; ++ } from_hdr; ++ /** ++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ uint8_t size; ++ uint8_t offset; ++ } from_data; ++ ++ /** ++ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' ++ * @size: Size in bytes ++ * @offset: Byte offset ++ */ ++ struct { ++ uint8_t size; ++ uint8_t offset; ++ } from_parse; ++ } extract; ++ ++ uint8_t num_of_byte_masks; ++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; ++}; ++ ++/** ++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation ++ * profile (rule) ++ * @num_extracts: Defines the number of valid entries in the array below ++ * @extracts: Array of required extractions ++ */ ++struct dpkg_profile_cfg { ++ uint8_t num_extracts; ++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; ++}; ++ ++#endif /* __FSL_DPKG_H_ */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +new file mode 100644 +index 0000000..c0f8af0 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h +@@ -0,0 +1,1058 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPNI_CMD_H ++#define _FSL_DPNI_CMD_H ++ ++/* DPNI Version */ ++#define DPNI_VER_MAJOR 6 ++#define DPNI_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPNI_CMDID_OPEN 0x801 ++#define DPNI_CMDID_CLOSE 0x800 ++#define DPNI_CMDID_CREATE 0x901 ++#define DPNI_CMDID_DESTROY 0x900 ++ ++#define DPNI_CMDID_ENABLE 0x002 ++#define DPNI_CMDID_DISABLE 0x003 ++#define DPNI_CMDID_GET_ATTR 0x004 ++#define DPNI_CMDID_RESET 0x005 ++#define DPNI_CMDID_IS_ENABLED 0x006 ++ ++#define DPNI_CMDID_SET_IRQ 0x010 ++#define DPNI_CMDID_GET_IRQ 0x011 ++#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPNI_CMDID_SET_IRQ_MASK 0x014 ++#define DPNI_CMDID_GET_IRQ_MASK 0x015 ++#define DPNI_CMDID_GET_IRQ_STATUS 0x016 ++#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPNI_CMDID_SET_POOLS 0x200 ++#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 ++#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 ++#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 ++#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 ++#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 ++#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 ++#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 ++#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 ++#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 ++#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A ++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B ++#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C ++ ++#define DPNI_CMDID_GET_QDID 0x210 ++#define DPNI_CMDID_GET_SP_INFO 0x211 ++#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 ++#define DPNI_CMDID_GET_COUNTER 0x213 ++#define DPNI_CMDID_SET_COUNTER 0x214 ++#define DPNI_CMDID_GET_LINK_STATE 0x215 ++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 ++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 ++#define DPNI_CMDID_SET_MTU 0x218 ++#define DPNI_CMDID_GET_MTU 0x219 ++#define DPNI_CMDID_SET_LINK_CFG 0x21A ++#define DPNI_CMDID_SET_TX_SHAPING 0x21B ++ ++#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 ++#define DPNI_CMDID_GET_MCAST_PROMISC 0x221 ++#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 ++#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 ++#define DPNI_CMDID_SET_PRIM_MAC 0x224 ++#define DPNI_CMDID_GET_PRIM_MAC 0x225 ++#define DPNI_CMDID_ADD_MAC_ADDR 0x226 ++#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 ++#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 ++ ++#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 ++#define DPNI_CMDID_ADD_VLAN_ID 0x231 ++#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 ++#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 ++ ++#define DPNI_CMDID_SET_RX_TC_DIST 0x235 ++#define DPNI_CMDID_SET_TX_FLOW 0x236 ++#define DPNI_CMDID_GET_TX_FLOW 0x237 ++#define DPNI_CMDID_SET_RX_FLOW 0x238 ++#define DPNI_CMDID_GET_RX_FLOW 0x239 ++#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A ++#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B ++ ++#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E ++#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F ++ ++#define DPNI_CMDID_SET_QOS_TBL 0x240 ++#define DPNI_CMDID_ADD_QOS_ENT 0x241 ++#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 ++#define DPNI_CMDID_CLR_QOS_TBL 0x243 ++#define DPNI_CMDID_ADD_FS_ENT 0x244 ++#define DPNI_CMDID_REMOVE_FS_ENT 0x245 ++#define DPNI_CMDID_CLR_FS_ENT 0x246 ++#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 ++#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 ++#define DPNI_CMDID_SET_IPR 0x249 ++#define DPNI_CMDID_SET_IPF 0x24A ++ ++#define DPNI_CMDID_SET_TX_SELECTION 0x250 ++#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 ++#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 ++#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 ++#define 
DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 ++#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 ++#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 ++#define DPNI_CMDID_SET_TX_CONF 0x257 ++#define DPNI_CMDID_GET_TX_CONF 0x258 ++#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 ++#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A ++#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B ++#define DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_OPEN(cmd, dpni_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) ++ ++#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ ++do { \ ++ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ ++ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ ++ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ ++ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ ++ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ ++ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ ++ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ ++ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ ++ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ ++ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ ++ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ ++ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ ++ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ ++ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ ++ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ ++ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ ++ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv4); \ ++ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv6); \ ++ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ ++ cfg->ipr_cfg.max_reass_frm_size); \ ++ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv4); \ ++ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv6); \ ++} while (0) ++ ++#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ ++do { \ ++ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ ++ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ ++ MC_EXT_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ ++ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ ++ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ ++ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ ++ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ ++ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ ++ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ ++ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ ++ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ ++ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ ++ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ ++ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ ++ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ ++ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ ++ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv4); \ ++ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.max_open_frames_ipv6); \ 
++ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ ++ cfg->ipr_cfg.max_reass_frm_size); \ ++ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv4); \ ++ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ ++ cfg->ipr_cfg.min_frag_size_ipv6); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ ++ MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ ++ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ ++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ ++ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ ++ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ ++ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ ++ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ ++ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ ++ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ ++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_POOLS(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ ++ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ ++ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ ++ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ ++ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ ++ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ ++ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ ++ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \ ++ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ ++ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ ++ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ ++ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ 
++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_ATTR(cmd, attr) \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ ++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ ++ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ ++ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ ++ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ ++ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ ++ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ ++ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ ++ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ ++ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ ++ MC_RSP_OP(cmd, 4, 56, 8, uint8_t, attr->max_congestion_ctrl); \ ++ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ 
++#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ ++ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ ++ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ ++ MC_CMD_OP(cmd, 0, 32, 32, 
uint32_t, layout->options); \ ++ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ ++ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ ++ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ ++ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_QDID(cmd, qdid) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_COUNTER(cmd, counter) \ ++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_COUNTER(cmd, value) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ ++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MTU(cmd, mtu) \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MTU(cmd, mtu) \ ++ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, 
param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ ++ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ ++ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ ++ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ ++ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[0].mode); \ ++ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[1].mode); \ ++ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[2].mode); \ ++ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 1, 
48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[3].mode); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[4].mode); \ ++ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[5].mode); \ ++ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[6].mode); \ ++ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ ++ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ ++ cfg->tc_sched[7].mode); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ ++ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \ ++ cfg->fs_cfg.miss_action); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ ++ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ ++ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ ++ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ ++ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ ++ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.frame_data_size);\ ++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.flow_context_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ ++ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++} while (0) ++ ++/* cmd, param, offset, 
width, type, arg_name */ ++#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ ++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.frame_data_size);\ ++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.flow_context_size);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.frame_data_size);\ ++ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ cfg->flc_cfg.flow_context_size);\ ++ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ ++ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.frame_data_size);\ ++ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ ++ attr->flc_cfg.flow_context_size);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ ++ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ ++ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, 
cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ ++ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IPR(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_IPF(cmd, en) \ ++ MC_CMD_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ ++ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ ++ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ ++ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ ++ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_PREP_EARLY_DROP(ext, cfg) \ ++do { \ ++ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ ++ MC_PREP_OP(ext, 0, 2, 2, \ ++ enum dpni_congestion_unit, cfg->units); \ ++ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ ++ MC_PREP_OP(ext, 2, 0, 64, 
uint64_t, cfg->green.max_threshold); \ ++ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ ++ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ ++ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ ++ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ ++ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ ++ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ ++ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_EXT_EARLY_DROP(ext, cfg) \ ++do { \ ++ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ ++ MC_EXT_OP(ext, 0, 2, 2, \ ++ enum dpni_congestion_unit, cfg->units); \ ++ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ ++ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ ++ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ ++ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ ++ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ ++ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ ++ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ ++ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ ++ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ ++ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) ++ ++#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, 
cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) ++ ++#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ ++ cfg->queue_cfg.dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ ++ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ ++ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ ++ cfg->queue_cfg.tail_drop_threshold); \ ++ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ ++ cfg->queue_cfg.flc_cfg.flc_type); \ ++ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ ++ cfg->queue_cfg.flc_cfg.frame_data_size); \ ++ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ ++ cfg->queue_cfg.flc_cfg.flow_context_size); \ ++ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ ++ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ ++ cfg->queue_cfg.flc_cfg.flow_context); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ ++ attr->queue_attr.dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ ++ attr->queue_attr.dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ ++ MC_RSP_OP(cmd, 0, 46, 1, int, \ ++ attr->queue_attr.order_preservation_en); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ ++ 
MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ ++ attr->queue_attr.tail_drop_threshold); \ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ ++ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ ++ attr->queue_attr.flc_cfg.flc_type); \ ++ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ ++ attr->queue_attr.flc_cfg.frame_data_size); \ ++ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ ++ attr->queue_attr.flc_cfg.flow_context_size); \ ++ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ ++ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ ++ attr->queue_attr.flc_cfg.flow_context); \ ++} while (0) ++ ++#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ ++ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) ++ ++#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ ++ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ ++ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ ++ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ ++ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ ++ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ ++ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ ++} while (0) ++ ++#endif /* _FSL_DPNI_CMD_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c +new file mode 100644 +index 0000000..c228ce5 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c +@@ -0,0 +1,1907 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpni.h" ++#include "dpni-cmd.h" ++ ++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, ++ uint8_t *key_cfg_buf) ++{ ++ int i, j; ++ int offset = 0; ++ int param = 1; ++ uint64_t *params = (uint64_t *)key_cfg_buf; ++ ++ if (!key_cfg_buf || !cfg) ++ return -EINVAL; ++ ++ params[0] |= mc_enc(0, 8, cfg->num_extracts); ++ params[0] = cpu_to_le64(params[0]); ++ ++ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) ++ return -EINVAL; ++ ++ for (i = 0; i < cfg->num_extracts; i++) { ++ switch (cfg->extracts[i].type) { ++ case DPKG_EXTRACT_FROM_HDR: ++ params[param] |= mc_enc(0, 8, ++ cfg->extracts[i].extract.from_hdr.prot); ++ params[param] |= mc_enc(8, 4, ++ cfg->extracts[i].extract.from_hdr.type); ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract.from_hdr.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. ++ from_hdr.offset); ++ params[param] |= mc_enc(32, 32, ++ cfg->extracts[i].extract. ++ from_hdr.field); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ params[param] |= mc_enc(0, 8, ++ cfg->extracts[i].extract. ++ from_hdr.hdr_index); ++ break; ++ case DPKG_EXTRACT_FROM_DATA: ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract. ++ from_data.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. ++ from_data.offset); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ break; ++ case DPKG_EXTRACT_FROM_PARSE: ++ params[param] |= mc_enc(16, 8, ++ cfg->extracts[i].extract. ++ from_parse.size); ++ params[param] |= mc_enc(24, 8, ++ cfg->extracts[i].extract. 
++ from_parse.offset); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ break; ++ default: ++ return -EINVAL; ++ } ++ params[param] |= mc_enc( ++ 24, 8, cfg->extracts[i].num_of_byte_masks); ++ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ for (offset = 0, j = 0; ++ j < DPKG_NUM_OF_MASKS; ++ offset += 16, j++) { ++ params[param] |= mc_enc( ++ (offset), 8, cfg->extracts[i].masks[j].mask); ++ params[param] |= mc_enc( ++ (offset + 8), 8, ++ cfg->extracts[i].masks[j].offset); ++ } ++ params[param] = cpu_to_le64(params[param]); ++ param++; ++ } ++ return 0; ++} ++ ++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, ++ uint8_t *ext_cfg_buf) ++{ ++ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; ++ ++ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); ++ ++ return 0; ++} ++ ++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, ++ const uint8_t *ext_cfg_buf) ++{ ++ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; ++ ++ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); ++ ++ return 0; ++} ++ ++int dpni_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpni_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPNI_CMD_OPEN(cmd, dpni_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpni_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpni_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPNI_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpni_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_pools(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_pools_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_POOLS(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare 
command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpni_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpni_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpni_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ 
DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_ATTR(cmd, attr); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_error_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ 
DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_qdid(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *qdid) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, ++ cmd_flags, ++ token); ++ ++ /* 
send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_QDID(cmd, *qdid); ++ ++ return 0; ++} ++ ++int dpni_get_sp_info(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_sp_info *sp_info) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_SP_INFO(cmd, sp_info); ++ ++ return 0; ++} ++ ++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *data_offset) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); ++ ++ return 0; ++} ++ ++int dpni_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t *value) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_COUNTER(cmd, counter); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_COUNTER(cmd, *value); ++ ++ return 0; ++} ++ ++int dpni_set_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t value) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_COUNTER(cmd, counter, value); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_LINK_CFG(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_link_state *state) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_LINK_STATE(cmd, state); ++ ++ return 0; ++} ++ ++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_tx_shaping_cfg *tx_shaper) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t 
max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *max_frame_length) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); ++ ++ return 0; ++} ++ ++int dpni_set_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t mtu) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MTU(cmd, mtu); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *mtu) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MTU(cmd, *mtu); ++ ++ return 0; ++} ++ ++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); ++ ++ return 0; ++} ++ ++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command 
cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); ++ ++ return 0; ++} ++ ++int dpni_add_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int unicast, ++ int multicast) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_selection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct 
dpni_tx_selection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *flow_id, ++ const struct dpni_tx_flow_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); ++ ++ return 0; ++} ++ ++int dpni_get_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_flow_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_TX_FLOW(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ const struct dpni_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ struct dpni_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_FLOW(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_queue_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_queue_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, ++ cmd_flags, ++ token); ++ 
++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int revoke) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_qos_tbl_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg, ++ uint8_t tc_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_rule_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_add_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg, ++ uint16_t flow_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, ++ cmd_flags, ++ token); ++ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare 
command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, ++ cmd_flags, token); ++ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, ++ cmd_flags, token); ++ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_ipr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IPR(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_ipf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_IPF(cmd, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_policing_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_rx_tc_policing_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); ++ ++ return 0; ++} ++ ++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, ++ uint8_t *early_drop_buf) ++{ ++ uint64_t *ext_params = (uint64_t *)early_drop_buf; ++ ++ DPNI_PREP_EARLY_DROP(ext_params, cfg); ++} ++ ++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, ++ const uint8_t *early_drop_buf) ++{ ++ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; ++ ++ DPNI_EXT_EARLY_DROP(ext_params, cfg); ++} ++ ++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ 
++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_tx_conf_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return 
mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_conf_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_CONF(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_CONF(cmd, attr); ++ ++ return 0; ++} ++ ++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h +new file mode 100644 +index 0000000..fca426d +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h +@@ -0,0 +1,2581 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPNI_H ++#define __FSL_DPNI_H ++ ++#include "dpkg.h" ++ ++struct fsl_mc_io; ++ ++/** ++ * Data Path Network Interface API ++ * Contains initialization APIs and runtime control APIs for DPNI ++ */ ++ ++/** General DPNI macros */ ++ ++/** ++ * Maximum number of traffic classes ++ */ ++#define DPNI_MAX_TC 8 ++/** ++ * Maximum number of buffer pools per DPNI ++ */ ++#define DPNI_MAX_DPBP 8 ++/** ++ * Maximum number of storage-profiles per DPNI ++ */ ++#define DPNI_MAX_SP 2 ++ ++/** ++ * All traffic classes considered; see dpni_set_rx_flow() ++ */ ++#define DPNI_ALL_TCS (uint8_t)(-1) ++/** ++ * All flows within traffic class considered; see dpni_set_rx_flow() ++ */ ++#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) ++/** ++ * Generate new flow ID; see dpni_set_tx_flow() ++ */ ++#define DPNI_NEW_FLOW_ID (uint16_t)(-1) ++/* use for common tx-conf queue; see dpni_set_tx_conf_() */ ++#define DPNI_COMMON_TX_CONF (uint16_t)(-1) ++ ++/** ++ * dpni_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpni_id: DPNI unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpni_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpni_id, ++ uint16_t *token); ++ ++/** ++ * dpni_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
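++ *
++ * Editorial example (a minimal open/use/close sketch, not part of the
++ * original kernel-doc): it assumes an already initialised MC portal 'mc_io',
++ * a valid 'dpni_id', a cmd_flags value of 0, and omits most error handling:
++ *
++ *	uint16_t token;
++ *	int err;
++ *
++ *	err = dpni_open(mc_io, 0, dpni_id, &token);
++ *	if (err)
++ *		return err;
++ *	err = dpni_enable(mc_io, 0, token);
++ *	dpni_close(mc_io, 0, token);
++ *	return err;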
++ */ ++int dpni_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/* DPNI configuration options */ ++ ++/** ++ * Allow different distribution key profiles for different traffic classes; ++ * if not set, a single key profile is assumed ++ */ ++#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 ++ ++/** ++ * Disable all non-error transmit confirmation; error frames are reported ++ * back to a common Tx error queue ++ */ ++#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 ++ ++/** ++ * Disable per-sender private Tx confirmation/error queue ++ */ ++#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 ++ ++/** ++ * Support distribution based on hashed key; ++ * allows statistical distribution over receive queues in a traffic class ++ */ ++#define DPNI_OPT_DIST_HASH 0x00000010 ++ ++/** ++ * DEPRECATED - if this flag is selected and all new 'max_fs_entries' are ++ * '0' then backward compatibility is preserved; ++ * Support distribution based on flow steering; ++ * allows explicit control of distribution over receive queues in a traffic ++ * class ++ */ ++#define DPNI_OPT_DIST_FS 0x00000020 ++ ++/** ++ * Unicast filtering support ++ */ ++#define DPNI_OPT_UNICAST_FILTER 0x00000080 ++/** ++ * Multicast filtering support ++ */ ++#define DPNI_OPT_MULTICAST_FILTER 0x00000100 ++/** ++ * VLAN filtering support ++ */ ++#define DPNI_OPT_VLAN_FILTER 0x00000200 ++/** ++ * Support IP reassembly on received packets ++ */ ++#define DPNI_OPT_IPR 0x00000800 ++/** ++ * Support IP fragmentation on transmitted packets ++ */ ++#define DPNI_OPT_IPF 0x00001000 ++/** ++ * VLAN manipulation support ++ */ ++#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 ++/** ++ * Support masking of QoS lookup keys ++ */ ++#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 ++/** ++ * Support masking of Flow Steering lookup keys ++ */ ++#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 ++ ++/** ++ * struct dpni_extended_cfg - Structure representing extended DPNI configuration ++ * @tc_cfg: TCs configuration ++ * @ipr_cfg: IP reassembly configuration ++ */ ++struct dpni_extended_cfg { ++ /** ++ * struct tc_cfg - TC configuration ++ * @max_dist: Maximum distribution size for Rx traffic class; ++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, ++ * 112,128,192,224,256,384,448,512,768,896,1024; ++ * value '0' will be treated as '1'. ++ * Other unsupported values will be rounded down to the nearest ++ * supported value.
++ * @max_fs_entries: Maximum FS entries for Rx traffic class; ++ * '0' means no support for this TC; ++ */ ++ struct { ++ uint16_t max_dist; ++ uint16_t max_fs_entries; ++ } tc_cfg[DPNI_MAX_TC]; ++ /** ++ * struct ipr_cfg - Structure representing IP reassembly configuration ++ * @max_reass_frm_size: Maximum size of the reassembled frame ++ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments ++ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments ++ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly ++ * process ++ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly ++ * process ++ */ ++ struct { ++ uint16_t max_reass_frm_size; ++ uint16_t min_frag_size_ipv4; ++ uint16_t min_frag_size_ipv6; ++ uint16_t max_open_frames_ipv4; ++ uint16_t max_open_frames_ipv6; ++ } ipr_cfg; ++}; ++ ++/** ++ * dpni_prepare_extended_cfg() - function prepare extended parameters ++ * @cfg: extended structure ++ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA ++ * ++ * This function has to be called before dpni_create() ++ */ ++int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, ++ uint8_t *ext_cfg_buf); ++ ++/** ++ * struct dpni_cfg - Structure representing DPNI configuration ++ * @mac_addr: Primary MAC address ++ * @adv: Advanced parameters; default is all zeros; ++ * use this structure to change default settings ++ */ ++struct dpni_cfg { ++ uint8_t mac_addr[6]; ++ /** ++ * struct adv - Advanced parameters ++ * @options: Mask of available options; use 'DPNI_OPT_' values ++ * @start_hdr: Selects the packet starting header for parsing; ++ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' ++ * @max_senders: Maximum number of different senders; used as the number ++ * of dedicated Tx flows; Non-power-of-2 values are rounded ++ * up to the next power-of-2 value as hardware demands it; ++ * '0' will be treated as '1' ++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); ++ * '0' will e treated as '1' ++ * @max_unicast_filters: Maximum number of unicast filters; ++ * '0' is treated as '16' ++ * @max_multicast_filters: Maximum number of multicast filters; ++ * '0' is treated as '64' ++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in ++ * the QoS table; '0' is treated as '64' ++ * @max_qos_key_size: Maximum key size for the QoS look-up; ++ * '0' is treated as '24' which is enough for IPv4 ++ * 5-tuple ++ * @max_dist_key_size: Maximum key size for the distribution; ++ * '0' is treated as '24' which is enough for IPv4 5-tuple ++ * @max_policers: Maximum number of policers; ++ * should be between '0' and max_tcs ++ * @max_congestion_ctrl: Maximum number of congestion control groups ++ * (CGs); covers early drop and congestion notification ++ * requirements; ++ * should be between '0' and ('max_tcs' + 'max_senders') ++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory ++ * filled with the extended configuration by calling ++ * dpni_prepare_extended_cfg() ++ */ ++ struct { ++ uint32_t options; ++ enum net_prot start_hdr; ++ uint8_t max_senders; ++ uint8_t max_tcs; ++ uint8_t max_unicast_filters; ++ uint8_t max_multicast_filters; ++ uint8_t max_vlan_filters; ++ uint8_t max_qos_entries; ++ uint8_t max_qos_key_size; ++ uint8_t max_dist_key_size; ++ uint8_t max_policers; ++ uint8_t max_congestion_ctrl; ++ uint64_t ext_cfg_iova; ++ } adv; ++}; ++ ++/** ++ * dpni_create() - Create the DPNI object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one 
or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPNI object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpni_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpni_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpni_destroy() - Destroy the DPNI object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpni_pools_cfg - Structure representing buffer pools configuration ++ * @num_dpbp: Number of DPBPs ++ * @pools: Array of buffer pools parameters; The number of valid entries ++ * must match 'num_dpbp' value ++ */ ++struct dpni_pools_cfg { ++ uint8_t num_dpbp; ++ /** ++ * struct pools - Buffer pools parameters ++ * @dpbp_id: DPBP object ID ++ * @buffer_size: Buffer size ++ * @backup_pool: Backup pool ++ */ ++ struct { ++ int dpbp_id; ++ uint16_t buffer_size; ++ int backup_pool; ++ } pools[DPNI_MAX_DPBP]; ++}; ++ ++/** ++ * dpni_set_pools() - Set buffer pools configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Buffer pools configuration ++ * ++ * mandatory for DPNI operation ++ * warning:Allowed only when DPNI is disabled ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_pools(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_pools_cfg *cfg); ++ ++/** ++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_is_enabled() - Check if the DPNI is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_reset() - Reset the DPNI, returns the object to initial state. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * DPNI IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPNI_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a change in link state ++ */ ++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 ++ ++/** ++ * struct dpni_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpni_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpni_irq_cfg *irq_cfg); ++ ++/** ++ * dpni_get_irq() - Get IRQ information from the DPNI. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpni_irq_cfg *irq_cfg); ++ ++/** ++ * dpni_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state: - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable controls the ++ * overall interrupt state. If the interrupt is disabled, no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpni_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpni_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpni_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpni_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. 
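++ *
++ * Editorial example (a sketch, not part of the original kernel-doc; 'mc_io'
++ * and 'token' are assumed valid and cmd_flags is passed as 0): a typical
++ * handler reads the pending causes with dpni_get_irq_status(), services
++ * them, then writes the handled causes back to clear the W1C status bits:
++ *
++ *	uint32_t status = 0;
++ *	int err;
++ *
++ *	err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status);
++ *	if (!err && (status & DPNI_IRQ_EVENT_LINK_CHANGED))
++ *		err = dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX,
++ *					    DPNI_IRQ_EVENT_LINK_CHANGED);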
++ */ ++int dpni_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpni_attr - Structure representing DPNI attributes ++ * @id: DPNI object ID ++ * @version: DPNI version ++ * @start_hdr: Indicates the packet starting header for parsing ++ * @options: Mask of available options; reflects the value as was given in ++ * object's creation ++ * @max_senders: Maximum number of different senders; used as the number ++ * of dedicated Tx flows; ++ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) ++ * @max_unicast_filters: Maximum number of unicast filters ++ * @max_multicast_filters: Maximum number of multicast filters ++ * @max_vlan_filters: Maximum number of VLAN filters ++ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table ++ * @max_qos_key_size: Maximum key size for the QoS look-up ++ * @max_dist_key_size: Maximum key size for the distribution look-up ++ * @max_policers: Maximum number of policers; ++ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); ++ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; ++ * call dpni_extract_extended_cfg() to extract the extended configuration ++ */ ++struct dpni_attr { ++ int id; ++ /** ++ * struct version - DPNI version ++ * @major: DPNI major version ++ * @minor: DPNI minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ enum net_prot start_hdr; ++ uint32_t options; ++ uint8_t max_senders; ++ uint8_t max_tcs; ++ uint8_t max_unicast_filters; ++ uint8_t max_multicast_filters; ++ uint8_t max_vlan_filters; ++ uint8_t max_qos_entries; ++ uint8_t max_qos_key_size; ++ uint8_t max_dist_key_size; ++ uint8_t max_policers; ++ uint8_t max_congestion_ctrl; ++ uint64_t ext_cfg_iova; ++}; ++ ++/** ++ * dpni_get_attributes() - Retrieve DPNI attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
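++ *
++ * Editorial example (a sketch, not part of the original kernel-doc; 'mc_io'
++ * and 'token' are assumed valid and cmd_flags is passed as 0):
++ *
++ *	struct dpni_attr attr;
++ *	int err;
++ *
++ *	memset(&attr, 0, sizeof(attr));
++ *	err = dpni_get_attributes(mc_io, 0, token, &attr);
++ *	if (!err)
++ *		pr_info("DPNI %d v%u.%u, max %u TCs\n", attr.id,
++ *			attr.version.major, attr.version.minor, attr.max_tcs);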
++ */ ++int dpni_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_attr *attr); ++ ++/** ++ * dpni_extract_extended_cfg() - extract the extended parameters ++ * @cfg: extended structure ++ * @ext_cfg_buf: 256 bytes of DMA-able memory ++ * ++ * This function has to be called after dpni_get_attributes() ++ */ ++int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, ++ const uint8_t *ext_cfg_buf); ++ ++/** ++ * DPNI errors ++ */ ++ ++/** ++ * Extract out of frame header error ++ */ ++#define DPNI_ERROR_EOFHE 0x00020000 ++/** ++ * Frame length error ++ */ ++#define DPNI_ERROR_FLE 0x00002000 ++/** ++ * Frame physical error ++ */ ++#define DPNI_ERROR_FPE 0x00001000 ++/** ++ * Parsing header error ++ */ ++#define DPNI_ERROR_PHE 0x00000020 ++/** ++ * Parser L3 checksum error ++ */ ++#define DPNI_ERROR_L3CE 0x00000004 ++/** ++ * Parser L4 checksum error ++ */ ++#define DPNI_ERROR_L4CE 0x00000001 ++ ++/** ++ * enum dpni_error_action - Defines DPNI behavior for errors ++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame ++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow ++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue ++ */ ++enum dpni_error_action { ++ DPNI_ERROR_ACTION_DISCARD = 0, ++ DPNI_ERROR_ACTION_CONTINUE = 1, ++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 ++}; ++ ++/** ++ * struct dpni_error_cfg - Structure representing DPNI errors treatment ++ * @errors: Errors mask; use 'DPNI_ERROR_' values ++ * @error_action: The desired action for the errors mask ++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation ++ * status (FAS); relevant only for the non-discard action ++ */ ++struct dpni_error_cfg { ++ uint32_t errors; ++ enum dpni_error_action error_action; ++ int set_frame_annotation; ++}; ++ ++/** ++ * dpni_set_errors_behavior() - Set errors behavior ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Errors configuration ++ * ++ * This function may be called numerous times with different ++ * error masks ++ * ++ * Return: '0' on Success; Error code otherwise.
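++ *
++ * Editorial example (a sketch, not part of the original kernel-doc): route
++ * frames with L3/L4 checksum errors to the error queue instead of dropping
++ * them; 'mc_io' and 'token' are assumed valid and cmd_flags is 0:
++ *
++ *	struct dpni_error_cfg err_cfg = {
++ *		.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE,
++ *		.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE,
++ *		.set_frame_annotation = 1,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);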
++ */ ++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_error_cfg *cfg); ++ ++/** ++ * DPNI buffer layout modification options ++ */ ++ ++/** ++ * Select to modify the time-stamp setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 ++/** ++ * Select to modify the parser-result setting; not applicable for Tx ++ */ ++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 ++/** ++ * Select to modify the frame-status setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 ++/** ++ * Select to modify the private-data-size setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 ++/** ++ * Select to modify the data-alignment setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 ++/** ++ * Select to modify the data-head-room setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 ++/** ++ * Select to modify the data-tail-room setting ++ */ ++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 ++ ++/** ++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout ++ * @options: Flags representing the suggested modifications to the buffer ++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags ++ * @pass_timestamp: Pass timestamp value ++ * @pass_parser_result: Pass parser results ++ * @pass_frame_status: Pass frame status ++ * @private_data_size: Size kept for private data (in bytes) ++ * @data_align: Data alignment ++ * @data_head_room: Data head room ++ * @data_tail_room: Data tail room ++ */ ++struct dpni_buffer_layout { ++ uint32_t options; ++ int pass_timestamp; ++ int pass_parser_result; ++ int pass_frame_status; ++ uint16_t private_data_size; ++ uint16_t data_align; ++ uint16_t data_head_room; ++ uint16_t data_tail_room; ++}; ++ ++/** ++ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout ++ * attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Returns buffer layout attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout ++ * configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @layout: Buffer layout configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Allowed only when DPNI is disabled ++ */ ++int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_buffer_layout *layout); ++ ++/** ++ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. 
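++ *
++ * Editorial example (a sketch, not part of the original kernel-doc):
++ * enabling hardware checksum validation for both layers, e.g. when Rx
++ * checksum offload is turned on; 'mc_io' and 'token' are assumed valid and
++ * cmd_flags is 0:
++ *
++ *	int err;
++ *
++ *	err = dpni_set_l3_chksum_validation(mc_io, 0, token, 1);
++ *	if (!err)
++ *		err = dpni_set_l4_chksum_validation(mc_io, 0, token, 1);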
++ */ ++int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used ++ * for enqueue operations ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qdid: Returned virtual QDID value that should be used as an argument ++ * in all enqueue operations ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_qdid(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *qdid); ++ ++/** ++ * struct dpni_sp_info - Structure representing DPNI storage-profile information ++ * (relevant only for DPNI owned by AIOP) ++ * @spids: array of storage-profiles ++ */ ++struct dpni_sp_info { ++ uint16_t spids[DPNI_MAX_SP]; ++}; ++ ++/** ++ * dpni_get_sp_info() - Get the AIOP storage profile IDs associated with the DPNI ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @sp_info: Returned AIOP storage-profile information ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Only relevant for DPNI that belongs to AIOP container. ++ */ ++int dpni_get_sp_info(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_sp_info *sp_info); ++ ++/** ++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @data_offset: Tx data offset (from start of buffer) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *data_offset); ++ ++/** ++ * enum dpni_counter - DPNI counter types ++ * @DPNI_CNT_ING_FRAME: Counts ingress frames ++ * @DPNI_CNT_ING_BYTE: Counts ingress bytes ++ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit ++ * 'drop' setting ++ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors ++ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames ++ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes ++ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames ++ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes ++ * @DPNI_CNT_EGR_FRAME: Counts egress frames ++ * @DPNI_CNT_EGR_BYTE: Counts egress bytes ++ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors ++ */ ++enum dpni_counter { ++ DPNI_CNT_ING_FRAME = 0x0, ++ DPNI_CNT_ING_BYTE = 0x1, ++ DPNI_CNT_ING_FRAME_DROP = 0x2, ++ DPNI_CNT_ING_FRAME_DISCARD = 0x3, ++ DPNI_CNT_ING_MCAST_FRAME = 0x4, ++ DPNI_CNT_ING_MCAST_BYTE = 0x5, ++ DPNI_CNT_ING_BCAST_FRAME = 0x6, ++ DPNI_CNT_ING_BCAST_BYTES = 0x7, ++ DPNI_CNT_EGR_FRAME = 0x8, ++ DPNI_CNT_EGR_BYTE = 0x9, ++ DPNI_CNT_EGR_FRAME_DISCARD = 0xa ++}; ++ ++/** ++ * dpni_get_counter() - Read a specific DPNI counter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @counter: The requested counter ++ * @value: Returned counter's current value ++ * ++ * Return: '0' on Success; Error code otherwise.
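++ *
++ * Editorial example (a sketch, not part of the original kernel-doc): reading
++ * the ingress frame and byte counters, e.g. for ethtool-style statistics;
++ * 'mc_io' and 'token' are assumed valid and cmd_flags is 0:
++ *
++ *	uint64_t frames = 0, bytes = 0;
++ *	int err;
++ *
++ *	err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_FRAME, &frames);
++ *	if (!err)
++ *		err = dpni_get_counter(mc_io, 0, token, DPNI_CNT_ING_BYTE,
++ *				       &bytes);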
++ */ ++int dpni_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t *value); ++ ++/** ++ * dpni_set_counter() - Set (or clear) a specific DPNI counter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @counter: The requested counter ++ * @value: New counter value; typically pass '0' for resetting ++ * the counter. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpni_counter counter, ++ uint64_t value); ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable asymmetric pause frames ++ */ ++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpni_link_cfg - Structure representing DPNI link configuration ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values ++ */ ++struct dpni_link_cfg { ++ uint32_t rate; ++ uint64_t options; ++}; ++ ++/** ++ * dpni_set_link_cfg() - Set the link configuration. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_link_cfg *cfg); ++ ++/** ++ * struct dpni_link_state - Structure representing DPNI link state ++ * @rate: Rate ++ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values ++ * @up: Link state; '0' for down, '1' for up ++ */ ++struct dpni_link_state { ++ uint32_t rate; ++ uint64_t options; ++ int up; ++}; ++ ++/** ++ * dpni_get_link_state() - Return the link state (either up or down) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @state: Returned link state; ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpni_link_state *state); ++ ++/** ++ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration ++ * @rate_limit: rate in Mbps ++ * @max_burst_size: burst size in bytes (up to 64KB) ++ */ ++struct dpni_tx_shaping_cfg { ++ uint32_t rate_limit; ++ uint16_t max_burst_size; ++}; ++ ++/** ++ * dpni_set_tx_shaping() - Set the transmit shaping ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tx_shaper: tx shaping configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_tx_shaping_cfg *tx_shaper); ++ ++/** ++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t max_frame_length); ++ ++/** ++ * dpni_get_max_frame_length() - Get the maximum received frame length. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @max_frame_length: Maximum received frame length (in ++ * bytes); frame is discarded if its ++ * length exceeds this value ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *max_frame_length); ++ ++/** ++ * dpni_set_mtu() - Set the MTU for the interface. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mtu: MTU length (in bytes) ++ * ++ * MTU determines the maximum fragment size for performing IP ++ * fragmentation on egress packets. ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t mtu); ++ ++/** ++ * dpni_get_mtu() - Get the MTU. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mtu: Returned MTU length (in bytes) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_mtu(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *mtu); ++ ++/** ++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. 
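++ *
++ * Editorial example (a sketch, not part of the original kernel-doc): a
++ * .ndo_set_rx_mode-style handler might fall back to unicast promiscuous
++ * mode when the address list no longer fits the filter table; 'net_dev',
++ * 'max_uc_filters' (e.g. taken from dpni_attr.max_unicast_filters), 'mc_io'
++ * and 'token' are assumed to exist and cmd_flags is 0:
++ *
++ *	int err, uc_promisc;
++ *
++ *	uc_promisc = netdev_uc_count(net_dev) > max_uc_filters;
++ *	err = dpni_set_unicast_promisc(mc_io, 0, token, uc_promisc);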
++ */ ++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Returns '1' if enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpni_set_primary_mac_addr() - Set the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to set as primary address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_get_primary_mac_addr() - Get the primary MAC address ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: Returned MAC address ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t mac_addr[6]); ++ ++/** ++ * dpni_add_mac_addr() - Add MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to add ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_remove_mac_addr() - Remove MAC address filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @mac_addr: MAC address to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const uint8_t mac_addr[6]); ++ ++/** ++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @unicast: Set to '1' to clear unicast addresses ++ * @multicast: Set to '1' to clear multicast addresses ++ * ++ * The primary MAC address is not cleared by this operation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int unicast, ++ int multicast); ++ ++/** ++ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_add_vlan_id() - Add VLAN ID filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @vlan_id: VLAN ID to add ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id); ++ ++/** ++ * dpni_remove_vlan_id() - Remove VLAN ID filter ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @vlan_id: VLAN ID to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t vlan_id); ++ ++/** ++ * dpni_clear_vlan_filters() - Clear all VLAN filters ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode ++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority ++ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling ++ */ ++enum dpni_tx_schedule_mode { ++ DPNI_TX_SCHED_STRICT_PRIORITY, ++ DPNI_TX_SCHED_WEIGHTED, ++}; ++ ++/** ++ * struct dpni_tx_schedule_cfg - Structure representing Tx ++ * scheduling configuration ++ * @mode: scheduling mode ++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; ++ * not applicable for 'strict-priority' mode; ++ */ ++struct dpni_tx_schedule_cfg { ++ enum dpni_tx_schedule_mode mode; ++ uint16_t delta_bandwidth; ++}; ++ ++/** ++ * struct dpni_tx_selection_cfg - Structure representing transmission ++ * selection configuration ++ * @tc_sched: an array of traffic-classes ++ */ ++struct dpni_tx_selection_cfg { ++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; ++}; ++ ++/** ++ * dpni_set_tx_selection() - Set transmission selection configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: transmission selection configuration ++ * ++ * warning: Allowed only when DPNI is disabled ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpni_set_tx_selection(struct fsl_mc_io *mc_io,
++			  uint32_t cmd_flags,
++			  uint16_t token,
++			  const struct dpni_tx_selection_cfg *cfg);
++
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ *	the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ *	the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++	DPNI_DIST_MODE_NONE = 0,
++	DPNI_DIST_MODE_HASH = 1,
++	DPNI_DIST_MODE_FS = 2
++};
++
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++	DPNI_FS_MISS_DROP = 0,
++	DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++	DPNI_FS_MISS_HASH = 2
++};
++
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++	enum dpni_fs_miss_action miss_action;
++	uint16_t default_flow_id;
++};
++
++/**
++ * dpni_prepare_key_cfg() - Prepare extract parameters for a key configuration
++ * @cfg: A structure defining a full Key Generation profile (rule)
++ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before the following functions:
++ *	- dpni_set_rx_tc_dist()
++ *	- dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++			 uint8_t *key_cfg_buf);
++
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ *	supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ *	112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ *	the extractions to be used for the distribution key by calling
++ *	dpni_prepare_key_cfg(); relevant only when
++ *	'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ *	'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++	uint16_t dist_size;
++	enum dpni_dist_mode dist_mode;
++	uint64_t key_cfg_iova;
++	struct dpni_fs_tbl_cfg fs_cfg;
++};
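++
++/*
++ * Illustrative usage sketch, not part of the MC API: hash-distribute Rx
++ * traffic class 0 across 8 queues. 'kg_cfg' is a previously filled
++ * struct dpkg_profile_cfg, and 'key_buf'/'key_iova' name a hypothetical
++ * zeroed 256-byte buffer and its DMA-mapped address:
++ *
++ *	struct dpni_rx_tc_dist_cfg dist = { 0 };
++ *	int err;
++ *
++ *	err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
++ *	dist.dist_size = 8;
++ *	dist.dist_mode = DPNI_DIST_MODE_HASH;
++ *	dist.key_cfg_iova = key_iova;
++ *	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist);
++ */
++
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.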
++ */ ++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_dist_cfg *cfg); ++ ++/** ++ * Set to select color aware mode (otherwise - color blind) ++ */ ++#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 ++/** ++ * Set to discard frame with RED color ++ */ ++#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 ++ ++/** ++ * enum dpni_policer_mode - selecting the policer mode ++ * @DPNI_POLICER_MODE_NONE: Policer is disabled ++ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through ++ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 ++ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 ++ */ ++enum dpni_policer_mode { ++ DPNI_POLICER_MODE_NONE = 0, ++ DPNI_POLICER_MODE_PASS_THROUGH, ++ DPNI_POLICER_MODE_RFC_2698, ++ DPNI_POLICER_MODE_RFC_4115 ++}; ++ ++/** ++ * enum dpni_policer_unit - DPNI policer units ++ * @DPNI_POLICER_UNIT_BYTES: bytes units ++ * @DPNI_POLICER_UNIT_FRAMES: frames units ++ */ ++enum dpni_policer_unit { ++ DPNI_POLICER_UNIT_BYTES = 0, ++ DPNI_POLICER_UNIT_FRAMES ++}; ++ ++/** ++ * enum dpni_policer_color - selecting the policer color ++ * @DPNI_POLICER_COLOR_GREEN: Green color ++ * @DPNI_POLICER_COLOR_YELLOW: Yellow color ++ * @DPNI_POLICER_COLOR_RED: Red color ++ */ ++enum dpni_policer_color { ++ DPNI_POLICER_COLOR_GREEN = 0, ++ DPNI_POLICER_COLOR_YELLOW, ++ DPNI_POLICER_COLOR_RED ++}; ++ ++/** ++ * struct dpni_rx_tc_policing_cfg - Policer configuration ++ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values ++ * @mode: policer mode ++ * @default_color: For pass-through mode the policer re-colors with this ++ * color any incoming packets. For Color aware non-pass-through mode: ++ * policer re-colors with this color all packets with FD[DROPP]>2. ++ * @units: Bytes or Packets ++ * @cir: Committed information rate (CIR) in Kbps or packets/second ++ * @cbs: Committed burst size (CBS) in bytes or packets ++ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second ++ * Excess information rate (EIR, rfc4115) in Kbps or packets/second ++ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets ++ * Excess burst size (EBS, rfc4115) in bytes or packets ++ */ ++struct dpni_rx_tc_policing_cfg { ++ uint32_t options; ++ enum dpni_policer_mode mode; ++ enum dpni_policer_unit units; ++ enum dpni_policer_color default_color; ++ uint32_t cir; ++ uint32_t cbs; ++ uint32_t eir; ++ uint32_t ebs; ++}; ++ ++/** ++ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class policing configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rx_tc_policing_cfg *cfg); ++ ++/** ++ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Traffic class policing configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */
++int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io,
++			    uint32_t cmd_flags,
++			    uint16_t token,
++			    uint8_t tc_id,
++			    struct dpni_rx_tc_policing_cfg *cfg);
++
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++	DPNI_CONGESTION_UNIT_BYTES = 0,
++	DPNI_CONGESTION_UNIT_FRAMES
++};
++
++/**
++ * enum dpni_early_drop_mode - DPNI early drop mode
++ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled
++ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
++ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode
++ */
++enum dpni_early_drop_mode {
++	DPNI_EARLY_DROP_MODE_NONE = 0,
++	DPNI_EARLY_DROP_MODE_TAIL,
++	DPNI_EARLY_DROP_MODE_WRED
++};
++
++/**
++ * struct dpni_wred_cfg - WRED configuration
++ * @max_threshold: maximum threshold at which packets may be discarded. Above
++ *	this threshold all packets are discarded; must be less than 2^39;
++ *	approximated to be expressed as (x+256)*2^(y-1) due to HW
++ *	implementation.
++ * @min_threshold: minimum threshold at which packets may be discarded
++ * @drop_probability: probability that a packet will be discarded (1-100,
++ *	associated with the max_threshold).
++ */
++struct dpni_wred_cfg {
++	uint64_t max_threshold;
++	uint64_t min_threshold;
++	uint8_t drop_probability;
++};
++
++/**
++ * struct dpni_early_drop_cfg - early-drop configuration
++ * @mode: drop mode
++ * @units: units type
++ * @green: WRED - 'green' configuration
++ * @yellow: WRED - 'yellow' configuration
++ * @red: WRED - 'red' configuration
++ * @tail_drop_threshold: tail drop threshold
++ */
++struct dpni_early_drop_cfg {
++	enum dpni_early_drop_mode mode;
++	enum dpni_congestion_unit units;
++
++	struct dpni_wred_cfg green;
++	struct dpni_wred_cfg yellow;
++	struct dpni_wred_cfg red;
++
++	uint32_t tail_drop_threshold;
++};
++
++/**
++ * dpni_prepare_early_drop() - Prepare an early-drop configuration.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before dpni_set_rx_tc_early_drop() or
++ * dpni_set_tx_tc_early_drop()
++ *
++ */
++void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg,
++			     uint8_t *early_drop_buf);
++
++/**
++ * dpni_extract_early_drop() - Extract the early-drop configuration.
++ * @cfg: Early-drop configuration
++ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called after dpni_get_rx_tc_early_drop() or
++ * dpni_get_tx_tc_early_drop()
++ *
++ */
++void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
++			     const uint8_t *early_drop_buf);
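++
++/*
++ * Illustrative usage sketch, not part of the MC API: enable WRED on Rx
++ * traffic class 0. 'ed_buf' and 'ed_iova' name a hypothetical zeroed
++ * 256-byte buffer and its DMA-mapped I/O virtual address:
++ *
++ *	struct dpni_early_drop_cfg ed = {
++ *		.mode = DPNI_EARLY_DROP_MODE_WRED,
++ *		.units = DPNI_CONGESTION_UNIT_BYTES,
++ *		.green = {
++ *			.min_threshold = 64 * 1024,
++ *			.max_threshold = 128 * 1024,
++ *			.drop_probability = 50,
++ *		},
++ *	};
++ *	int err;
++ *
++ *	dpni_prepare_early_drop(&ed, ed_buf);
++ *	err = dpni_set_rx_tc_early_drop(mc_io, 0, token, 0, ed_iova);
++ */
++
++/**
++ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
++ *	with the early-drop configuration by calling dpni_prepare_early_drop()
++ *
++ * warning: Before calling this function, call dpni_prepare_early_drop() to
++ * prepare the early_drop_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.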
++ */ ++int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory ++ * ++ * warning: After calling this function, call dpni_extract_early_drop() to ++ * get the early drop configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled ++ * with the early-drop configuration by calling dpni_prepare_early_drop() ++ * ++ * warning: Before calling this function, call dpni_prepare_early_drop() to ++ * prepare the early_drop_iova parameter ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory ++ * ++ * warning: After calling this function, call dpni_extract_early_drop() to ++ * get the early drop configuration ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint64_t early_drop_iova); ++ ++/** ++ * enum dpni_dest - DPNI destination types ++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and ++ * does not generate FQDAN notifications; user is expected to ++ * dequeue from the queue based on polling or other user-defined ++ * method ++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN ++ * notifications to the specified DPIO; user is expected to dequeue ++ * from the queue only after notification is received ++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate ++ * FQDAN notifications, but is connected to the specified DPCON ++ * object; user is expected to dequeue from the DPCON channel ++ */ ++enum dpni_dest { ++ DPNI_DEST_NONE = 0, ++ DPNI_DEST_DPIO = 1, ++ DPNI_DEST_DPCON = 2 ++}; ++ ++/** ++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters ++ * @dest_type: Destination type ++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type ++ * @priority: Priority selection within the DPIO or DPCON channel; valid values ++ * are 0-1 or 0-7, depending on the number of priorities in that ++ * channel; not relevant for 'DPNI_DEST_NONE' option ++ */ ++struct dpni_dest_cfg { ++ enum dpni_dest dest_type; ++ int dest_id; ++ uint8_t priority; ++}; ++ ++/* DPNI congestion options */ ++ ++/** ++ * CSCN message is written to message_iova once entering a ++ * congestion state (see 'threshold_entry') ++ */ ++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 ++/** ++ * CSCN message is written to message_iova once exiting a ++ * congestion state (see 'threshold_exit') ++ */ ++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 ++/** ++ * CSCN write will attempt to allocate into a cache (coherent write); ++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected ++ */ ++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to ++ * DPIO/DPCON's WQ channel once entering a congestion state ++ * (see 'threshold_entry') ++ */ ++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to ++ * DPIO/DPCON's WQ channel once exiting a congestion state ++ * (see 'threshold_exit') ++ */ ++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 ++/** ++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the ++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) ++ */ ++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 ++ ++/** ++ * struct dpni_congestion_notification_cfg - congestion notification ++ * configuration ++ * @units: units type ++ * @threshold_entry: above this threshold we enter a congestion state. ++ * set it to '0' to disable it ++ * @threshold_exit: below this threshold we exit the congestion state. 
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ *	must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is
++ *	contained in 'options'
++ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
++ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values
++ */
++
++struct dpni_congestion_notification_cfg {
++	enum dpni_congestion_unit units;
++	uint32_t threshold_entry;
++	uint32_t threshold_exit;
++	uint64_t message_ctx;
++	uint64_t message_iova;
++	struct dpni_dest_cfg dest_cfg;
++	uint16_t options;
++};
++
++/**
++ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion
++ *	notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t tc_id,
++			const struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion
++ *	notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t tc_id,
++			struct dpni_congestion_notification_cfg *cfg);
++
++/**
++ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion
++ *	notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t tc_id,
++			const struct dpni_congestion_notification_cfg *cfg);
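++
++/*
++ * Illustrative usage sketch, not part of the MC API: have the MC write a
++ * CSCN message to memory when Rx traffic class 0 crosses a byte threshold.
++ * 'cscn_iova' names a hypothetical 16B-aligned, DMA-mapped address:
++ *
++ *	struct dpni_congestion_notification_cfg notif = {
++ *		.units = DPNI_CONGESTION_UNIT_BYTES,
++ *		.threshold_entry = 1024 * 1024,
++ *		.threshold_exit = 512 * 1024,
++ *		.message_iova = cscn_iova,
++ *		.options = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ *			   DPNI_CONG_OPT_WRITE_MEM_ON_EXIT,
++ *	};
++ *	int err;
++ *
++ *	err = dpni_set_rx_tc_congestion_notification(mc_io, 0, token, 0,
++ *						      &notif);
++ */
++
++/**
++ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion
++ *	notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.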
++ */ ++int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * enum dpni_flc_type - DPNI FLC types ++ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value ++ * @DPNI_FLC_STASH: select the FLC to be used for stash control ++ */ ++enum dpni_flc_type { ++ DPNI_FLC_USER_DEFINED = 0, ++ DPNI_FLC_STASH = 1, ++}; ++ ++/** ++ * enum dpni_stash_size - DPNI FLC stashing size ++ * @DPNI_STASH_SIZE_0B: no stash ++ * @DPNI_STASH_SIZE_64B: stashes 64 bytes ++ * @DPNI_STASH_SIZE_128B: stashes 128 bytes ++ * @DPNI_STASH_SIZE_192B: stashes 192 bytes ++ */ ++enum dpni_stash_size { ++ DPNI_STASH_SIZE_0B = 0, ++ DPNI_STASH_SIZE_64B = 1, ++ DPNI_STASH_SIZE_128B = 2, ++ DPNI_STASH_SIZE_192B = 3, ++}; ++ ++/* DPNI FLC stash options */ ++ ++/** ++ * stashes the whole annotation area (up to 192 bytes) ++ */ ++#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 ++ ++/** ++ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration ++ * @flc_type: FLC type ++ * @options: Mask of available options; ++ * use 'DPNI_FLC_STASH_' values ++ * @frame_data_size: Size of frame data to be stashed ++ * @flow_context_size: Size of flow context to be stashed ++ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': ++ * this value will be provided in the frame descriptor ++ * (FD[FLC]) ++ * 2. In case flc_type is 'DPNI_FLC_STASH': ++ * this value will be I/O virtual address of the ++ * flow-context; ++ * Must be cacheline-aligned and DMA-able memory ++ */ ++struct dpni_flc_cfg { ++ enum dpni_flc_type flc_type; ++ uint32_t options; ++ enum dpni_stash_size frame_data_size; ++ enum dpni_stash_size flow_context_size; ++ uint64_t flow_context; ++}; ++ ++/** ++ * DPNI queue modification options ++ */ ++ ++/** ++ * Select to modify the user's context associated with the queue ++ */ ++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 ++/** ++ * Select to modify the queue's destination ++ */ ++#define DPNI_QUEUE_OPT_DEST 0x00000002 ++/** Select to modify the flow-context parameters; ++ * not applicable for Tx-conf/Err queues as the FD comes from the user ++ */ ++#define DPNI_QUEUE_OPT_FLC 0x00000004 ++/** ++ * Select to modify the queue's order preservation ++ */ ++#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 ++/* Select to modify the queue's tail-drop threshold */ ++#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 ++ ++/** ++ * struct dpni_queue_cfg - Structure representing queue configuration ++ * @options: Flags representing the suggested modifications to the queue; ++ * Use any combination of 'DPNI_QUEUE_OPT_' flags ++ * @user_ctx: User context value provided in the frame descriptor of each ++ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' ++ * is contained in 'options' ++ * @dest_cfg: Queue destination parameters; ++ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' ++ * @flc_cfg: Flow context configuration; in case the TC's distribution ++ * is either NONE or HASH the FLC's settings of flow#0 are used. ++ * in the case of FS (flow-steering) the flow's FLC settings ++ * are used. 
++ *	valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options'
++ * @order_preservation_en: enable/disable order preservation;
++ *	valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained
++ *	in 'options'
++ * @tail_drop_threshold: set the queue's tail drop threshold in bytes;
++ *	a value of '0' disables the threshold; maximum value is 0xE000000;
++ *	valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained
++ *	in 'options'
++ */
++struct dpni_queue_cfg {
++	uint32_t options;
++	uint64_t user_ctx;
++	struct dpni_dest_cfg dest_cfg;
++	struct dpni_flc_cfg flc_cfg;
++	int order_preservation_en;
++	uint32_t tail_drop_threshold;
++};
++
++/**
++ * struct dpni_queue_attr - Structure representing queue attributes
++ * @user_ctx: User context value provided in the frame descriptor of each
++ *	dequeued frame
++ * @dest_cfg: Queue destination configuration
++ * @flc_cfg: Flow context configuration
++ * @order_preservation_en: enable/disable order preservation
++ * @tail_drop_threshold: queue's tail drop threshold in bytes;
++ * @fqid: Virtual fqid value to be used for dequeue operations
++ */
++struct dpni_queue_attr {
++	uint64_t user_ctx;
++	struct dpni_dest_cfg dest_cfg;
++	struct dpni_flc_cfg flc_cfg;
++	int order_preservation_en;
++	uint32_t tail_drop_threshold;
++
++	uint32_t fqid;
++};
++
++/**
++ * DPNI Tx flow modification options
++ */
++
++/**
++ * Select to modify the settings for dedicated Tx confirmation/error
++ */
++#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001
++/**
++ * Select to modify the L3 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010
++/**
++ * Select to modify the L4 checksum generation setting
++ */
++#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020
++
++/**
++ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration
++ * @options: Flags representing the suggested modifications to the Tx flow;
++ *	Use any combination of 'DPNI_TX_FLOW_OPT_' flags
++ * @use_common_tx_conf_queue: Set to '1' to use the common (default) Tx
++ *	confirmation and error queue; Set to '0' to use the private
++ *	Tx confirmation and error queue; valid only if
++ *	'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation
++ *	and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options'
++ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable;
++ *	valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options'
++ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable;
++ *	valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options'
++ */
++struct dpni_tx_flow_cfg {
++	uint32_t options;
++	int use_common_tx_conf_queue;
++	int l3_chksum_gen;
++	int l4_chksum_gen;
++};
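++
++/*
++ * Illustrative usage sketch, not part of the MC API, for the flow-id
++ * convention documented at dpni_set_tx_flow() below: passing
++ * DPNI_NEW_FLOW_ID asks the MC to allocate a new flow id, which is then
++ * used as the QDBIN argument in enqueue operations:
++ *
++ *	struct dpni_tx_flow_cfg tx_flow = {
++ *		.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN |
++ *			   DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN,
++ *		.l3_chksum_gen = 1,
++ *		.l4_chksum_gen = 1,
++ *	};
++ *	uint16_t flow_id = DPNI_NEW_FLOW_ID;
++ *	int err;
++ *
++ *	err = dpni_set_tx_flow(mc_io, 0, token, &flow_id, &tx_flow);
++ */
++
++/**
++ * dpni_set_tx_flow() - Set Tx flow configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @flow_id: Provides (or returns) the sender's flow ID;
++ *	for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate
++ *	a new flow_id; this ID should be used as the QDBIN argument
++ *	in enqueue operations
++ * @cfg: Tx flow configuration
++ *
++ * Return: '0' on Success; Error code otherwise.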
++ */ ++int dpni_set_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t *flow_id, ++ const struct dpni_tx_flow_cfg *cfg); ++ ++/** ++ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes ++ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and ++ * error queue; '0' if using private Tx confirmation and error queue ++ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled ++ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled ++ */ ++struct dpni_tx_flow_attr { ++ int use_common_tx_conf_queue; ++ int l3_chksum_gen; ++ int l4_chksum_gen; ++}; ++ ++/** ++ * dpni_get_tx_flow() - Get Tx flow attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function ++ * @attr: Returned Tx flow attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_flow_attr *attr); ++ ++/** ++ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration ++ * @errors_only: Set to '1' to report back only error frames; ++ * Set to '0' to confirm transmission/error for all transmitted frames; ++ * @queue_cfg: Queue configuration ++ */ ++struct dpni_tx_conf_cfg { ++ int errors_only; ++ struct dpni_queue_cfg queue_cfg; ++}; ++ ++/** ++ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: Queue configuration ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported ++ * back - successfully transmitted frames are not confirmed. Otherwise, all ++ * transmitted frames are sent for confirmation. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_set_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_tx_conf_cfg *cfg); ++ ++/** ++ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes ++ * @errors_only: '1' if only error frames are reported back; '0' if all ++ * transmitted frames are confirmed ++ * @queue_attr: Queue attributes ++ */ ++struct dpni_tx_conf_attr { ++ int errors_only; ++ struct dpni_queue_attr queue_attr; ++}; ++ ++/** ++ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @attr: Returned tx-conf attributes ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_tx_conf(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_tx_conf_attr *attr); ++ ++/** ++ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: congestion notification configuration ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ const struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @flow_id: The sender's flow ID, as returned by the ++ * dpni_set_tx_flow() function; ++ * use 'DPNI_COMMON_TX_CONF' for common tx-conf ++ * @cfg: congestion notification ++ * ++ * If either 'DPNI_OPT_TX_CONF_DISABLED' or ++ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, ++ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; ++ * i.e. only serve the common tx-conf-err queue; ++ * ++ * Return: '0' on Success; error code otherwise. 
++ */ ++int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint16_t flow_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ ++/** ++ * dpni_set_tx_conf_revoke() - Tx confirmation revocation ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @revoke: revoke or not ++ * ++ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not ++ * selected at DPNI creation. ++ * Calling this function with 'revoke' set to '1' disables all transmit ++ * confirmation (including the private confirmation queues), regardless of ++ * previous settings; Note that in this case, Tx error frames are still ++ * enqueued to the general transmit errors queue. ++ * Calling this function with 'revoke' set to '0' restores the previous ++ * settings for both general and private transmit confirmation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int revoke); ++ ++/** ++ * dpni_set_rx_flow() - Set Rx flow configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7); ++ * use 'DPNI_ALL_TCS' to set all TCs and all flows ++ * @flow_id: Rx flow id within the traffic class; use ++ * 'DPNI_ALL_TC_FLOWS' to set all flows within ++ * this tc_id; ignored if tc_id is set to ++ * 'DPNI_ALL_TCS'; ++ * @cfg: Rx flow configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ const struct dpni_queue_cfg *cfg); ++ ++/** ++ * dpni_get_rx_flow() - Get Rx flow attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @flow_id: Rx flow id within the traffic class ++ * @attr: Returned Rx flow attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_get_rx_flow(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ uint16_t flow_id, ++ struct dpni_queue_attr *attr); ++ ++/** ++ * dpni_set_rx_err_queue() - Set Rx error queue configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: Queue configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dpni_queue_cfg *cfg); ++ ++/** ++ * dpni_get_rx_err_queue() - Get Rx error queue attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @attr: Returned Queue attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io,
++			  uint32_t cmd_flags,
++			  uint16_t token,
++			  struct dpni_queue_attr *attr);
++
++/**
++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ *	key extractions to be used as the QoS criteria by calling
++ *	dpni_prepare_key_cfg()
++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
++ *	'0' to use the 'default_tc' in such cases
++ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
++ */
++struct dpni_qos_tbl_cfg {
++	uint64_t key_cfg_iova;
++	int discard_on_miss;
++	uint8_t default_tc;
++};
++
++/**
++ * dpni_set_qos_table() - Set QoS mapping table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS table configuration
++ *
++ * This function and all QoS-related functions require that
++ * 'max_tcs > 1' was set at DPNI creation.
++ *
++ * warning: Before calling this function, call dpni_prepare_key_cfg() to
++ * prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++		       uint32_t cmd_flags,
++		       uint16_t token,
++		       const struct dpni_qos_tbl_cfg *cfg);
++
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++	uint64_t key_iova;
++	uint64_t mask_iova;
++	uint8_t key_size;
++};
++
++/**
++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to add
++ * @tc_id: Traffic class selection (0-7)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++		       uint32_t cmd_flags,
++		       uint16_t token,
++		       const struct dpni_rule_cfg *cfg,
++		       uint8_t tc_id);
++
++/**
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++			  uint32_t cmd_flags,
++			  uint16_t token,
++			  const struct dpni_rule_cfg *cfg);
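++
++/*
++ * Illustrative usage sketch, not part of the MC API: map frames matching a
++ * pre-built key to traffic class 1. 'key_iova' and 'mask_iova' name
++ * hypothetical DMA-mapped addresses of the key and mask, 'key_size' bytes
++ * each:
++ *
++ *	struct dpni_rule_cfg rule = {
++ *		.key_iova = key_iova,
++ *		.mask_iova = mask_iova,
++ *		.key_size = key_size,
++ *	};
++ *	int err = dpni_add_qos_entry(mc_io, 0, token, &rule, 1);
++ */
++
++/**
++ * dpni_clear_qos_table() - Clear all QoS mapping entries
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Following this function call, all frames are directed to
++ * the default traffic class (0)
++ *
++ * Return: '0' on Success; Error code otherwise.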
++ */ ++int dpni_clear_qos_table(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class ++ * (to select a flow ID) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Flow steering rule to add ++ * @flow_id: Flow id selection (must be smaller than the ++ * distribution size of the traffic class) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_add_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg, ++ uint16_t flow_id); ++ ++/** ++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific ++ * traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: Flow steering rule to remove ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id, ++ const struct dpni_rule_cfg *cfg); ++ ++/** ++ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific ++ * traffic class ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @tc_id: Traffic class selection (0-7) ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t tc_id); ++ ++/** ++ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set ++ * at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set ++ * at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int en); ++ ++/** ++ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @en: Set to '1' to enable; '0' to disable ++ * ++ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */
++int dpni_set_ipr(struct fsl_mc_io *mc_io,
++		 uint32_t cmd_flags,
++		 uint16_t token,
++		 int en);
++
++/**
++ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI
++ * creation. Fragmentation is performed according to the MTU value
++ * set by the dpni_set_mtu() function
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_ipf(struct fsl_mc_io *mc_io,
++		 uint32_t cmd_flags,
++		 uint16_t token,
++		 int en);
++
++#endif /* __FSL_DPNI_H */
+diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig
+new file mode 100644
+index 0000000..174a9cd
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
+@@ -0,0 +1,24 @@
++config FSL_DPAA2_MAC
++	tristate "DPAA2 MAC / PHY interface"
++	depends on FSL_MC_BUS && FSL_DPAA2
++	select MDIO_BUS_MUX_MMIOREG
++	select FSL_XGMAC_MDIO
++	select FIXED_PHY
++	---help---
++	  Prototype driver for the DPAA2 MAC / PHY interface object.
++	  This driver works as a proxy between phylib (including PHY drivers)
++	  and the MC firmware. It receives updates on link state changes
++	  from phylib and forwards them to the MC, and receives interrupts
++	  from the MC whenever a request is made to change the link state.
++
++
++config FSL_DPAA2_MAC_NETDEVS
++	bool "Expose net interfaces for PHYs"
++	default n
++	depends on FSL_DPAA2_MAC
++	---help---
++	  Exposes macX net interfaces which allow direct control over MACs and
++	  PHYs.
++
++	  Leave disabled if unsure.
++
+diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile
+new file mode 100644
+index 0000000..bda9410
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/Makefile
+@@ -0,0 +1,10 @@
++
++obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
++
++dpaa2-mac-objs := mac.o dpmac.o
++
++all:
++	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++
++clean:
++	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
+diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+new file mode 100644
+index 0000000..dc00590
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+@@ -0,0 +1,195 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMAC_CMD_H ++#define _FSL_DPMAC_CMD_H ++ ++/* DPMAC Version */ ++#define DPMAC_VER_MAJOR 3 ++#define DPMAC_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPMAC_CMDID_CLOSE 0x800 ++#define DPMAC_CMDID_OPEN 0x80c ++#define DPMAC_CMDID_CREATE 0x90c ++#define DPMAC_CMDID_DESTROY 0x900 ++ ++#define DPMAC_CMDID_GET_ATTR 0x004 ++#define DPMAC_CMDID_RESET 0x005 ++ ++#define DPMAC_CMDID_SET_IRQ 0x010 ++#define DPMAC_CMDID_GET_IRQ 0x011 ++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMAC_CMDID_SET_IRQ_MASK 0x014 ++#define DPMAC_CMDID_GET_IRQ_MASK 0x015 ++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPMAC_CMDID_MDIO_READ 0x0c0 ++#define DPMAC_CMDID_MDIO_WRITE 0x0c1 ++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 ++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 ++#define DPMAC_CMDID_GET_COUNTER 0x0c4 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, 
irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ ++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ ++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_MDIO_READ(cmd, data) \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_COUNTER(cmd, type) \ ++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) ++ ++#endif /* _FSL_DPMAC_CMD_H */ +diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c +new file mode 100644 +index 0000000..fc23b40 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c +@@ -0,0 +1,422 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include "../../fsl-mc/include/mc-sys.h" ++#include "../../fsl-mc/include/mc-cmd.h" ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmac_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPMAC_CMD_OPEN(cmd, dpmac_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++ ++int dpmac_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmac_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPMAC_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpmac_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmac_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, ++ 
cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmac_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to 
mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); ++ ++ return 0; ++} ++ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, ++ cmd_flags, ++ token); ++ DPMAC_CMD_MDIO_READ(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPMAC_RSP_MDIO_READ(cmd, cfg->data); ++ ++ return 0; ++} ++ ++int dpmac_mdio_write(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_MDIO_WRITE(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPMAC_RSP_GET_LINK_CFG(cmd, cfg); ++ ++ return 0; ++} ++ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_state *link_state) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, ++ cmd_flags, ++ token); ++ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter) ++{ ++ struct mc_command cmd = { 0 }; ++ int err = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, ++ cmd_flags, ++ token); ++ DPMAC_CMD_GET_COUNTER(cmd, type); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ DPMAC_RSP_GET_COUNTER(cmd, *counter); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h +new file mode 100644 +index 0000000..ad27772 +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h +@@ -0,0 +1,593 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMAC_H ++#define __FSL_DPMAC_H ++ ++/* Data Path MAC API ++ * Contains initialization APIs and runtime control APIs for DPMAC ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmac_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpmac_id: DPMAC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmac_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmac_id, ++ uint16_t *token); ++ ++/** ++ * dpmac_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
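++ *
++ * A minimal usage sketch (illustrative only, not part of the MC API
++ * documentation; assumes 'mc_io' was obtained via fsl_mc_portal_allocate()
++ * and 'dpmac_id' is a valid DPMAC object ID):
++ *
++ *	uint16_t token;
++ *	int err;
++ *
++ *	err = dpmac_open(mc_io, 0, dpmac_id, &token);
++ *	if (err)
++ *		return err;
++ *	... issue other dpmac_xxx() commands using 'token' ...
++ *	dpmac_close(mc_io, 0, token);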
++ */
++int dpmac_close(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		uint16_t token);
++
++/**
++ * enum dpmac_link_type - DPMAC link type
++ * @DPMAC_LINK_TYPE_NONE: No link
++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
++ */
++enum dpmac_link_type {
++	DPMAC_LINK_TYPE_NONE,
++	DPMAC_LINK_TYPE_FIXED,
++	DPMAC_LINK_TYPE_PHY,
++	DPMAC_LINK_TYPE_BACKPLANE
++};
++
++/**
++ * enum dpmac_eth_if - DPMAC Ethernet interface
++ * @DPMAC_ETH_IF_MII: MII interface
++ * @DPMAC_ETH_IF_RMII: RMII interface
++ * @DPMAC_ETH_IF_SMII: SMII interface
++ * @DPMAC_ETH_IF_GMII: GMII interface
++ * @DPMAC_ETH_IF_RGMII: RGMII interface
++ * @DPMAC_ETH_IF_SGMII: SGMII interface
++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
++ * @DPMAC_ETH_IF_XAUI: XAUI interface
++ * @DPMAC_ETH_IF_XFI: XFI interface
++ */
++enum dpmac_eth_if {
++	DPMAC_ETH_IF_MII,
++	DPMAC_ETH_IF_RMII,
++	DPMAC_ETH_IF_SMII,
++	DPMAC_ETH_IF_GMII,
++	DPMAC_ETH_IF_RGMII,
++	DPMAC_ETH_IF_SGMII,
++	DPMAC_ETH_IF_QSGMII,
++	DPMAC_ETH_IF_XAUI,
++	DPMAC_ETH_IF_XFI
++};
++
++/**
++ * struct dpmac_cfg - Structure representing DPMAC configuration
++ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs,
++ *	the MAC IDs are contiguous.
++ *	For example: 2 WRIOPs, 16 MACs in each:
++ *	MAC IDs for the 1st WRIOP: 1-16,
++ *	MAC IDs for the 2nd WRIOP: 17-32.
++ */
++struct dpmac_cfg {
++	int mac_id;
++};
++
++/**
++ * dpmac_create() - Create the DPMAC object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @cfg: Configuration structure
++ * @token: Returned token; use in subsequent API calls
++ *
++ * Create the DPMAC object, allocate required resources and
++ * perform required initialization.
++ *
++ * The object can be created either by declaring it in the
++ * DPL file, or by calling this function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent calls to
++ * this specific object. For objects that are created using the
++ * DPL file, call the dpmac_open() function to get an authentication
++ * token first.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_create(struct fsl_mc_io *mc_io,
++		uint32_t cmd_flags,
++		const struct dpmac_cfg *cfg,
++		uint16_t *token);
++
++/**
++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ *
++ * Return: '0' on Success; error code otherwise.
++ */ ++int dpmac_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * DPMAC IRQ Index and Events ++ */ ++ ++/** ++ * IRQ index ++ */ ++#define DPMAC_IRQ_INDEX 0 ++/** ++ * IRQ event - indicates a change in link state ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 ++/** ++ * IRQ event - Indicates that the link state changed ++ */ ++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 ++ ++/** ++ * struct dpmac_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpmac_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmac_irq_cfg *irq_cfg); ++ ++/** ++ * dpmac_get_irq() - Get IRQ information from the DPMAC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmac_irq_cfg *irq_cfg); ++ ++/** ++ * dpmac_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmac_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmac_set_irq_mask() - Set interrupt mask. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmac_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmac_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpmac_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpmac_attr - Structure representing DPMAC attributes ++ * @id: DPMAC object ID ++ * @phy_id: PHY ID ++ * @link_type: link type ++ * @eth_if: Ethernet interface ++ * @max_rate: Maximum supported rate - in Mbps ++ * @version: DPMAC version ++ */ ++struct dpmac_attr { ++ int id; ++ int phy_id; ++ enum dpmac_link_type link_type; ++ enum dpmac_eth_if eth_if; ++ uint32_t max_rate; ++ /** ++ * struct version - Structure representing DPMAC version ++ * @major: DPMAC major version ++ * @minor: DPMAC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmac_get_attributes - Retrieve DPMAC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
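++ *
++ * A hedged usage sketch (mirrors the probe path of the DPAA2 MAC proxy
++ * driver; 'mc_io' and 'token' are assumed to come from dpmac_open()):
++ *
++ *	struct dpmac_attr attr = { 0 };
++ *	int err;
++ *
++ *	err = dpmac_get_attributes(mc_io, 0, token, &attr);
++ *	if (!err)
++ *		pr_info("DPMAC %d uses API %d.%d, max rate %u Mbps\n",
++ *			attr.id, attr.version.major,
++ *			attr.version.minor, attr.max_rate);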
++ */ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_attr *attr); ++ ++/** ++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters ++ * @phy_addr: MDIO device address ++ * @reg: Address of the register within the Clause 45 PHY device from which data ++ * is to be read ++ * @data: Data read/write from/to MDIO ++ */ ++struct dpmac_mdio_cfg { ++ uint8_t phy_addr; ++ uint8_t reg; ++ uint16_t data; ++}; ++ ++/** ++ * dpmac_mdio_read() - Perform MDIO read transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++/** ++ * dpmac_mdio_write() - Perform MDIO write transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_write(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++/** ++ * DPMAC link configuration/state options ++ */ ++ ++/** ++ * Enable auto-negotiation ++ */ ++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/** ++ * Enable half-duplex mode ++ */ ++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/** ++ * Enable pause frames ++ */ ++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL ++/** ++ * Enable a-symmetric pause frames ++ */ ++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration ++ * @rate: Link's rate - in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ */ ++struct dpmac_link_cfg { ++ uint32_t rate; ++ uint64_t options; ++}; ++ ++/** ++ * dpmac_get_link_cfg() - Get Ethernet link configuration ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @cfg: Returned structure with the link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_cfg *cfg); ++ ++/** ++ * struct dpmac_link_state - DPMAC link configuration request ++ * @rate: Rate in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ * @up: Link state ++ */ ++struct dpmac_link_state { ++ uint32_t rate; ++ uint64_t options; ++ int up; ++}; ++ ++/** ++ * dpmac_set_link_state() - Set the Ethernet link status ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @link_state: Link state configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmac_link_state *link_state); ++ ++/** ++ * enum dpmac_counter - DPMAC counter types ++ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. ++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. 
++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
++ *				(up to max frame length specified),
++ *				good or bad.
++ * @DPMAC_CNT_ING_FRAG: counts received frames which are shorter than 64 bytes
++ *			and have a wrong CRC
++ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
++ *			specified, with a bad frame check sequence.
++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
++ *				Occurs when a receive FIFO overflows.
++ *				Also includes frames truncated as a result of
++ *				the receive FIFO overflow.
++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
++ *			(optionally used for a wrong SFD).
++ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than 64
++ *			bytes long with a good CRC.
++ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
++ *			specified, with a good frame check sequence.
++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC).
++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
++ *				(regular and PFC).
++ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
++ *			frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
++ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
++ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
++ *			(except for undersized/fragment frames).
++ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
++ *			frames and valid pause frames transmitted.
++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
++ *			pause frames.
++ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
++ *			pause frames.
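++ *
++ * Counters are read one at a time with dpmac_get_counter(); an
++ * illustrative sketch (assumes 'mc_io' and 'token' come from
++ * dpmac_open()) reading the received-bytes counter:
++ *
++ *	uint64_t rx_bytes = 0;
++ *	int err;
++ *
++ *	err = dpmac_get_counter(mc_io, 0, token,
++ *				DPMAC_CNT_ING_BYTE, &rx_bytes);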
++ */ ++enum dpmac_counter { ++ DPMAC_CNT_ING_FRAME_64, ++ DPMAC_CNT_ING_FRAME_127, ++ DPMAC_CNT_ING_FRAME_255, ++ DPMAC_CNT_ING_FRAME_511, ++ DPMAC_CNT_ING_FRAME_1023, ++ DPMAC_CNT_ING_FRAME_1518, ++ DPMAC_CNT_ING_FRAME_1519_MAX, ++ DPMAC_CNT_ING_FRAG, ++ DPMAC_CNT_ING_JABBER, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ DPMAC_CNT_ING_ALIGN_ERR, ++ DPMAC_CNT_EGR_UNDERSIZED, ++ DPMAC_CNT_ING_OVERSIZED, ++ DPMAC_CNT_ING_VALID_PAUSE_FRAME, ++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, ++ DPMAC_CNT_ING_BYTE, ++ DPMAC_CNT_ING_MCAST_FRAME, ++ DPMAC_CNT_ING_BCAST_FRAME, ++ DPMAC_CNT_ING_ALL_FRAME, ++ DPMAC_CNT_ING_UCAST_FRAME, ++ DPMAC_CNT_ING_ERR_FRAME, ++ DPMAC_CNT_EGR_BYTE, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ DPMAC_CNT_EGR_BCAST_FRAME, ++ DPMAC_CNT_EGR_UCAST_FRAME, ++ DPMAC_CNT_EGR_ERR_FRAME, ++ DPMAC_CNT_ING_GOOD_FRAME, ++ DPMAC_CNT_ENG_GOOD_FRAME ++}; ++ ++/** ++ * dpmac_get_counter() - Read a specific DPMAC counter ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMAC object ++ * @type: The requested counter ++ * @counter: Returned counter value ++ * ++ * Return: The requested counter; '0' otherwise. ++ */ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter); ++ ++#endif /* __FSL_DPMAC_H */ +diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c +new file mode 100644 +index 0000000..366ad4c +--- /dev/null ++++ b/drivers/staging/fsl-dpaa2/mac/mac.c +@@ -0,0 +1,694 @@ ++/* Copyright 2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../fsl-mc/include/mc.h" ++#include "../../fsl-mc/include/mc-sys.h" ++ ++#include "dpmac.h" ++#include "dpmac-cmd.h" ++ ++#define DPAA2_SUPPORTED_DPMAC_VERSION 3 ++ ++struct dpaa2_mac_priv { ++ struct net_device *netdev; ++ struct fsl_mc_device *mc_dev; ++ struct dpmac_attr attr; ++ struct dpmac_link_state old_state; ++}; ++ ++/* TODO: fix the 10G modes, mapping can't be right: ++ * XGMII is paralel ++ * XAUI is serial, using 8b/10b encoding ++ * XFI is also serial but using 64b/66b encoding ++ * they can't all map to XGMII... ++ * ++ * This must be kept in sync with enum dpmac_eth_if. ++ */ ++static phy_interface_t dpaa2_mac_iface_mode[] = { ++ /* DPMAC_ETH_IF_MII */ ++ PHY_INTERFACE_MODE_MII, ++ /* DPMAC_ETH_IF_RMII */ ++ PHY_INTERFACE_MODE_RMII, ++ /* DPMAC_ETH_IF_SMII */ ++ PHY_INTERFACE_MODE_SMII, ++ /* DPMAC_ETH_IF_GMII */ ++ PHY_INTERFACE_MODE_GMII, ++ /* DPMAC_ETH_IF_RGMII */ ++ PHY_INTERFACE_MODE_RGMII, ++ /* DPMAC_ETH_IF_SGMII */ ++ PHY_INTERFACE_MODE_SGMII, ++ /* DPMAC_ETH_IF_QSGMII */ ++ PHY_INTERFACE_MODE_QSGMII, ++ /* DPMAC_ETH_IF_XAUI */ ++ PHY_INTERFACE_MODE_XGMII, ++ /* DPMAC_ETH_IF_XFI */ ++ PHY_INTERFACE_MODE_XGMII, ++}; ++ ++static void dpaa2_mac_link_changed(struct net_device *netdev) ++{ ++ struct phy_device *phydev; ++ struct dpmac_link_state state = { 0 }; ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int err; ++ ++ /* the PHY just notified us of link state change */ ++ phydev = netdev->phydev; ++ ++ state.up = !!phydev->link; ++ if (phydev->link) { ++ state.rate = phydev->speed; ++ ++ if (!phydev->duplex) ++ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; ++ if (phydev->autoneg) ++ state.options |= DPMAC_LINK_OPT_AUTONEG; ++ ++ netif_carrier_on(netdev); ++ } else { ++ netif_carrier_off(netdev); ++ } ++ ++ if (priv->old_state.up != state.up || ++ priv->old_state.rate != state.rate || ++ priv->old_state.options != state.options) { ++ priv->old_state = state; ++ phy_print_status(phydev); ++ } ++ ++ /* We must call into the MC firmware at all times, because we don't know ++ * when and whether a potential DPNI may have read the link state. 
++ */ ++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, ++ priv->mc_dev->mc_handle, &state); ++ if (unlikely(err)) ++ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); ++} ++ ++/* IRQ bits that we handle */ ++static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, ++ struct net_device *dev) ++{ ++ /* we don't support I/O for now, drop the frame */ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++} ++ ++static int dpaa2_mac_open(struct net_device *netdev) ++{ ++ /* start PHY state machine */ ++ phy_start(netdev->phydev); ++ ++ return 0; ++} ++ ++static int dpaa2_mac_stop(struct net_device *netdev) ++{ ++ if (!netdev->phydev) ++ goto done; ++ ++ /* stop PHY state machine */ ++ phy_stop(netdev->phydev); ++ ++ /* signal link down to firmware */ ++ netdev->phydev->link = 0; ++ dpaa2_mac_link_changed(netdev); ++ ++done: ++ return 0; ++} ++ ++static int dpaa2_mac_get_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_gset(netdev->phydev, cmd); ++} ++ ++static int dpaa2_mac_set_settings(struct net_device *netdev, ++ struct ethtool_cmd *cmd) ++{ ++ return phy_ethtool_sset(netdev->phydev, cmd); ++} ++ ++static struct rtnl_link_stats64 ++*dpaa2_mac_get_stats(struct net_device *netdev, ++ struct rtnl_link_stats64 *storage) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ u64 tmp; ++ int err; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ &storage->tx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); ++ if (err) ++ goto error; ++ storage->tx_packets += tmp; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); ++ if (err) ++ goto error; ++ ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ &storage->rx_dropped); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); ++ if (err) ++ goto error; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_OVERSIZED, &tmp); ++ if (err) ++ goto error; ++ storage->rx_errors += tmp; ++ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, ++ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); ++ if (err) ++ goto error; ++ ++ return storage; ++ ++error: ++ netdev_err(netdev, "dpmac_get_counter err %d\n", err); ++ return storage; ++} ++ ++static struct { ++ enum dpmac_counter id; ++ 
char name[ETH_GSTRING_LEN]; ++} dpaa2_mac_counters[] = { ++ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, ++ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, ++ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, ++ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, ++ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, ++ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, ++ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, ++ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, ++ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, ++ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, ++ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, ++ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, ++ {DPMAC_CNT_ING_FRAG, "rx frags"}, ++ {DPMAC_CNT_ING_JABBER, "rx jabber"}, ++ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, ++ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, ++ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, ++ {DPMAC_CNT_ING_BYTE, "rx bytes"}, ++ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, ++ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, ++ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, ++ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, ++ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, ++ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, ++ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, ++ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, ++ ++}; ++ ++static void dpaa2_mac_get_strings(struct net_device *netdev, ++ u32 stringset, u8 *data) ++{ ++ int i; ++ ++ switch (stringset) { ++ case ETH_SS_STATS: ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) ++ memcpy(data + i * ETH_GSTRING_LEN, ++ dpaa2_mac_counters[i].name, ++ ETH_GSTRING_LEN); ++ break; ++ } ++} ++ ++static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, ++ struct ethtool_stats *stats, ++ u64 *data) ++{ ++ struct dpaa2_mac_priv *priv = netdev_priv(netdev); ++ int i; ++ int err; ++ ++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { ++ err = dpmac_get_counter(priv->mc_dev->mc_io, ++ 0, ++ priv->mc_dev->mc_handle, ++ dpaa2_mac_counters[i].id, &data[i]); ++ if (err) ++ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", ++ dpaa2_mac_counters[i].name, err); ++ } ++} ++ ++static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_STATS: ++ return ARRAY_SIZE(dpaa2_mac_counters); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static const struct net_device_ops dpaa2_mac_ndo_ops = { ++ .ndo_start_xmit = &dpaa2_mac_drop_frame, ++ .ndo_open = &dpaa2_mac_open, ++ .ndo_stop = &dpaa2_mac_stop, ++ .ndo_get_stats64 = &dpaa2_mac_get_stats, ++}; ++ ++static const struct ethtool_ops dpaa2_mac_ethtool_ops = { ++ .get_settings = &dpaa2_mac_get_settings, ++ .set_settings = &dpaa2_mac_set_settings, ++ .get_strings = &dpaa2_mac_get_strings, ++ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, ++ .get_sset_count = &dpaa2_mac_get_sset_count, ++}; ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++static void configure_link(struct dpaa2_mac_priv *priv, ++ struct dpmac_link_cfg *cfg) ++{ ++ struct phy_device *phydev = priv->netdev->phydev; ++ ++ if (unlikely(!phydev)) ++ return; ++ ++ phydev->speed = cfg->rate; ++ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); ++ ++ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { ++ phydev->autoneg = 1; ++ phydev->advertising |= ADVERTISED_Autoneg; ++ } else { ++ phydev->autoneg = 0; ++ phydev->advertising &= ~ADVERTISED_Autoneg; ++ } ++ ++ phy_start_aneg(phydev); ++} ++ ++static irqreturn_t dpaa2_mac_irq_handler(int 
irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ struct dpmac_link_cfg link_cfg; ++ u32 status; ++ int err; ++ ++ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, &status); ++ if (unlikely(err || !status)) ++ return IRQ_NONE; ++ ++ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ ++ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { ++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ &link_cfg); ++ if (unlikely(err)) ++ goto out; ++ ++ configure_link(priv, &link_cfg); ++ } ++ ++out: ++ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, status); ++ ++ return IRQ_HANDLED; ++} ++ ++static int setup_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err; ++ ++ err = fsl_mc_allocate_irqs(mc_dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); ++ return err; ++ } ++ ++ err = devm_request_threaded_irq(&mc_dev->dev, ++ mc_dev->irqs[0]->irq_number, ++ NULL, &dpaa2_mac_irq_handler, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ dev_name(&mc_dev->dev), &mc_dev->dev); ++ if (err) { ++ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", ++ err); ++ goto free_irq; ++ } ++ ++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, dpmac_irq_mask); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); ++ goto free_irq; ++ } ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 1); ++ if (err) { ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ goto free_irq; ++ } ++ ++ return 0; ++ ++free_irq: ++ fsl_mc_free_irqs(mc_dev); ++ ++ return err; ++} ++ ++static void teardown_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int err; ++ ++ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, dpmac_irq_mask); ++ if (err) ++ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); ++ ++ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ DPMAC_IRQ_INDEX, 0); ++ if (err) ++ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); ++ ++ fsl_mc_free_irqs(mc_dev); ++} ++ ++static struct device_node *lookup_node(struct device *dev, int dpmac_id) ++{ ++ struct device_node *dpmacs, *dpmac = NULL; ++ struct device_node *mc_node = dev->of_node; ++ u32 id; ++ int err; ++ ++ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); ++ if (!dpmacs) { ++ dev_err(dev, "No dpmacs subnode in device-tree\n"); ++ return NULL; ++ } ++ ++ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { ++ err = of_property_read_u32(dpmac, "reg", &id); ++ if (err) ++ continue; ++ if (id == dpmac_id) ++ return dpmac; ++ } ++ ++ return NULL; ++} ++ ++static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev; ++ struct dpaa2_mac_priv *priv = NULL; ++ struct device_node *phy_node, *dpmac_node; ++ struct net_device *netdev; ++ phy_interface_t if_mode; ++ int err = 0; ++ ++ dev = &mc_dev->dev; ++ ++ /* prepare a net_dev structure to make the phy lib API happy */ ++ netdev = alloc_etherdev(sizeof(*priv)); ++ if (!netdev) { ++ dev_err(dev, "alloc_etherdev error\n"); ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ priv = netdev_priv(netdev); ++ priv->mc_dev = mc_dev; ++ priv->netdev = netdev; ++ ++ SET_NETDEV_DEV(netdev, dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); ++#endif ++ ++ 
dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); ++ if (err || !mc_dev->mc_io) { ++ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_netdev; ++ } ++ ++ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (err || !mc_dev->mc_handle) { ++ dev_err(dev, "dpmac_open error: %d\n", err); ++ err = -ENODEV; ++ goto err_free_mcp; ++ } ++ ++ err = dpmac_get_attributes(mc_dev->mc_io, 0, ++ mc_dev->mc_handle, &priv->attr); ++ if (err) { ++ dev_err(dev, "dpmac_get_attributes err %d\n", err); ++ err = -EINVAL; ++ goto err_close; ++ } ++ ++ dev_info_once(dev, "Using DPMAC API %d.%d\n", ++ priv->attr.version.major, priv->attr.version.minor); ++ ++ /* Look up the DPMAC node in the device-tree. */ ++ dpmac_node = lookup_node(dev, priv->attr.id); ++ if (!dpmac_node) { ++ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); ++ err = -ENODEV; ++ goto err_close; ++ } ++ ++ err = setup_irqs(mc_dev); ++ if (err) { ++ err = -EFAULT; ++ goto err_close; ++ } ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ /* OPTIONAL, register netdev just to make it visible to the user */ ++ netdev->netdev_ops = &dpaa2_mac_ndo_ops; ++ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; ++ ++ /* phy starts up enabled so netdev should be up too */ ++ netdev->flags |= IFF_UP; ++ ++ err = register_netdev(priv->netdev); ++ if (err < 0) { ++ dev_err(dev, "register_netdev error %d\n", err); ++ err = -ENODEV; ++ goto err_free_irq; ++ } ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ ++ /* probe the PHY as a fixed-link if the link type declared in DPC ++ * explicitly mandates this ++ */ ++ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED) ++ goto probe_fixed_link; ++ ++ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { ++ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; ++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", ++ phy_modes(if_mode), priv->attr.eth_if); ++ } else { ++ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", ++ priv->attr.eth_if); ++ goto probe_fixed_link; ++ } ++ ++ /* try to connect to the PHY */ ++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); ++ if (!phy_node) { ++ if (!phy_node) { ++ dev_err(dev, "dpmac node has no phy-handle property\n"); ++ err = -ENODEV; ++ goto err_no_phy; ++ } ++ } ++ netdev->phydev = of_phy_connect(netdev, phy_node, ++ &dpaa2_mac_link_changed, 0, if_mode); ++ if (!netdev->phydev) { ++ /* No need for dev_err(); the kernel's loud enough as it is. */ ++ dev_dbg(dev, "Can't of_phy_connect() now.\n"); ++ /* We might be waiting for the MDIO MUX to probe, so defer ++ * our own probing. 
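++ * (Returning -EPROBE_DEFER below asks the driver core to retry this
++ * probe later, once more devices, such as the MDIO bus behind the MUX,
++ * have had a chance to register.)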
++ */ ++ err = -EPROBE_DEFER; ++ goto err_defer; ++ } ++ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); ++ ++probe_fixed_link: ++ if (!netdev->phydev) { ++ struct fixed_phy_status status = { ++ .link = 1, ++ /* fixed-phys don't support 10Gbps speed for now */ ++ .speed = 1000, ++ .duplex = 1, ++ }; ++ ++ /* try to register a fixed link phy */ ++ netdev->phydev = fixed_phy_register(PHY_POLL, &status, NULL); ++ if (!netdev->phydev || IS_ERR(netdev->phydev)) { ++ dev_err(dev, "error trying to register fixed PHY\n"); ++ /* So we don't crash unregister_netdev() later on */ ++ netdev->phydev = NULL; ++ err = -EFAULT; ++ goto err_no_phy; ++ } ++ dev_info(dev, "Registered fixed PHY.\n"); ++ } ++ ++ /* start PHY state machine */ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ dpaa2_mac_open(netdev); ++#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ phy_start(netdev->phydev); ++#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ return 0; ++ ++err_defer: ++err_no_phy: ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(netdev); ++err_free_irq: ++#endif ++ teardown_irqs(mc_dev); ++err_close: ++ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++err_free_mcp: ++ fsl_mc_portal_free(mc_dev->mc_io); ++err_free_netdev: ++ free_netdev(netdev); ++err_exit: ++ return err; ++} ++ ++static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) ++{ ++ struct device *dev = &mc_dev->dev; ++ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++ unregister_netdev(priv->netdev); ++#endif ++ teardown_irqs(priv->mc_dev); ++ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); ++ fsl_mc_portal_free(priv->mc_dev->mc_io); ++ free_netdev(priv->netdev); ++ ++ dev_set_drvdata(dev, NULL); ++ kfree(priv); ++ ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpmac", ++ .ver_major = DPMAC_VER_MAJOR, ++ .ver_minor = DPMAC_VER_MINOR, ++ }, ++ {} ++}; ++ ++static struct fsl_mc_driver dpaa2_mac_drv = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_mac_probe, ++ .remove = dpaa2_mac_remove, ++ .match_id_table = dpaa2_mac_match_id_table, ++}; ++ ++module_fsl_mc_driver(dpaa2_mac_drv); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); +diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig +new file mode 100644 +index 0000000..32df07b +--- /dev/null ++++ b/drivers/staging/fsl-mc/Kconfig +@@ -0,0 +1 @@ ++source "drivers/staging/fsl-mc/bus/Kconfig" +diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile +new file mode 100644 +index 0000000..9c6a001 +--- /dev/null ++++ b/drivers/staging/fsl-mc/Makefile +@@ -0,0 +1,2 @@ ++# Freescale Management Complex (MC) bus drivers ++obj-$(CONFIG_FSL_MC_BUS) += bus/ +diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO +new file mode 100644 +index 0000000..d78288b +--- /dev/null ++++ b/drivers/staging/fsl-mc/TODO +@@ -0,0 +1,13 @@ ++* Add README file (with ASCII art) describing relationships between ++ DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc. ++ Also, define all acronyms used. ++ ++* Decide if multiple root fsl-mc buses will be supported per Linux instance, ++ and if so add support for this. ++ ++* Add at least one device driver for a DPAA2 object (child device of the ++ fsl-mc bus). 
++ ++Please send any patches to Greg Kroah-Hartman , ++german.rivera@freescale.com, devel@driverdev.osuosl.org, ++linux-kernel@vger.kernel.org +diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig +new file mode 100644 +index 0000000..8bef5b8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/Kconfig +@@ -0,0 +1,45 @@ ++# ++# Freescale Management Complex (MC) bus drivers ++# ++# Copyright (C) 2014 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++ ++config FSL_MC_BUS ++ tristate "Freescale Management Complex (MC) bus driver" ++ depends on OF && ARM64 ++ help ++ Driver to enable the bus infrastructure for the Freescale ++ QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware ++ module of the QorIQ LS2 SoCs, that does resource management ++ for hardware building-blocks in the SoC that can be used ++ to dynamically create networking hardware objects such as ++ network interfaces (NICs), crypto accelerator instances, ++ or L2 switches. ++ ++ Only enable this option when building the kernel for ++ Freescale QorQIQ LS2xxxx SoCs. ++ ++config FSL_MC_RESTOOL ++ tristate "Freescale Management Complex (MC) restool driver" ++ depends on FSL_MC_BUS ++ help ++ Driver that provides kernel support for the Freescale Management ++ Complex resource manager user-space tool. ++ ++config FSL_MC_DPIO ++ tristate "Freescale Data Path I/O (DPIO) driver" ++ depends on FSL_MC_BUS ++ help ++ Driver for Freescale Data Path I/O (DPIO) devices. ++ A DPIO device provides queue and buffer management facilities ++ for software to interact with other Data Path devices. This ++ driver does not expose the DPIO device individually, but ++ groups them under a service layer API. ++ ++config FSL_QBMAN_DEBUG ++ tristate "Freescale QBMAN Debug APIs" ++ depends on FSL_MC_DPIO ++ help ++ QBMan debug assistant APIs. +diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile +new file mode 100644 +index 0000000..f29399c +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/Makefile +@@ -0,0 +1,24 @@ ++# ++# Freescale Management Complex (MC) bus drivers ++# ++# Copyright (C) 2014 Freescale Semiconductor, Inc. ++# ++# This file is released under the GPLv2 ++# ++obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o ++ ++mc-bus-driver-objs := mc-bus.o \ ++ mc-sys.o \ ++ dprc.o \ ++ dpmng.o \ ++ dprc-driver.o \ ++ mc-allocator.o \ ++ dpmcp.o \ ++ dpbp.o \ ++ dpcon.o ++ ++# MC restool kernel support ++obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o ++ ++# MC DPIO driver ++obj-$(CONFIG_FSL_MC_DPIO) += dpio/ +diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c +new file mode 100644 +index 0000000..f183121 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpbp.c +@@ -0,0 +1,459 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. 
++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. ++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpbp.h" ++#include "../include/dpbp-cmd.h" ++ ++int dpbp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpbp_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ ++ cmd.params[0] |= mc_enc(0, 32, dpbp_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++EXPORT_SYMBOL(dpbp_open); ++ ++int dpbp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_close); ++ ++int dpbp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpbp_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ (void)(cfg); /* unused */ ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpbp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_enable); ++ ++int dpbp_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = 
{ 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpbp_disable); ++ ++int dpbp_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (int)mc_dec(cmd.params[0], 0, 1); ++ ++ return 0; ++} ++ ++int dpbp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpbp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 8, irq_index); ++ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpbp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->addr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++ ++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); ++ return 0; ++} ++ ++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpbp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, *status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->bpid = (uint16_t)mc_dec(cmd.params[0], 16, 16); ++ attr->id = (int)mc_dec(cmd.params[0], 32, 32); ++ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); ++ return 0; ++} ++EXPORT_SYMBOL(dpbp_get_attributes); ++ ++int dpbp_set_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry); ++ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit); ++ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry); ++ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit); ++ cmd.params[2] |= mc_enc(0, 16, cfg->options); ++ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx); ++ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpbp_get_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ cfg->depletion_entry = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ cfg->depletion_exit = (uint32_t)mc_dec(cmd.params[0], 32, 32); ++ cfg->surplus_entry = (uint32_t)mc_dec(cmd.params[1], 0, 32); ++ cfg->surplus_exit = (uint32_t)mc_dec(cmd.params[1], 32, 32); ++ cfg->options = (uint16_t)mc_dec(cmd.params[2], 0, 16); ++ cfg->message_ctx = (uint64_t)mc_dec(cmd.params[3], 0, 64); ++ cfg->message_iova = (uint64_t)mc_dec(cmd.params[4], 0, 64); ++ ++ return 0; ++} +diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c +new file mode 100644 +index 0000000..7965284 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpcon.c +@@ -0,0 +1,407 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpcon.h" ++#include "../include/dpcon-cmd.h" ++ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpcon_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPCON_CMD_OPEN(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_open); ++ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_close); ++ ++int dpcon_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpcon_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPCON_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpcon_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_enable); ++ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_disable); ++ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET, ++ cmd_flags, token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpcon_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, ++ 
cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpcon_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpcon_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to 
mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPCON_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpcon_get_attributes); ++ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_notification_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, ++ cmd_flags, ++ token); ++ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dpcon_set_notification); ++ +diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile +new file mode 100644 +index 0000000..c20356b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile +@@ -0,0 +1,9 @@ ++# ++# Freescale DPIO driver ++# ++ ++obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o ++ ++fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o ++ ++obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c +new file mode 100644 +index 0000000..80add27 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c +@@ -0,0 +1,401 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../../include/mc.h" ++#include "../../include/fsl_dpaa2_io.h" ++ ++#include "fsl_qbman_portal.h" ++#include "fsl_dpio.h" ++#include "fsl_dpio_cmd.h" ++ ++#include "dpio-drv.h" ++ ++#define DPIO_DESCRIPTION "DPIO Driver" ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_AUTHOR("Freescale Semiconductor, Inc"); ++MODULE_DESCRIPTION(DPIO_DESCRIPTION); ++ ++#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */ ++ ++struct dpio_priv { ++ struct dpaa2_io *io; ++ char irq_name[MAX_DPIO_IRQ_NAME]; ++ struct task_struct *thread; ++}; ++ ++static int dpio_thread(void *data) ++{ ++ struct dpaa2_io *io = data; ++ ++ while (!kthread_should_stop()) { ++ int err = dpaa2_io_poll(io); ++ ++ if (err) { ++ pr_err("dpaa2_io_poll() failed\n"); ++ return err; ++ } ++ msleep(50); ++ } ++ return 0; ++} ++ ++static irqreturn_t dpio_irq_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct dpio_priv *priv = dev_get_drvdata(dev); ++ ++ return dpaa2_io_irq(priv->io); ++} ++ ++static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev) ++{ ++ int i; ++ struct fsl_mc_device_irq *irq; ++ int irq_count = ls_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = ls_dev->irqs[i]; ++ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); ++ } ++} ++ ++static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu) ++{ ++ struct dpio_priv *priv; ++ unsigned int i; ++ int error; ++ struct fsl_mc_device_irq *irq; ++ unsigned int num_irq_handlers_registered = 0; ++ int irq_count = ls_dev->obj_desc.irq_count; ++ cpumask_t mask; ++ ++ priv = dev_get_drvdata(&ls_dev->dev); ++ ++ if (WARN_ON(irq_count != 1)) ++ return -EINVAL; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = ls_dev->irqs[i]; ++ error = devm_request_irq(&ls_dev->dev, ++ irq->irq_number, ++ dpio_irq_handler, ++ 0, ++ priv->irq_name, ++ &ls_dev->dev); ++ if (error < 0) { ++ dev_err(&ls_dev->dev, ++ "devm_request_irq() failed: %d\n", ++ error); ++ goto error_unregister_irq_handlers; ++ } ++ ++ /* Set the IRQ affinity */ ++ cpumask_clear(&mask); ++ cpumask_set_cpu(cpu, &mask); ++ if (irq_set_affinity(irq->irq_number, &mask)) ++ pr_err("irq_set_affinity failed irq %d cpu %d\n", ++ irq->irq_number, cpu); ++ ++ num_irq_handlers_registered++; ++ } ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ for (i = 0; i < num_irq_handlers_registered; i++) { ++ irq = ls_dev->irqs[i]; ++ devm_free_irq(&ls_dev->dev, irq->irq_number, ++ &ls_dev->dev); ++ } ++ ++ return error; ++} ++ ++static int __cold ++dpaa2_dpio_probe(struct fsl_mc_device *ls_dev) ++{ ++ struct dpio_attr dpio_attrs; ++ struct dpaa2_io_desc desc; ++ struct dpio_priv *priv; ++ int err = -ENOMEM; ++ struct device *dev = &ls_dev->dev; ++ struct dpaa2_io *defservice; ++ bool irq_allocated = false; ++ static int next_cpu; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ goto err_priv_alloc; ++ ++ dev_set_drvdata(dev, priv); ++ ++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ err = -EPROBE_DEFER; ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, ++ &ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle, ++ &dpio_attrs); ++ if (err) { ++ dev_err(dev, 
"dpio_get_attributes() failed %d\n", err); ++ goto err_get_attr; ++ } ++ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_enable() failed %d\n", err); ++ goto err_get_attr; ++ } ++ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n", ++ ls_dev->regions[0].start, ++ ls_dev->regions[1].start, ++ dpio_attrs.qbman_portal_id, ++ dpio_attrs.num_priorities); ++ ++ pr_info("ce_size=0x%llx, ci_size=0x%llx\n", ++ resource_size(&ls_dev->regions[0]), ++ resource_size(&ls_dev->regions[1])); ++ ++ desc.qman_version = dpio_attrs.qbman_version; ++ /* Build DPIO driver object out of raw MC object */ ++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; ++ desc.has_irq = 1; ++ desc.will_poll = 1; ++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; ++ desc.cpu = next_cpu; ++ desc.stash_affinity = next_cpu; ++ next_cpu = (next_cpu + 1) % num_active_cpus(); ++ desc.dpio_id = ls_dev->obj_desc.id; ++ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start, ++ resource_size(&ls_dev->regions[0])); ++ desc.regs_cinh = ioremap(ls_dev->regions[1].start, ++ resource_size(&ls_dev->regions[1])); ++ ++ err = fsl_mc_allocate_irqs(ls_dev); ++ if (err) { ++ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n"); ++ desc.has_irq = 0; ++ } else { ++ irq_allocated = true; ++ ++ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d", ++ desc.dpio_id); ++ ++ err = register_dpio_irq_handlers(ls_dev, desc.cpu); ++ if (err) ++ desc.has_irq = 0; ++ } ++ ++ priv->io = dpaa2_io_create(&desc); ++ if (!priv->io) { ++ dev_err(dev, "DPIO setup failed\n"); ++ goto err_dpaa2_io_create; ++ } ++ ++ /* If no irq then go to poll mode */ ++ if (desc.has_irq == 0) { ++ dev_info(dev, "Using polling mode for DPIO %d\n", ++ desc.dpio_id); ++ /* goto err_register_dpio_irq; */ ++ /* TEMP: Start polling if IRQ could not ++ be registered. 
This will go away once ++ KVM support for MSI is present */ ++ if (irq_allocated == true) ++ fsl_mc_free_irqs(ls_dev); ++ ++ if (desc.stash_affinity) ++ priv->thread = kthread_create_on_cpu(dpio_thread, ++ priv->io, ++ desc.cpu, ++ "dpio_aff%u"); ++ else ++ priv->thread = ++ kthread_create(dpio_thread, ++ priv->io, ++ "dpio_non%u", ++ dpio_attrs.qbman_portal_id); ++ if (IS_ERR(priv->thread)) { ++ dev_err(dev, "DPIO thread failure\n"); ++ err = PTR_ERR(priv->thread); ++ goto err_dpaa_thread; ++ } ++ wake_up_process(priv->thread); ++ } ++ ++ defservice = dpaa2_io_default_service(); ++ err = dpaa2_io_service_add(defservice, priv->io); ++ dpaa2_io_down(defservice); ++ if (err) { ++ dev_err(dev, "DPIO add-to-service failed\n"); ++ goto err_dpaa2_io_add; ++ } ++ ++ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id); ++ dev_info(dev, " receives_notifications = %d\n", ++ desc.receives_notifications); ++ dev_info(dev, " has_irq = %d\n", desc.has_irq); ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ fsl_mc_portal_free(ls_dev->mc_io); ++ return 0; ++ ++err_dpaa2_io_add: ++ unregister_dpio_irq_handlers(ls_dev); ++/* TEMP: To be restored once polling is removed ++ err_register_dpio_irq: ++ fsl_mc_free_irqs(ls_dev); ++*/ ++err_dpaa_thread: ++err_dpaa2_io_create: ++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_get_attr: ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_open: ++ fsl_mc_portal_free(ls_dev->mc_io); ++err_mcportal: ++ dev_set_drvdata(dev, NULL); ++ devm_kfree(dev, priv); ++err_priv_alloc: ++ return err; ++} ++ ++/* ++ * Tear down interrupts for a given DPIO object ++ */ ++static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev) ++{ ++ /* (void)disable_dpio_irqs(ls_dev); */ ++ unregister_dpio_irq_handlers(ls_dev); ++ fsl_mc_free_irqs(ls_dev); ++} ++ ++static int __cold ++dpaa2_dpio_remove(struct fsl_mc_device *ls_dev) ++{ ++ struct device *dev; ++ struct dpio_priv *priv; ++ int err; ++ ++ dev = &ls_dev->dev; ++ priv = dev_get_drvdata(dev); ++ ++ /* there is no implementation yet for pulling a DPIO object out of a ++ * running service (and they're currently always running). 
++ */ ++ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n"); ++ ++ if (priv->thread) ++ kthread_stop(priv->thread); ++ else ++ dpio_teardown_irqs(ls_dev); ++ ++ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); ++ if (err) { ++ dev_err(dev, "MC portal allocation failed\n"); ++ goto err_mcportal; ++ } ++ ++ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, ++ &ls_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpio_open() failed\n"); ++ goto err_open; ++ } ++ ++ dev_set_drvdata(dev, NULL); ++ dpaa2_io_down(priv->io); ++ ++ err = 0; ++ ++ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); ++ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); ++err_open: ++ fsl_mc_portal_free(ls_dev->mc_io); ++err_mcportal: ++ return err; ++} ++ ++static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpio", ++ .ver_major = DPIO_VER_MAJOR, ++ .ver_minor = DPIO_VER_MINOR ++ }, ++ { .vendor = 0x0 } ++}; ++ ++static struct fsl_mc_driver dpaa2_dpio_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ }, ++ .probe = dpaa2_dpio_probe, ++ .remove = dpaa2_dpio_remove, ++ .match_id_table = dpaa2_dpio_match_id_table ++}; ++ ++static int dpio_driver_init(void) ++{ ++ int err; ++ ++ err = dpaa2_io_service_driver_init(); ++ if (!err) { ++ err = fsl_mc_driver_register(&dpaa2_dpio_driver); ++ if (err) ++ dpaa2_io_service_driver_exit(); ++ } ++ return err; ++} ++static void dpio_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dpaa2_dpio_driver); ++ dpaa2_io_service_driver_exit(); ++} ++module_init(dpio_driver_init); ++module_exit(dpio_driver_exit); +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h +new file mode 100644 +index 0000000..fe8d40b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h +@@ -0,0 +1,33 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++int dpaa2_io_service_driver_init(void); ++void dpaa2_io_service_driver_exit(void); +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c +new file mode 100644 +index 0000000..b63edd6 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c +@@ -0,0 +1,468 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include "../../include/mc-sys.h" ++#include "../../include/mc-cmd.h" ++#include "fsl_dpio.h" ++#include "fsl_dpio_cmd.h" ++ ++int dpio_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpio_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ DPIO_CMD_OPEN(cmd, dpio_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpio_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpio_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ DPIO_CMD_CREATE(cmd, cfg); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpio_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_IS_ENABLED(cmd, *en); ++ ++ return 0; ++} ++ ++int dpio_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpio_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int 
dpio_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpio_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); ++ ++ return 0; ++} ++ ++int dpio_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); ++ ++ return 0; ++} ++ ++int dpio_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); ++ ++ return 0; ++} ++ ++int dpio_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); ++ ++ return 0; ++} ++ ++int dpio_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpio_attr *attr) ++{ ++ 
struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_ATTR(cmd, attr); ++ ++ return 0; ++} ++ ++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t sdest) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, ++ cmd_flags, ++ token); ++ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t *sdest) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); ++ ++ return 0; ++} ++ ++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id, ++ uint8_t *channel_index) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, ++ cmd_flags, ++ token); ++ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); ++ ++ return 0; ++} ++ ++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int dpcon_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, ++ cmd_flags, ++ token); ++ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio_service.c b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c +new file mode 100644 +index 0000000..ebcfd59 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c +@@ -0,0 +1,801 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include "fsl_qbman_portal.h" ++#include "../../include/mc.h" ++#include "../../include/fsl_dpaa2_io.h" ++#include "fsl_dpio.h" ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dpio-drv.h" ++#include "qbman_debug.h" ++ ++#define UNIMPLEMENTED() pr_err("FOO: %s unimplemented!\n", __func__) ++ ++#define MAGIC_SERVICE 0xabcd9876 ++#define MAGIC_OBJECT 0x1234fedc ++ ++struct dpaa2_io { ++ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part ++ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If ++ * it's neither, something got corrupted. This is mainly to satisfy ++ * dpaa2_io_from_registration(), which dereferences a caller- ++ * instantiated struct and so warrants a bug-checking step - hence the ++ * magic rather than a boolean. ++ */ ++ unsigned int magic; ++ atomic_t refs; ++ union { ++ struct dpaa2_io_service { ++ spinlock_t lock; ++ struct list_head list; ++ /* for targeted dpaa2_io selection */ ++ struct dpaa2_io *objects_by_cpu[NR_CPUS]; ++ cpumask_t cpus_notifications; ++ cpumask_t cpus_stashing; ++ int has_nonaffine; ++ /* slight hack. record the special case of the ++ * "default service", because that's the case where we ++ * need to avoid a kfree() ... */ ++ int is_defservice; ++ } service; ++ struct dpaa2_io_object { ++ struct dpaa2_io_desc dpio_desc; ++ struct qbman_swp_desc swp_desc; ++ struct qbman_swp *swp; ++ /* If the object is part of a service, this is it (and ++ * 'node' is linked into the service's list) */ ++ struct dpaa2_io *service; ++ struct list_head node; ++ /* Interrupt mask, as used with ++ * qbman_swp_interrupt_[gs]et_vanish(). This isn't ++ * locked, because the higher layer is driving all ++ * "ingress" processing. */ ++ uint32_t irq_mask; ++ /* As part of simplifying assumptions, we provide an ++ * irq-safe lock for each type of DPIO operation that ++ * isn't innately lockless. The selection algorithms ++ * (which are simplified) require this, whereas ++ * eventually adherence to cpu-affinity will presumably ++ * relax the locking requirements. 
*/ ++ spinlock_t lock_mgmt_cmd; ++ spinlock_t lock_notifications; ++ struct list_head notifications; ++ } object; ++ }; ++}; ++ ++struct dpaa2_io_store { ++ unsigned int max; ++ dma_addr_t paddr; ++ struct dpaa2_dq *vaddr; ++ void *alloced_addr; /* the actual return from kmalloc as it may ++ be adjusted for alignment purposes */ ++ unsigned int idx; /* position of the next-to-be-returned entry */ ++ struct qbman_swp *swp; /* portal used to issue VDQCR */ ++ struct device *dev; /* device used for DMA mapping */ ++}; ++ ++static struct dpaa2_io def_serv; ++ ++/**********************/ ++/* Internal functions */ ++/**********************/ ++ ++static void service_init(struct dpaa2_io *d, int is_defservice) ++{ ++ struct dpaa2_io_service *s = &d->service; ++ ++ d->magic = MAGIC_SERVICE; ++ atomic_set(&d->refs, 1); ++ spin_lock_init(&s->lock); ++ INIT_LIST_HEAD(&s->list); ++ cpumask_clear(&s->cpus_notifications); ++ cpumask_clear(&s->cpus_stashing); ++ s->has_nonaffine = 0; ++ s->is_defservice = is_defservice; ++} ++ ++/* Selection algorithms, stupid ones at that. These are to handle the case where ++ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within ++ * it to use. ++ */ ++static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss, ++ int cpu) ++{ ++ struct dpaa2_io *o; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&ss->lock, irqflags); ++ /* TODO: this is about the dumbest and slowest selection algorithm you ++ * could imagine. (We're looking for something working first, and ++ * something efficient second...) ++ */ ++ list_for_each_entry(o, &ss->list, object.node) ++ if (o->object.dpio_desc.cpu == cpu) ++ goto found; ++ ++ /* No joy. Try the first nonaffine portal (bleurgh) */ ++ if (ss->has_nonaffine) ++ list_for_each_entry(o, &ss->list, object.node) ++ if (!o->object.dpio_desc.stash_affinity) ++ goto found; ++ ++ /* No joy. Try the first object. Told you it was horrible. */ ++ if (!list_empty(&ss->list)) ++ o = list_entry(ss->list.next, struct dpaa2_io, object.node); ++ else ++ o = NULL; ++ ++found: ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ return o; ++} ++ ++static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu) ++{ ++ struct dpaa2_io_service *ss; ++ unsigned long irqflags; ++ ++ if (!d) ++ d = &def_serv; ++ else if (d->magic == MAGIC_OBJECT) ++ return d; ++ BUG_ON(d->magic != MAGIC_SERVICE); ++ ++ ss = &d->service; ++ ++ /* If cpu==-1, choose the current cpu, with no guarantees about ++ * potentially being migrated away. ++ */ ++ if (unlikely(cpu < 0)) { ++ spin_lock_irqsave(&ss->lock, irqflags); ++ cpu = smp_processor_id(); ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ ++ return _service_select_by_cpu_slow(ss, cpu); ++ } ++ ++ /* If a specific cpu was requested, pick it up immediately */ ++ return ss->objects_by_cpu[cpu]; ++} ++ ++static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d) ++{ ++ struct dpaa2_io_service *ss; ++ struct dpaa2_io *o; ++ unsigned long irqflags; ++ ++ if (!d) ++ d = &def_serv; ++ else if (d->magic == MAGIC_OBJECT) ++ return d; ++ BUG_ON(d->magic != MAGIC_SERVICE); ++ ++ /* ++ * Lock the service, looking for the first DPIO object in the list, ++ * ignore everything else about that DPIO, and choose it to do the ++ * operation! As a post-selection step, move the DPIO to the end of ++ * the list. 
It should improve load-balancing a little, although it ++ * might also incur a performance hit, given that the lock is *global* ++ * and this may be called on the fast-path... ++ */ ++ ss = &d->service; ++ spin_lock_irqsave(&ss->lock, irqflags); ++ if (!list_empty(&ss->list)) { ++ o = list_entry(ss->list.next, struct dpaa2_io, object.node); ++ list_del(&o->object.node); ++ list_add_tail(&o->object.node, &ss->list); ++ } else ++ o = NULL; ++ spin_unlock_irqrestore(&ss->lock, irqflags); ++ return o; ++} ++ ++/* If the context is not preemptible, select the service affine to the ++ * current cpu. Otherwise, "select any". ++ */ ++static inline struct dpaa2_io *_service_select(struct dpaa2_io *d) ++{ ++ struct dpaa2_io *temp = d; ++ ++ if (likely(!preemptible())) { ++ d = service_select_by_cpu(d, smp_processor_id()); ++ if (likely(d)) ++ return d; ++ } ++ return service_select_any(temp); ++} ++ ++/**********************/ ++/* Exported functions */ ++/**********************/ ++ ++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) ++{ ++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ struct dpaa2_io_object *o = &ret->object; ++ ++ if (!ret) ++ return NULL; ++ ret->magic = MAGIC_OBJECT; ++ atomic_set(&ret->refs, 1); ++ o->dpio_desc = *desc; ++ o->swp_desc.cena_bar = o->dpio_desc.regs_cena; ++ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh; ++ o->swp_desc.qman_version = o->dpio_desc.qman_version; ++ o->swp = qbman_swp_init(&o->swp_desc); ++ o->service = NULL; ++ if (!o->swp) { ++ kfree(ret); ++ return NULL; ++ } ++ INIT_LIST_HEAD(&o->node); ++ spin_lock_init(&o->lock_mgmt_cmd); ++ spin_lock_init(&o->lock_notifications); ++ INIT_LIST_HEAD(&o->notifications); ++ if (!o->dpio_desc.has_irq) ++ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff); ++ else { ++ /* For now only enable DQRR interrupts */ ++ qbman_swp_interrupt_set_trigger(o->swp, ++ QBMAN_SWP_INTERRUPT_DQRI); ++ } ++ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff); ++ if (o->dpio_desc.receives_notifications) ++ qbman_swp_push_set(o->swp, 0, 1); ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_create); ++ ++struct dpaa2_io *dpaa2_io_create_service(void) ++{ ++ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ ++ if (ret) ++ service_init(ret, 0); ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_create_service); ++ ++struct dpaa2_io *dpaa2_io_default_service(void) ++{ ++ atomic_inc(&def_serv.refs); ++ return &def_serv; ++} ++EXPORT_SYMBOL(dpaa2_io_default_service); ++ ++void dpaa2_io_down(struct dpaa2_io *d) ++{ ++ if (!atomic_dec_and_test(&d->refs)) ++ return; ++ if (d->magic == MAGIC_SERVICE) { ++ BUG_ON(!list_empty(&d->service.list)); ++ if (d->service.is_defservice) ++ /* avoid the kfree()! 
*/ ++ return; ++ } else { ++ BUG_ON(d->magic != MAGIC_OBJECT); ++ BUG_ON(d->object.service); ++ BUG_ON(!list_empty(&d->object.notifications)); ++ } ++ kfree(d); ++} ++EXPORT_SYMBOL(dpaa2_io_down); ++ ++int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ struct dpaa2_io_object *oo = &o->object; ++ int res = -EINVAL; ++ ++ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT)) ++ return res; ++ atomic_inc(&o->refs); ++ atomic_inc(&s->refs); ++ spin_lock(&ss->lock); ++ /* 'obj' must not already be associated with a service */ ++ if (!oo->service) { ++ oo->service = s; ++ list_add(&oo->node, &ss->list); ++ if (oo->dpio_desc.receives_notifications) { ++ cpumask_set_cpu(oo->dpio_desc.cpu, ++ &ss->cpus_notifications); ++ /* Update the fast-access array */ ++ ss->objects_by_cpu[oo->dpio_desc.cpu] = ++ container_of(oo, struct dpaa2_io, object); ++ } ++ if (oo->dpio_desc.stash_affinity) ++ cpumask_set_cpu(oo->dpio_desc.cpu, ++ &ss->cpus_stashing); ++ if (!oo->dpio_desc.stash_affinity) ++ ss->has_nonaffine = 1; ++ /* success */ ++ res = 0; ++ } ++ spin_unlock(&ss->lock); ++ if (res) { ++ dpaa2_io_down(s); ++ dpaa2_io_down(o); ++ } ++ return res; ++} ++EXPORT_SYMBOL(dpaa2_io_service_add); ++ ++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc) ++{ ++ if (obj->magic == MAGIC_SERVICE) ++ return -EINVAL; ++ BUG_ON(obj->magic != MAGIC_OBJECT); ++ *desc = obj->object.dpio_desc; ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_get_descriptor); ++ ++#define DPAA_POLL_MAX 32 ++ ++int dpaa2_io_poll(struct dpaa2_io *obj) ++{ ++ const struct dpaa2_dq *dq; ++ struct qbman_swp *swp; ++ int max = 0; ++ ++ if (obj->magic != MAGIC_OBJECT) ++ return -EINVAL; ++ swp = obj->object.swp; ++ dq = qbman_swp_dqrr_next(swp); ++ while (dq) { ++ if (qbman_result_is_SCN(dq)) { ++ struct dpaa2_io_notification_ctx *ctx; ++ uint64_t q64; ++ ++ q64 = qbman_result_SCN_ctx(dq); ++ ctx = (void *)q64; ++ ctx->cb(ctx); ++ } else ++ pr_crit("Unrecognised/ignored DQRR entry\n"); ++ qbman_swp_dqrr_consume(swp, dq); ++ ++max; ++ if (max > DPAA_POLL_MAX) ++ return 0; ++ dq = qbman_swp_dqrr_next(swp); ++ } ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_poll); ++ ++int dpaa2_io_irq(struct dpaa2_io *obj) ++{ ++ struct qbman_swp *swp; ++ uint32_t status; ++ ++ if (obj->magic != MAGIC_OBJECT) ++ return -EINVAL; ++ swp = obj->object.swp; ++ status = qbman_swp_interrupt_read_status(swp); ++ if (!status) ++ return IRQ_NONE; ++ dpaa2_io_poll(obj); ++ qbman_swp_interrupt_clear_status(swp, status); ++ qbman_swp_interrupt_set_inhibit(swp, 0); ++ return IRQ_HANDLED; ++} ++EXPORT_SYMBOL(dpaa2_io_irq); ++ ++int dpaa2_io_pause_poll(struct dpaa2_io *obj) ++{ ++ UNIMPLEMENTED(); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(dpaa2_io_pause_poll); ++ ++int dpaa2_io_resume_poll(struct dpaa2_io *obj) ++{ ++ UNIMPLEMENTED(); ++ return -EINVAL; ++} ++EXPORT_SYMBOL(dpaa2_io_resume_poll); ++ ++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ cpumask_copy(mask, &ss->cpus_notifications); ++} ++EXPORT_SYMBOL(dpaa2_io_service_notifications); ++ ++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask) ++{ ++ struct dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ cpumask_copy(mask, &ss->cpus_stashing); ++} ++EXPORT_SYMBOL(dpaa2_io_service_stashing); ++ ++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s) ++{ ++ struct 
dpaa2_io_service *ss = &s->service; ++ ++ BUG_ON(s->magic != MAGIC_SERVICE); ++ return ss->has_nonaffine; ++} ++EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine); ++ ++int dpaa2_io_service_register(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ ++ d = service_select_by_cpu(d, ctx->desired_cpu); ++ if (!d) ++ return -ENODEV; ++ ctx->dpio_id = d->object.dpio_desc.dpio_id; ++ ctx->qman64 = (uint64_t)ctx; ++ ctx->dpio_private = d; ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_add(&ctx->node, &d->object.notifications); ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ if (ctx->is_cdan) ++ /* Enable the generation of CDAN notifications */ ++ qbman_swp_CDAN_set_context_enable(d->object.swp, ++ (uint16_t)ctx->id, ++ ctx->qman64); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_service_register); ++ ++int dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ struct dpaa2_io *d = ctx->dpio_private; ++ unsigned long irqflags; ++ ++ if (!service) ++ service = &def_serv; ++ BUG_ON((service != d) && (service != d->object.service)); ++ if (ctx->is_cdan) ++ qbman_swp_CDAN_disable(d->object.swp, ++ (uint16_t)ctx->id); ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_del(&ctx->node); ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_service_deregister); ++ ++int dpaa2_io_service_rearm(struct dpaa2_io *d, ++ struct dpaa2_io_notification_ctx *ctx) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ if (ctx->is_cdan) ++ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id); ++ else ++ err = qbman_swp_fq_schedule(d->object.swp, ctx->id); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_rearm); ++ ++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, ++ struct dpaa2_io **io) ++{ ++ struct dpaa2_io_notification_ctx *tmp; ++ struct dpaa2_io *d = ctx->dpio_private; ++ unsigned long irqflags; ++ int ret = 0; ++ ++ BUG_ON(d->magic != MAGIC_OBJECT); ++ /* Iterate the notifications associated with 'd' looking for a match. If ++ * not, we've been passed an unregistered ctx! 
*/ ++ spin_lock_irqsave(&d->object.lock_notifications, irqflags); ++ list_for_each_entry(tmp, &d->object.notifications, node) ++ if (tmp == ctx) ++ goto found; ++ ret = -EINVAL; ++found: ++ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); ++ if (!ret) { ++ atomic_inc(&d->refs); ++ *io = d; ++ } ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_from_registration); ++ ++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, ++ struct dpaa2_io **ret) ++{ ++ if (cpu == -1) ++ *ret = service_select_any(service); ++ else ++ *ret = service_select_by_cpu(service, cpu); ++ if (*ret) { ++ atomic_inc(&(*ret)->refs); ++ return 0; ++ } ++ return -ENODEV; ++} ++EXPORT_SYMBOL(dpaa2_io_service_get_persistent); ++ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); ++ qbman_pull_desc_set_fq(&pd, fqid); ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ s->swp = d->object.swp; ++ err = qbman_swp_pull(d->object.swp, &pd); ++ if (err) ++ s->swp = NULL; ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_fq); ++ ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, ++ struct dpaa2_io_store *s) ++{ ++ struct qbman_pull_desc pd; ++ int err; ++ ++ qbman_pull_desc_clear(&pd); ++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); ++ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); ++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ s->swp = d->object.swp; ++ err = qbman_swp_pull(d->object.swp, &pd); ++ if (err) ++ s->swp = NULL; ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_pull_channel); ++ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, ++ uint32_t fqid, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_fq(&ed, fqid); ++ return qbman_swp_enqueue(d->object.swp, &ed, ++ (const struct qbman_fd *)fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); ++ ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, ++ uint32_t qdid, uint8_t prio, uint16_t qdbin, ++ const struct dpaa2_fd *fd) ++{ ++ struct qbman_eq_desc ed; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_eq_desc_clear(&ed); ++ qbman_eq_desc_set_no_orp(&ed, 0); ++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); ++ return qbman_swp_enqueue(d->object.swp, &ed, ++ (const struct qbman_fd *)fd); ++} ++EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); ++ ++int dpaa2_io_service_release(struct dpaa2_io *d, ++ uint32_t bpid, ++ const uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ struct qbman_release_desc rd; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ qbman_release_desc_clear(&rd); ++ qbman_release_desc_set_bpid(&rd, bpid); ++ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers); ++} ++EXPORT_SYMBOL(dpaa2_io_service_release); ++ ++int dpaa2_io_service_acquire(struct dpaa2_io *d, ++ uint32_t bpid, ++ uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ unsigned long irqflags; ++ int err; ++ ++ d = _service_select(d); ++ if (!d) ++ return -ENODEV; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ err = qbman_swp_acquire(d->object.swp, bpid, buffers, 
num_buffers); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ return err; ++} ++EXPORT_SYMBOL(dpaa2_io_service_acquire); ++ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev) ++{ ++ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL); ++ size_t size; ++ ++ BUG_ON(!max_frames || (max_frames > 16)); ++ if (!ret) ++ return NULL; ++ ret->max = max_frames; ++ size = max_frames * sizeof(struct dpaa2_dq) + 64; ++ ret->alloced_addr = kmalloc(size, GFP_KERNEL); ++ if (!ret->alloced_addr) { ++ kfree(ret); ++ return NULL; ++ } ++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); ++ ret->paddr = dma_map_single(dev, ret->vaddr, ++ sizeof(struct dpaa2_dq) * max_frames, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(dev, ret->paddr)) { ++ kfree(ret->alloced_addr); ++ kfree(ret); ++ return NULL; ++ } ++ ret->idx = 0; ++ ret->dev = dev; ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_create); ++ ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s) ++{ ++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, ++ DMA_FROM_DEVICE); ++ kfree(s->alloced_addr); ++ kfree(s); ++} ++EXPORT_SYMBOL(dpaa2_io_store_destroy); ++ ++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) ++{ ++ int match; ++ struct dpaa2_dq *ret = &s->vaddr[s->idx]; ++ ++ match = qbman_result_has_new_result(s->swp, ret); ++ if (!match) { ++ *is_last = 0; ++ return NULL; ++ } ++ BUG_ON(!qbman_result_is_DQ(ret)); ++ s->idx++; ++ if (dpaa2_dq_is_pull_complete(ret)) { ++ *is_last = 1; ++ s->idx = 0; ++ /* If we get an empty dequeue result to terminate a zero-results ++ * vdqcr, return NULL to the caller rather than expecting him to ++ * check non-NULL results every time. */ ++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) ++ ret = NULL; ++ } else ++ *is_last = 0; ++ return ret; ++} ++EXPORT_SYMBOL(dpaa2_io_store_next); ++ ++#ifdef CONFIG_FSL_QBMAN_DEBUG ++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, ++ uint32_t *fcnt, uint32_t *bcnt) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select_any(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->object.swp; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ ret = qbman_fq_query_state(swp, fqid, &state); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *fcnt = qbman_fq_state_frame_count(&state); ++ *bcnt = qbman_fq_state_byte_count(&state); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_fq_count); ++ ++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, ++ uint32_t *num) ++{ ++ struct qbman_attr state; ++ struct qbman_swp *swp; ++ unsigned long irqflags; ++ int ret; ++ ++ d = service_select_any(d); ++ if (!d) ++ return -ENODEV; ++ ++ swp = d->object.swp; ++ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); ++ ret = qbman_bp_query(swp, bpid, &state); ++ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); ++ if (ret) ++ return ret; ++ *num = qbman_bp_info_num_free_bufs(&state); ++ return 0; ++} ++EXPORT_SYMBOL(dpaa2_io_query_bp_count); ++ ++#endif ++ ++/* module init/exit hooks called from dpio-drv.c. These are declared in ++ * dpio-drv.h. 
++ */ ++int dpaa2_io_service_driver_init(void) ++{ ++ service_init(&def_serv, 1); ++ return 0; ++} ++ ++void dpaa2_io_service_driver_exit(void) ++{ ++ if (atomic_read(&def_serv.refs) != 1) ++ pr_err("default DPIO service leaves dangling DPIO objects!\n"); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h +new file mode 100644 +index 0000000..88a492f +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h +@@ -0,0 +1,460 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPIO_H ++#define __FSL_DPIO_H ++ ++/* Data Path I/O Portal API ++ * Contains initialization APIs and runtime control APIs for DPIO ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpio_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpio_id: DPIO unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpio_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. 
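++ *
++ * A minimal usage sketch (an editorial illustration, not part of the
++ * original documentation; it assumes an already-initialised 'mc_io'
++ * portal object and a 'dpio_id' taken from the DPL):
++ *
++ *	uint16_t token;
++ *	struct dpio_attr attr;
++ *	int err = dpio_open(mc_io, 0, dpio_id, &token);
++ *
++ *	if (!err) {
++ *		err = dpio_get_attributes(mc_io, 0, token, &attr);
++ *		dpio_close(mc_io, 0, token);
++ *	}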
++ */ ++int dpio_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpio_id, ++ uint16_t *token); ++ ++/** ++ * dpio_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * enum dpio_channel_mode - DPIO notification channel mode ++ * @DPIO_NO_CHANNEL: No support for notification channel ++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a ++ * dedicated channel in the DPIO; user should point the queue's ++ * destination in the relevant interface to this DPIO ++ */ ++enum dpio_channel_mode { ++ DPIO_NO_CHANNEL = 0, ++ DPIO_LOCAL_CHANNEL = 1, ++}; ++ ++/** ++ * struct dpio_cfg - Structure representing DPIO configuration ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ */ ++struct dpio_cfg { ++ enum dpio_channel_mode channel_mode; ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpio_create() - Create the DPIO object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPIO object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpio_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpio_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpio_destroy() - Destroy the DPIO object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_enable() - Enable the DPIO, allow I/O portal operations. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpio_is_enabled() - Check if the DPIO is enabled. 
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_is_enabled(struct fsl_mc_io *mc_io,
++		    uint32_t cmd_flags,
++		    uint16_t token,
++		    int *en);
++
++/**
++ * dpio_reset() - Reset the DPIO, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_reset(struct fsl_mc_io *mc_io,
++	       uint32_t cmd_flags,
++	       uint16_t token);
++
++/**
++ * dpio_set_stashing_destination() - Set the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
++				  uint32_t cmd_flags,
++				  uint16_t token,
++				  uint8_t sdest);
++
++/**
++ * dpio_get_stashing_destination() - Get the stashing destination.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @sdest: Returns the stashing destination value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
++				  uint32_t cmd_flags,
++				  uint16_t token,
++				  uint8_t *sdest);
++
++/**
++ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ * @channel_index: Returned channel index to be used in qbman API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
++				    uint32_t cmd_flags,
++				    uint16_t token,
++				    int dpcon_id,
++				    uint8_t *channel_index);
++
++/**
++ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @dpcon_id: DPCON object ID
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
++				       uint32_t cmd_flags,
++				       uint16_t token,
++				       int dpcon_id);
++
++/**
++ * DPIO IRQ Index and Events
++ */
++
++/**
++ * Irq software-portal index
++ */
++#define DPIO_IRQ_SWP_INDEX 0
++
++/**
++ * struct dpio_irq_cfg - IRQ configuration
++ * @addr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dpio_irq_cfg {
++	uint64_t addr;
++	uint32_t val;
++	int irq_num;
++};
++
++/**
++ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
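++ *
++ * An editorial usage sketch (not part of the original documentation;
++ * 'msi_paddr', 'msi_data' and 'virq' are placeholders supplied by the
++ * platform's MSI layer, and 'mc_io'/'token' are as in the calls above):
++ *
++ *	struct dpio_irq_cfg irq_cfg = {
++ *		.addr = msi_paddr,
++ *		.val = msi_data,
++ *		.irq_num = virq,
++ *	};
++ *	int err = dpio_set_irq(mc_io, 0, token, DPIO_IRQ_SWP_INDEX, &irq_cfg);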
++ */
++int dpio_set_irq(struct fsl_mc_io *mc_io,
++		 uint32_t cmd_flags,
++		 uint16_t token,
++		 uint8_t irq_index,
++		 struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_get_irq() - Get IRQ information from the DPIO.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @type: Interrupt type: 0 represents message interrupt
++ *	type (both irq_addr and irq_val are valid)
++ * @irq_cfg: IRQ attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq(struct fsl_mc_io *mc_io,
++		 uint32_t cmd_flags,
++		 uint16_t token,
++		 uint8_t irq_index,
++		 int *type,
++		 struct dpio_irq_cfg *irq_cfg);
++
++/**
++ * dpio_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled, no causes will cause
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_enable(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t irq_index,
++			uint8_t en);
++
++/**
++ * dpio_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_enable(struct fsl_mc_io *mc_io,
++			uint32_t cmd_flags,
++			uint16_t token,
++			uint8_t irq_index,
++			uint8_t *en);
++
++/**
++ * dpio_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ *	each bit:
++ *		0 = ignore event
++ *		1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_set_irq_mask(struct fsl_mc_io *mc_io,
++		      uint32_t cmd_flags,
++		      uint16_t token,
++		      uint8_t irq_index,
++		      uint32_t mask);
++
++/**
++ * dpio_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_get_irq_mask(struct fsl_mc_io *mc_io,
++		      uint32_t cmd_flags,
++		      uint16_t token,
++		      uint8_t irq_index,
++		      uint32_t *mask);
++
++/**
++ * dpio_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpio_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpio_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpio_attr - Structure representing DPIO attributes ++ * @id: DPIO object ID ++ * @version: DPIO version ++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area ++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area ++ * @qbman_portal_id: Software portal ID ++ * @channel_mode: Notification channel mode ++ * @num_priorities: Number of priorities for the notification channel (1-8); ++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' ++ * @qbman_version: QBMAN version ++ */ ++struct dpio_attr { ++ int id; ++ /** ++ * struct version - DPIO version ++ * @major: DPIO major version ++ * @minor: DPIO minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint64_t qbman_portal_ce_offset; ++ uint64_t qbman_portal_ci_offset; ++ uint16_t qbman_portal_id; ++ enum dpio_channel_mode channel_mode; ++ uint8_t num_priorities; ++ uint32_t qbman_version; ++}; ++ ++/** ++ * dpio_get_attributes() - Retrieve DPIO attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPIO object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpio_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpio_attr *attr); ++#endif /* __FSL_DPIO_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h +new file mode 100644 +index 0000000..f339cd6 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h +@@ -0,0 +1,184 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPIO_CMD_H ++#define _FSL_DPIO_CMD_H ++ ++/* DPIO Version */ ++#define DPIO_VER_MAJOR 3 ++#define DPIO_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPIO_CMDID_CLOSE 0x800 ++#define DPIO_CMDID_OPEN 0x803 ++#define DPIO_CMDID_CREATE 0x903 ++#define DPIO_CMDID_DESTROY 0x900 ++ ++#define DPIO_CMDID_ENABLE 0x002 ++#define DPIO_CMDID_DISABLE 0x003 ++#define DPIO_CMDID_GET_ATTR 0x004 ++#define DPIO_CMDID_RESET 0x005 ++#define DPIO_CMDID_IS_ENABLED 0x006 ++ ++#define DPIO_CMDID_SET_IRQ 0x010 ++#define DPIO_CMDID_GET_IRQ 0x011 ++#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPIO_CMDID_SET_IRQ_MASK 0x014 ++#define DPIO_CMDID_GET_IRQ_MASK 0x015 ++#define DPIO_CMDID_GET_IRQ_STATUS 0x016 ++#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPIO_CMDID_SET_STASHING_DEST 0x120 ++#define DPIO_CMDID_GET_STASHING_DEST 0x121 ++#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 ++#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_OPEN(cmd, dpio_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_CREATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ ++ cfg->channel_mode);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, 
uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ ++ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ ++ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ ++ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++#endif /* _FSL_DPIO_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h +new file mode 100644 +index 0000000..2874ff8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h +@@ -0,0 +1,123 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_BASE_H
++#define _FSL_QBMAN_BASE_H
++
++/**
++ * struct qbman_block_desc - qbman block descriptor structure
++ *
++ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
++ * control this QBMan instance, these values may simply be place-holders. The
++ * idea is simply that we be able to distinguish between them, eg. so that SWP
++ * descriptors can identify which QBMan instance they belong to.
++ */
++struct qbman_block_desc {
++	void *ccsr_reg_bar; /* CCSR register map */
++	int irq_rerr;	/* Recoverable error interrupt line */
++	int irq_nrerr;	/* Non-recoverable error interrupt line */
++};
++
++/**
++ * struct qbman_swp_desc - qbman software portal descriptor structure
++ *
++ * Descriptor for a QBMan software portal, expressed in terms that make sense to
++ * the user context. Ie. on MC, this information is likely to be true-physical,
++ * and instantiated statically at compile-time. On GPP, this information is
++ * likely to be obtained via "discovery" over a partition's "layerscape bus"
++ * (ie. in response to a MC portal command), and would take into account any
++ * virtualisation of the GPP user's address space and/or interrupt numbering.
++ */
++struct qbman_swp_desc {
++	const struct qbman_block_desc *block; /* The QBMan instance */
++	void *cena_bar; /* Cache-enabled portal register map */
++	void *cinh_bar; /* Cache-inhibited portal register map */
++	uint32_t qman_version;
++};
++
++/* Driver object for managing a QBMan portal */
++struct qbman_swp;
++
++/**
++ * struct qbman_fd - basic structure for qbman frame descriptor
++ *
++ * Place-holder for FDs; we represent it via the simplest form that we need for
++ * now. Different overlays may be needed to support different options, etc.
++ * (It is impractical to define One True Struct, because the resulting encoding
++ * routines (lots of read-modify-writes) would be worst-case performance whether
++ * or not circumstances required them.)
++ *
++ * Note, as with all data-structures exchanged between software and hardware (be
++ * they located in the portal register map or DMA'd to and from main-memory),
++ * the driver ensures that the caller of the driver API sees the data-structures
++ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
++ * contained within this structure are represented in host-endianness, even if
++ * hardware always treats them as little-endian. As such, if any of these fields
++ * are interpreted in a binary (rather than numerical) fashion by hardware
++ * blocks (eg. accelerators), then the user should be careful. We illustrate
++ * with an example;
++ *
++ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
++ * field of the FDs that are sent to it. Suppose also that the behaviour desired
++ * by the user corresponds to an "frc" value which is expressed as the literal
++ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
++ * value in which 0xfe is the first byte and 0xba is the last byte, and as
++ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
++ * the software is little-endian also, this can simply be achieved by setting
++ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
++ * frc=0xfeedabba! The best way of avoiding trouble with this sort of thing is
++ * to treat the 32-bit words as numerical values, in which the offset of a field
++ * from the beginning of the first byte (as required or generated by hardware)
++ * is numerically encoded by a left-shift (ie. by raising the field to a
++ * corresponding power of 2). Ie. in the current example, software could set
++ * "frc" in the following way, and it would work correctly on both little-endian
++ * and big-endian operation;
++ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
++ */
++struct qbman_fd {
++	union {
++		uint32_t words[8];
++		struct qbman_fd_simple {
++			uint32_t addr_lo;
++			uint32_t addr_hi;
++			uint32_t len;
++			/* offset in the MS 16 bits, BPID in the LS 16 bits */
++			uint32_t bpid_offset;
++			uint32_t frc; /* frame context */
++			/* "err", "va", "cbmt", "asal", [...] */
++			uint32_t ctrl;
++			/* flow context */
++			uint32_t flc_lo;
++			uint32_t flc_hi;
++		} simple;
++	};
++};
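++
++/* An editorial sketch of the shift-based convention described above (the
++ * buffer address, length, offset and bpid values are placeholders, not taken
++ * from the original patch): this populates a simple FD in a way that reads
++ * back identically on big- and little-endian hosts.
++ *
++ *	struct qbman_fd fd;
++ *
++ *	memset(&fd, 0, sizeof(fd));
++ *	fd.simple.addr_lo = lower_32_bits(buf_paddr);
++ *	fd.simple.addr_hi = upper_32_bits(buf_paddr);
++ *	fd.simple.len = frame_len;
++ *	fd.simple.bpid_offset = (data_offset << 16) | bpid;
++ *	fd.simple.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
++ */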
++
++#endif /* !_FSL_QBMAN_BASE_H */
+diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h
+new file mode 100644
+index 0000000..c9e543e
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h
+@@ -0,0 +1,753 @@
++/* Copyright (C) 2014 Freescale Semiconductor, Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _FSL_QBMAN_PORTAL_H
++#define _FSL_QBMAN_PORTAL_H
++
++#include "fsl_qbman_base.h"
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal object for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++/**
++ * qbman_swp_finish() - Destroy the functional object representing the given
++ * QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed.
++ *
++ */
++void qbman_swp_finish(struct qbman_swp *p);
++
++/**
++ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
++ * @p: the given portal object.
++ *
++ * Return the descriptor for this portal.
++ */
++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
++
++ /**************/
++ /* Interrupts */
++ /**************/
++
++/* See the QBMan driver API documentation for details on the interrupt
++ * mechanisms. */
++#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
++#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
++#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
++#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
++#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
++#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
++
++/**
++ * qbman_swp_interrupt_get_vanish()
++ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal
++ * interrupt status disable register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_ISDR register.
++ *
++ * Return the settings in SWP_ISDR register for Get function.
++ */
++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_read_status()
++ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal
++ * interrupt status register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_ISR register.
++ *
++ * Return the settings in SWP_ISR register for Get function.
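++ *
++ * As an editorial illustration (not from the original patch; 'swp' is an
++ * initialised portal object), an ISR would typically read the status,
++ * service the sources it recognises, then write the same mask back to
++ * acknowledge them:
++ *
++ *	uint32_t status = qbman_swp_interrupt_read_status(swp);
++ *
++ *	if (status & QBMAN_SWP_INTERRUPT_DQRI)
++ *		process_dequeues(swp);
++ *	qbman_swp_interrupt_clear_status(swp, status);
++ *
++ * where process_dequeues() is a hypothetical handler that polls DQRR or
++ * user-provided storage for dequeue results.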
++ *
++ */
++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_trigger()
++ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal
++ * interrupt enable register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_IER register.
++ *
++ * Return the settings in SWP_IER register for Get function.
++ */
++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
++
++/**
++ * qbman_swp_interrupt_get_inhibit()
++ * qbman_swp_interrupt_set_inhibit() - Get/Set the data in software portal
++ * interrupt inhibit register.
++ * @p: the given software portal object.
++ * @mask: The mask to set in SWP_IIR register.
++ *
++ * Return the settings in SWP_IIR register for Get function.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++
++ /************/
++ /* Dequeues */
++ /************/
++
++/* See the QBMan driver API documentation for details on the dequeue
++ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is
++ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
++ * driver. The structure is defined in the DPIO interface, but to avoid circular
++ * dependencies we just pre/re-declare it here opaquely. */
++struct dpaa2_dq;
++
++/* ------------------- */
++/* Push-mode dequeuing */
++/* ------------------- */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup.
++ * @p: the software portal object.
++ * @channel_idx: the channel index to query.
++ * @enabled: returned boolean to show whether the push dequeue is enabled for
++ * the given channel.
++ */
++void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled);
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue.
++ * @p: the software portal object.
++ * @channel_idx: the channel index.
++ * @enable: enable or disable push dequeue.
++ *
++ * The user of a portal can enable and disable push-mode dequeuing of up to 16
++ * channels independently. It does not specify this toggling by channel IDs, but
++ * rather by specifying the index (from 0 to 15) that has been mapped to the
++ * desired channel.
++ */
++void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable);
++
++/* ------------------- */
++/* Pull-mode dequeuing */
++/* ------------------- */
++
++/**
++ * struct qbman_pull_desc - the structure for pull dequeue descriptor
++ */
++struct qbman_pull_desc {
++	uint32_t dont_manipulate_directly[6];
++};
++
++enum qbman_pull_type_e {
++	/* dequeue with priority precedence, respect intra-class scheduling */
++	qbman_pull_type_prio = 1,
++	/* dequeue with active FQ precedence, respect ICS */
++	qbman_pull_type_active,
++	/* dequeue with active FQ precedence, no ICS */
++	qbman_pull_type_active_noics
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ * @d: the pull dequeue descriptor to be cleared.
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++
++/**
++ * qbman_pull_desc_set_storage() - Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set.
++ * @storage: the pointer of the memory to store the dequeue result.
++ * @storage_phys: the physical address of the storage memory.
++ * @stash: to indicate whether write allocate is enabled.
++ *
++ * If not called, or if called with 'storage' as NULL, the resulting pull
++ * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
++ * are produced to the given memory location (using the physical/DMA address
++ * which the caller provides in 'storage_phys'), and 'stash' controls whether or
++ * not those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++				 struct dpaa2_dq *storage,
++				 dma_addr_t storage_phys,
++				 int stash);
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
++ * @d: the pull dequeue descriptor to be set.
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
++ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
++
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
++ * @fqid: the frame queue index of the given FQ.
++ *
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
++ * @wqid: composed of channel id and wqid within the channel.
++ * @dct: the dequeue command type.
++ *
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues.
++ * @chid: the channel id to be dequeued.
++ * @dct: the dequeue command type.
++ *
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid,
++			    enum qbman_pull_type_e dct);
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid,
++				 enum qbman_pull_type_e dct);
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object.
++ * @d: the pull dequeue descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls.
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d);
++
++/* -------------------------------- */
++/* Polling DQRR for dequeue results */
++/* -------------------------------- */
++
++/**
++ * qbman_swp_dqrr_next() - Get a valid DQRR entry.
++ * @s: the software portal object.
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
++
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object.
++ * @dq: the DQRR entry to be consumed.
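++ *
++ * A typical polling loop (an editorial sketch, not from the original patch;
++ * 'swp' is an initialised portal) pairs the two calls:
++ *
++ *	const struct dpaa2_dq *dq;
++ *
++ *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
++ *		process_entry(dq);
++ *		qbman_swp_dqrr_consume(swp, dq);
++ *	}
++ *
++ * where process_entry() stands in for whatever per-entry handling the
++ * caller performs.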
++ */ ++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); ++ ++/* ------------------------------------------------- */ ++/* Polling user-provided storage for dequeue results */ ++/* ------------------------------------------------- */ ++/** ++ * qbman_result_has_new_result() - Check and get the dequeue response from the ++ * dq storage memory set in pull dequeue command ++ * @s: the software portal object. ++ * @dq: the dequeue result read from the memory. ++ * ++ * Only used for user-provided storage of dequeue results, not DQRR. For ++ * efficiency purposes, the driver will perform any required endianness ++ * conversion to ensure that the user's dequeue result storage is in host-endian ++ * format (whether or not that is the same as the little-endian format that ++ * hardware DMA'd to the user's storage). As such, once the user has called ++ * qbman_result_has_new_result() and been returned a valid dequeue result, ++ * they should not call it again on the same memory location (except of course ++ * if another dequeue command has been executed to produce a new result to that ++ * location). ++ * ++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid ++ * dequeue result. ++ */ ++int qbman_result_has_new_result(struct qbman_swp *, ++ const struct dpaa2_dq *); ++ ++/* -------------------------------------------------------- */ ++/* Parsing dequeue entries (DQRR and user-provided storage) */ ++/* -------------------------------------------------------- */ ++ ++/** ++ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not ++ * @dq: the dequeue result to be checked. ++ * ++ * DQRR entries may contain non-dequeue results, ie. notifications ++ */ ++int qbman_result_is_DQ(const struct dpaa2_dq *); ++ ++/** ++ * qbman_result_is_SCN() - Check the dequeue result is notification or not ++ * @dq: the dequeue result to be checked. ++ * ++ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change ++ * notifications" of one type or another. Some APIs apply to all of them, of the ++ * form qbman_result_SCN_***(). ++ */ ++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq) ++{ ++ return !qbman_result_is_DQ(dq); ++} ++ ++/** ++ * Recognise different notification types, only required if the user allows for ++ * these to occur, and cares about them when they do. ++ */ ++int qbman_result_is_FQDAN(const struct dpaa2_dq *); ++ /* FQ Data Availability */ ++int qbman_result_is_CDAN(const struct dpaa2_dq *); ++ /* Channel Data Availability */ ++int qbman_result_is_CSCN(const struct dpaa2_dq *); ++ /* Congestion State Change */ ++int qbman_result_is_BPSCN(const struct dpaa2_dq *); ++ /* Buffer Pool State Change */ ++int qbman_result_is_CGCU(const struct dpaa2_dq *); ++ /* Congestion Group Count Update */ ++/* Frame queue state change notifications; (FQDAN in theory counts too as it ++ * leaves a FQ parked, but it is primarily a data availability notification) */ ++int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */ ++int qbman_result_is_FQRNI(const struct dpaa2_dq *); ++ /* Retirement Immediate */ ++int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */ ++ ++/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer ++ * dpaa2_dq_*() functions. */ ++ ++/* State-change notifications (FQDAN/CDAN/CSCN/...). 
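++ * As an editorial illustration (not in the original patch), a handler for
++ * mixed traffic might branch on the entry type before using the accessors
++ * declared below:
++ *
++ *	if (qbman_result_is_SCN(dq)) {
++ *		uint64_t ctx = qbman_result_SCN_ctx(dq);
++ *		uint32_t rid = qbman_result_SCN_rid(dq);
++ *
++ *		handle_scn(rid, ctx);
++ *	}
++ *
++ * with handle_scn() a hypothetical dispatcher that further distinguishes
++ * CDAN/CSCN/BPSCN/... via the qbman_result_is_*() predicates.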
*/
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_state_in_mem() - Get the state field in State-change
++ * notification which is written to memory instead of DQRR.
++ */
++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *);
++/**
++ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change
++ * notification which is written to memory instead of DQRR.
++ */
++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *);
++
++/* Type-specific "resource IDs". Mainly for illustration purposes, though it
++ * also gives the appropriate type widths. */
++#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
++#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
++
++/**
++ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
++ *
++ * Return the buffer pool id.
++ */
++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
++ * buffers in the pool from BPSCN.
++ *
++ * Return the number of free buffers.
++ */
++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
++ * buffer pool is depleted.
++ *
++ * Return the status of buffer pool depletion.
++ */
++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
++ * pool is surplus or not.
++ *
++ * Return the status of buffer pool surplus.
++ */
++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *);
++/**
++ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
++ *
++ * Return the BPSCN context.
++ */
++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *);
++
++/* Parsing CGCU */
++/**
++ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
++ *
++ * Return the CGCU resource id.
++ */
++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *);
++/**
++ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
++ *
++ * Return instantaneous count in the CGCU notification.
++ */
++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *);
++
++ /************/
++ /* Enqueues */
++ /************/
++/**
++ * struct qbman_eq_desc - structure of enqueue descriptor
++ */
++struct qbman_eq_desc {
++	uint32_t dont_manipulate_directly[8];
++};
++
++/**
++ * struct qbman_eq_response - structure of enqueue response
++ */
++struct qbman_eq_response {
++	uint32_t dont_manipulate_directly[16];
++};
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *);
++
++/* Exactly one of the following descriptor "actions" should be set. (Calling
++ * any one of these will replace the effect of any prior call to one of these.)
++ * - enqueue without order-restoration
++ * - enqueue with order-restoration
++ * - fill a hole in the order-restoration sequence, without any enqueue
++ * - advance NESN (Next Expected Sequence Number), without any enqueue
++ * 'respond_success' indicates whether an enqueue response should be DMA'd
++ * after success (otherwise a response is DMA'd only after failure).
++ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
++ * be enqueued.
++ */
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++
++/**
++ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ * @incomplete: indicates that more fragments of the same sequence number are
++ * yet to be enqueued.
++ */
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++			   uint32_t opr_id, uint32_t seqnum, int incomplete);
++
++/**
++ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
++				uint32_t seqnum);
++
++/**
++ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @opr_id: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
++				uint32_t seqnum);
++
++/**
++ * qbman_eq_desc_set_response() - Set the enqueue response info.
++ * @d: the enqueue descriptor
++ * @storage_phys: the physical address of the enqueue response in memory.
++ * @stash: indicates whether write allocation is enabled or not.
++ *
++ * In the case where an enqueue response is DMA'd, this determines where that
++ * response should go. (The physical/DMA address is given for hardware's
++ * benefit, but software should interpret it as a "struct qbman_eq_response"
++ * data structure.) 'stash' controls whether or not the write to main-memory
++ * expresses a cache-warming attribute.
++ */
++void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
++				dma_addr_t storage_phys,
++				int stash);
++/**
++ * qbman_eq_desc_set_token() - Set token for the enqueue command
++ * @d: the enqueue descriptor
++ * @token: the token to be set.
++ *
++ * token is the value that shows up in an enqueue response that can be used to
++ * detect when the results have been published. The easiest technique is to zero
++ * result "storage" before issuing an enqueue, and use any non-zero 'token'
++ * value.
++ */
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
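++
++/* An editorial sketch of the token technique described above (the
++ * 'resp_paddr' DMA address and the 'swp'/'fd' variables are illustrative
++ * placeholders, not part of the original patch):
++ *
++ *	struct qbman_eq_response resp;
++ *	struct qbman_eq_desc ed;
++ *
++ *	memset(&resp, 0, sizeof(resp));
++ *	qbman_eq_desc_clear(&ed);
++ *	qbman_eq_desc_set_no_orp(&ed, 1);
++ *	qbman_eq_desc_set_response(&ed, resp_paddr, 0);
++ *	qbman_eq_desc_set_token(&ed, 0xab);
++ *	if (!qbman_swp_enqueue(swp, &ed, &fd))
++ *		poll_for_token(&resp, 0xab);
++ *
++ * where poll_for_token() is a hypothetical helper that spins until the token
++ * byte of the (zeroed) response storage reads back 0xab.
++ */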
++
++/**
++ * qbman_eq_desc_set_fq()
++ * qbman_eq_desc_set_qd() - Set either FQ or Queuing Destination for the enqueue
++ * command.
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued.
++ * @qdid: the id of the queuing destination to be enqueued.
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority.
++ *
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - enqueue to a frame queue
++ * - enqueue to a queuing destination
++ * Note that none of these will have any effect if the "action" type has been
++ * set to "orp_hole" or "orp_nesn".
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
++			  uint32_t qd_bin, uint32_t qd_prio);
++
++/**
++ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
++ * @d: the enqueue descriptor
++ * @enable: boolean to enable/disable EQDI
++ *
++ * Determines whether or not the portal's EQDI interrupt source should be
++ * asserted after the enqueue command is completed.
++ */
++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable);
++
++/**
++ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
++ * @d: the enqueue descriptor.
++ * @enable: enable/disable DCA mode.
++ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
++ * @park: determines whether to park the FQ or not
++ *
++ * Determines whether or not a portal DQRR entry should be consumed once the
++ * enqueue command is completed. (And if so, and the DQRR entry corresponds
++ * to a held-active (order-preserving) FQ, whether the FQ should be parked
++ * instead of being rescheduled.)
++ */
++void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable,
++			   uint32_t dqrr_idx, int park);
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command.
++ * @s: the software portal used for enqueue.
++ * @d: the enqueue descriptor.
++ * @fd: the frame descriptor to be enqueued.
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
++		      const struct qbman_fd *fd);
++
++/**
++ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt.
++ *
++ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
++ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
++ */
++int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh);
++
++ /*******************/
++ /* Buffer releases */
++ /*******************/
++/**
++ * struct qbman_release_desc - The structure for buffer release descriptor
++ */
++struct qbman_release_desc {
++	uint32_t dont_manipulate_directly[1];
++};
++
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *);
++
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
++
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable);
++
++/**
++ * qbman_swp_release() - Issue a buffer release command.
++ * @s: the software portal object.
++ * @d: the release descriptor.
++ * @buffers: a pointer pointing to the buffer address to be released.
++ ++ /*******************/ ++ /* Buffer acquires */ ++ /*******************/ ++ ++/** ++ * qbman_swp_acquire() - Issue a buffer acquire command. ++ * @s: the software portal object. ++ * @bpid: the buffer pool index. ++ * @buffers: a pointer to where the acquired buffer addresses will be written. ++ * @num_buffers: number of buffers to be acquired, must be less than 8. ++ * ++ * Return 0 for success, or negative error code if the acquire command ++ * fails. ++ */ ++int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers, ++ unsigned int num_buffers); ++ ++ /*****************/ ++ /* FQ management */ ++ /*****************/ ++ ++/** ++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue to be scheduled. ++ * ++ * There are a couple of different ways that a FQ can end up in the parked ++ * state; this schedules it. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid); ++ ++/** ++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue to be forced. ++ * ++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled ++ * and thus be available for selection by any channel-dequeuing behaviour (push ++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still ++ * empty at the time this happens, the resulting dq_entry will have no FD. ++ * (qbman_result_DQ_fd() will return NULL.) ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid); ++ ++/** ++ * qbman_swp_fq_xon() ++ * qbman_swp_fq_xoff() - XON/XOFF the frame queue. ++ * @s: the software portal object. ++ * @fqid: the index of frame queue. ++ * ++ * These functions change the FQ flow-control state between XON/XOFF. (The ++ * default is XON.) This setting doesn't affect enqueues to the FQ, just ++ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when ++ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is ++ * changed to XOFF after it had already become truly-scheduled to a channel, and ++ * a pull dequeue of that channel occurs that selects that FQ for dequeuing, ++ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will ++ * return NULL.) ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid); ++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid); ++ ++ /**********************/ ++ /* Channel management */ ++ /**********************/ ++ ++/* If the user has been allocated a channel object that is going to generate ++ * CDANs to another channel, then these functions will be necessary. ++ * CDAN-enabled channels only generate a single CDAN notification, after which ++ * they need to be reenabled before they'll generate another. (The idea is ++ * that pull dequeuing will occur in reaction to the CDAN, followed by a ++ * reenable step.)
Each function generates a distinct command to hardware, so a ++ * combination function is provided if the user wishes to modify the "context" ++ * (which shows up in each CDAN message) each time they reenable, as a single ++ * command to hardware. */ ++/** ++ * qbman_swp_CDAN_set_context() - Set CDAN context ++ * @s: the software portal object. ++ * @channelid: the channel index. ++ * @ctx: the context to be set in CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid, ++ uint64_t ctx); ++ ++/** ++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel. ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid); ++ ++/** ++ * qbman_swp_CDAN_disable() - disable CDAN for the channel. ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid);
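++
++/*
++ * Illustrative re-arm sketch: a CDAN is one-shot, so a handler typically
++ * pull-dequeues the channel's work and then re-enables it. 's' and 'chid'
++ * (the notifying channel) are assumed to come from the caller.
++ *
++ *	(issue pull dequeues for the channel here, then re-arm:)
++ *	if (qbman_swp_CDAN_enable(s, chid))
++ *		pr_err("CDAN re-arm failed on channel %u\n", chid);
++ */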
++ ++/** ++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN ++ * @s: the software portal object. ++ * @channelid: the index of the channel to generate CDAN. ++ * @ctx: the context set in CDAN. ++ * ++ * Return 0 for success, or negative error code for failure. ++ */ ++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid, ++ uint64_t ctx); ++ ++#endif /* !_FSL_QBMAN_PORTAL_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c +new file mode 100644 +index 0000000..12e33d3 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c +@@ -0,0 +1,846 @@ ++/* Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_portal.h" ++#include "qbman_debug.h" ++#include "fsl_qbman_portal.h" ++ ++/* QBMan portal management command code */ ++#define QBMAN_BP_QUERY 0x32 ++#define QBMAN_FQ_QUERY 0x44 ++#define QBMAN_FQ_QUERY_NP 0x45 ++#define QBMAN_CGR_QUERY 0x51 ++#define QBMAN_WRED_QUERY 0x54 ++#define QBMAN_CGR_STAT_QUERY 0x55 ++#define QBMAN_CGR_STAT_QUERY_CLR 0x56 ++ ++enum qbman_attr_usage_e { ++ qbman_attr_usage_fq, ++ qbman_attr_usage_bpool, ++ qbman_attr_usage_cgr, ++}; ++ ++struct int_qbman_attr { ++ uint32_t words[32]; ++ enum qbman_attr_usage_e usage; ++}; ++ ++#define attr_type_set(a, e) \ ++{ \ ++ struct qbman_attr *__attr = a; \ ++ enum qbman_attr_usage_e __usage = e; \ ++ ((struct int_qbman_attr *)__attr)->usage = __usage; \ ++} ++ ++#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) ++#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) ++ ++static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); ++static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); ++static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); ++static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); ++static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); ++static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); ++static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); ++static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); ++static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); ++static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); ++static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); ++static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); ++static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); ++static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); ++static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); ++static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8); ++static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8); ++ ++void qbman_bp_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_bpool); ++} ++ ++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, ++ struct qbman_attr *a) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *attr = ATTR32(a); ++ ++ qbman_bp_attr_clear(a); ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_bp_bpid, p, bpid); ++ ++ /* Complete the management command */ ++ p = 
qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_BP_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); ++ return -EIO; ++ } ++ ++ /* For the query, word[0] of the result contains only the ++ * verb/rslt fields, so skip word[0]. ++ */ ++ word_copy(&attr[1], &p[1], 15); ++ return 0; ++} ++ ++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); ++ *va = !!qb_attr_code_decode(&code_bp_va, p); ++ *wae = !!qb_attr_code_decode(&code_bp_wae, p); ++} ++ ++static uint32_t qbman_bp_thresh_to_value(uint32_t val) ++{ ++ return (val & 0xff) << ((val & 0xf00) >> 8); ++} ++ ++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, ++ p)); ++} ++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, ++ p)); ++} ++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, ++ p)); ++} ++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, ++ p)); ++} ++ ++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, ++ p)); ++} ++ ++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); ++} ++ ++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *icid = qb_attr_code_decode(&code_bp_icid, p); ++ *pl = !!qb_attr_code_decode(&code_bp_pl, p); ++} ++ ++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, ++ p) << 32) | ++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) ++ << 32) | ++ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, ++ p); ++} ++ ++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); ++} ++ ++int qbman_bp_info_has_free_bufs(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); ++} ++ ++int qbman_bp_info_is_depleted(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); ++} ++ ++int qbman_bp_info_is_surplus(struct 
qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); ++} ++ ++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_fill, p); ++} ++ ++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdptr, p); ++} ++ ++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sdcnt, p); ++} ++ ++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_hdcnt, p); ++} ++ ++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) ++{ ++ uint32_t *p = ATTR32(a); ++ ++ return qb_attr_code_decode(&code_bp_sscnt, p); ++} ++ ++static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); ++static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); ++static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); ++static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); ++static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); ++static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); ++static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); ++static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); ++static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); ++static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); ++static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); ++static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); ++static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); ++static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); ++static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); ++static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); ++ ++void qbman_fq_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_fq); ++} ++ ++/* FQ query function for programmable fields */ ++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d = ATTR32(desc); ++ ++ qbman_fq_attr_clear(desc); ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ qb_attr_code_encode(&code_fq_fqid, p, fqid); ++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_FQ_QUERY); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ /* For the configure, word[0] of the command contains only the WE-mask. ++ * For the query, word[0] of the result contains only the verb/rslt ++ * fields. Skip word[0] in the latter case. 
*/ ++ word_copy(&d[1], &p[1], 15); ++ return 0; ++} ++ ++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); ++} ++ ++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); ++} ++ ++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *destwq = qb_attr_code_decode(&code_fq_destwq, p); ++} ++ ++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *icscred = qb_attr_code_decode(&code_fq_icscred, p); ++} ++ ++static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); ++static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); ++static uint32_t qbman_thresh_to_value(uint32_t val) ++{ ++ uint32_t m, e; ++ ++ m = qb_attr_code_decode(&code_tdthresh_mant, &val); ++ e = qb_attr_code_decode(&code_tdthresh_exp, &val); ++ return m << e; ++} ++ ++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, ++ p)); ++} ++ ++void qbman_fq_attr_get_oa(struct qbman_attr *d, ++ int *oa_ics, int *oa_cgr, int32_t *oa_len) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); ++ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); ++ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, ++ qb_attr_code_decode(&code_fq_oa_len, p)); ++} ++ ++void qbman_fq_attr_get_mctl(struct qbman_attr *d, ++ int *bdi, int *ff, int *va, int *ps) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); ++ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); ++ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); ++ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); ++} ++ ++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); ++ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); ++} ++ ++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *icid = qb_attr_code_decode(&code_fq_icid, p); ++ *pl = !!qb_attr_code_decode(&code_fq_pl, p); ++} ++ ++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); ++} ++ ++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) ++{ ++ uint32_t *p = ATTR32(d); ++ ++ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); ++} ++ ++/* Query FQ Non-Programmable Fields */ ++static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); ++static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); ++static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); ++static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); ++static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); ++static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); ++static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); ++ ++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *state) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d = ATTR32(state); ++ ++ qbman_fq_attr_clear(state); ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++
qb_attr_code_encode(&code_fq_fqid, p, fqid); ++ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_FQ_QUERY_NP); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", ++ fqid, rslt); ++ return -EIO; ++ } ++ word_copy(&d[0], &p[0], 16); ++ return 0; ++} ++ ++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_state, p); ++} ++ ++int qbman_fq_state_force_eligible(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_fe, p); ++} ++ ++int qbman_fq_state_xoff(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_x, p); ++} ++ ++int qbman_fq_state_retirement_pending(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_r, p); ++} ++ ++int qbman_fq_state_overflow_error(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return !!qb_attr_code_decode(&code_fq_np_oe, p); ++} ++ ++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); ++} ++ ++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) ++{ ++ const uint32_t *p = ATTR32(state); ++ ++ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); ++} ++ ++/* Query CGR */ ++static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); ++static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); ++static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); ++static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); ++static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); ++static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); ++static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); ++static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); ++static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); ++static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); ++static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); ++static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); ++static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); ++static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); ++static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); ++static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); ++static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); ++static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); ++static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); ++static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); ++static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); ++static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 
0, 32); ++static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); ++ ++void qbman_cgr_attr_clear(struct qbman_attr *a) ++{ ++ memset(a, 0, sizeof(*a)); ++ attr_type_set(a, qbman_attr_usage_cgr); ++} ++ ++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t *d[2]; ++ int i; ++ uint32_t query_verb; ++ ++ d[0] = ATTR32(attr); ++ d[1] = ATTR32_1(attr); ++ ++ qbman_cgr_attr_clear(attr); ++ ++ for (i = 0; i < 2; i++) { ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; ++ ++ qb_attr_code_encode(&code_cgr_cgid, p, cgid); ++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != query_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query CGID 0x%x failed,", cgid); ++ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); ++ return -EIO; ++ } ++ /* For the configure, word[0] of the command contains only the ++ * verb/cgid. For the query, word[0] of the result contains ++ * only the verb/rslt fields. Skip word[0] in the latter case. ++ */ ++ word_copy(&d[i][1], &p[1], 15); ++ } ++ return 0; ++} ++ ++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, ++ int *cscn_wq_en_exit, int *cscn_wq_icd) ++ { ++ uint32_t *p = ATTR32(d); ++ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, ++ p); ++ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); ++ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); ++} ++ ++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, ++ int *rej_cnt_mode, int *cscn_bdi) ++{ ++ uint32_t *p = ATTR32(d); ++ *mode = qb_attr_code_decode(&code_cgr_mode, p); ++ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); ++ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); ++} ++ ++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, ++ int *cscn_wr_en_exit, int *cg_wr_ae, ++ int *cscn_dcp_en, int *cg_wr_va) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, ++ p); ++ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); ++ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); ++ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); ++ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p); ++} ++ ++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, ++ uint32_t *i_cnt_wr_bnd) ++{ ++ uint32_t *p = ATTR32(d); ++ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); ++ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); ++} ++ ++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) ++{ ++ uint32_t *p = ATTR32(d); ++ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); ++} ++ ++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) ++{ ++ uint32_t *p = ATTR32(d); ++ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_cs_thres, p)); ++} ++ ++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, ++ uint32_t *cs_thres_x) ++{ ++ uint32_t *p = ATTR32(d); ++ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_cs_thres_x, p)); ++} ++ ++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) ++{ ++ uint32_t *p = 
ATTR32(d); ++ *td_thres = qbman_thresh_to_value(qb_attr_code_decode( ++ &code_cgr_td_thres, p)); ++} ++ ++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); ++} ++ ++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); ++} ++ ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ uint32_t *cscn_vcgid) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); ++} ++ ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, ++ int *pl) ++{ ++ uint32_t *p = ATTR32(d); ++ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); ++ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); ++} ++ ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ uint64_t *cg_wr_addr) ++{ ++ uint32_t *p = ATTR32(d); ++ *cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, ++ p) << 32) | ++ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, ++ p); ++} ++ ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) ++{ ++ uint32_t *p = ATTR32(d); ++ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) ++ << 32) | ++ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); ++} ++ ++#define WRED_EDP_WORD(n) (18 + n/4) ++#define WRED_EDP_OFFSET(n) (8 * (n % 4)) ++#define WRED_PARM_DP_WORD(n) (n + 20) ++#define WRED_WE_EDP(n) (16 + n * 2) ++#define WRED_WE_PARM_DP(n) (17 + n * 2) ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, ++ int *edp) ++{ ++ uint32_t *p = ATTR32(d); ++ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), ++ WRED_EDP_OFFSET(idx), 8); ++ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); ++} ++ ++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, ++ uint64_t *maxth, uint8_t *maxp) ++{ ++ uint8_t ma, mn, step_i, step_s, pn; ++ ++ ma = (uint8_t)(dp >> 24); ++ mn = (uint8_t)(dp >> 19) & 0x1f; ++ step_i = (uint8_t)(dp >> 11); ++ step_s = (uint8_t)(dp >> 6) & 0x1f; ++ pn = (uint8_t)dp & 0x3f; ++ ++ *maxp = ((pn<<2) * 100)/256; ++ ++ if (mn == 0) ++ *maxth = ma; ++ else ++ *maxth = ((ma+256) * (1<<(mn-1))); ++ ++ if (step_s == 0) ++ *minth = *maxth - step_i; ++ else ++ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1)); ++} ++ ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, ++ uint32_t *dp) ++{ ++ uint32_t *p = ATTR32(d); ++ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), ++ 0, 8); ++ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); ++} ++ ++/* Query CGR/CCGR/CQ statistics */ ++static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); ++static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); ++static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); ++static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, ++ int clear, uint32_t command_type, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ uint32_t query_verb; ++ uint32_t hi, lo; ++ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ qb_attr_code_encode(&code_cgr_cgid, p, cgid); ++ if (command_type < 2) ++ qb_attr_code_encode(&code_cgr_stat_ct, p, 
command_type); ++ query_verb = clear ? ++ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; ++ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != query_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Query statistics of CGID 0x%x failed,", cgid); ++ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); ++ return -EIO; ++ } ++ ++ if (frame_cnt) { ++ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); ++ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); ++ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; ++ } ++ if (byte_cnt) { ++ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); ++ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); ++ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; ++ } ++ ++ return 0; ++} ++ ++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, ++ frame_cnt, byte_cnt); ++} ++ ++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 1, ++ frame_cnt, byte_cnt); ++} ++ ++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt) ++{ ++ return qbman_cgr_statistics_query(s, cgid, clear, 0, ++ frame_cnt, byte_cnt); ++}
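++
++/*
++ * Illustrative sketch (not compiled into the driver): read and clear the
++ * reject statistics of a congestion group. 's' and 'cgid' are assumed to be
++ * a valid portal and congestion group ID supplied by the caller.
++ *
++ *	uint64_t frames, bytes;
++ *
++ *	if (!qbman_cgr_reject_statistics(s, cgid, 1, &frames, &bytes))
++ *		pr_info("CGID %u rejected %llu frames (%llu bytes)\n",
++ *			cgid, frames, bytes);
++ */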
+diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h +new file mode 100644 +index 0000000..1e6b002 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h +@@ -0,0 +1,136 @@ ++/* Copyright (C) 2015 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++struct qbman_attr { ++ uint32_t dont_manipulate_directly[40]; ++}; ++ ++/* Buffer pool query commands */ ++int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, ++ struct qbman_attr *a); ++void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); ++void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); ++void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); ++void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); ++void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); ++void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); ++void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); ++void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); ++void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); ++void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); ++void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); ++void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); ++int qbman_bp_info_has_free_bufs(struct qbman_attr *a); ++int qbman_bp_info_is_depleted(struct qbman_attr *a); ++int qbman_bp_info_is_surplus(struct qbman_attr *a); ++uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); ++uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); ++uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); ++uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a); ++uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); ++ ++/* FQ query function for programmable fields */ ++int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *desc); ++void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); ++void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); ++void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); ++void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); ++void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); ++void qbman_fq_attr_get_oa(struct qbman_attr *d, ++ int *oa_ics, int *oa_cgr, int32_t *oa_len); ++void qbman_fq_attr_get_mctl(struct qbman_attr *d, ++ int *bdi, int *ff, int *va, int *ps); ++void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); ++void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); ++void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); ++void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); ++ ++/* FQ query command for non-programmable fields */ ++enum qbman_fq_schedstate_e { ++ qbman_fq_schedstate_oos = 0, ++ qbman_fq_schedstate_retired, ++ qbman_fq_schedstate_tentatively_scheduled, ++ qbman_fq_schedstate_truly_scheduled, ++ qbman_fq_schedstate_parked, ++ qbman_fq_schedstate_held_active, ++}; ++ ++int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, ++ struct qbman_attr *state); ++uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); ++int
qbman_fq_state_force_eligible(const struct qbman_attr *state); ++int qbman_fq_state_xoff(const struct qbman_attr *state); ++int qbman_fq_state_retirement_pending(const struct qbman_attr *state); ++int qbman_fq_state_overflow_error(const struct qbman_attr *state); ++uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); ++uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); ++ ++/* CGR query */ ++int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, ++ struct qbman_attr *attr); ++void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, ++ int *cscn_wq_en_exit, int *cscn_wq_icd); ++void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, ++ int *rej_cnt_mode, int *cscn_bdi); ++void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, ++ int *cscn_wr_en_exit, int *cg_wr_ae, ++ int *cscn_dcp_en, int *cg_wr_va); ++void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, ++ uint32_t *i_cnt_wr_bnd); ++void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); ++void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); ++void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, ++ uint32_t *cs_thres_x); ++void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); ++void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); ++void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); ++void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, ++ uint32_t *cscn_vcgid); ++void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, ++ int *pl); ++void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, ++ uint64_t *cg_wr_addr); ++void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); ++void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, ++ int *edp); ++void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, ++ uint64_t *maxth, uint8_t *maxp); ++void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, ++ uint32_t *dp); ++ ++/* CGR/CCGR/CQ statistics query */ ++int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); ++int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); ++int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, ++ uint64_t *frame_cnt, uint64_t *byte_cnt); +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c +new file mode 100644 +index 0000000..6c5638b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c +@@ -0,0 +1,1212 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_portal.h" ++ ++/* QBMan portal management command codes */ ++#define QBMAN_MC_ACQUIRE 0x30 ++#define QBMAN_WQCHAN_CONFIGURE 0x46 ++ ++/* CINH register offsets */ ++#define QBMAN_CINH_SWP_EQAR 0x8c0 ++#define QBMAN_CINH_SWP_DQPI 0xa00 ++#define QBMAN_CINH_SWP_DCAP 0xac0 ++#define QBMAN_CINH_SWP_SDQCR 0xb00 ++#define QBMAN_CINH_SWP_RAR 0xcc0 ++#define QBMAN_CINH_SWP_ISR 0xe00 ++#define QBMAN_CINH_SWP_IER 0xe40 ++#define QBMAN_CINH_SWP_ISDR 0xe80 ++#define QBMAN_CINH_SWP_IIR 0xec0 ++ ++/* CENA register offsets */ ++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) ++#define QBMAN_CENA_SWP_CR 0x600 ++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) ++#define QBMAN_CENA_SWP_VDQCR 0x780 ++ ++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ ++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) ++ ++/* QBMan FQ management command codes */ ++#define QBMAN_FQ_SCHEDULE 0x48 ++#define QBMAN_FQ_FORCE 0x49 ++#define QBMAN_FQ_XON 0x4d ++#define QBMAN_FQ_XOFF 0x4e ++ ++/*******************************/ ++/* Pre-defined attribute codes */ ++/*******************************/ ++ ++struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); ++struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); ++ ++/*************************/ ++/* SDQCR attribute codes */ ++/*************************/ ++ ++/* we put these here because at least some of them are required by ++ * qbman_swp_init() */ ++struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); ++struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); ++struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); ++#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) ++enum qbman_sdqcr_dct { ++ qbman_sdqcr_dct_null = 0, ++ qbman_sdqcr_dct_prio_ics, ++ qbman_sdqcr_dct_active_ics, ++ qbman_sdqcr_dct_active ++}; ++enum qbman_sdqcr_fc { ++ qbman_sdqcr_fc_one = 0, ++ qbman_sdqcr_fc_up_to_3 = 1 ++}; ++struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); ++ ++/*********************************/ ++/* Portal constructor/destructor */ ++/*********************************/ ++ ++/* Software portals should always be in the power-on state when we initialise, ++ * due to the CCSR-based portal reset functionality that MC has. ++ * ++ * Erk! 
Turns out that QMan versions prior to 4.1 do not correctly reset DQRR ++ * valid-bits, so we need to support a workaround where we don't trust ++ * valid-bits when detecting new entries until any stale ring entries have been ++ * overwritten at least once. The idea is that we read PI for the first few ++ * entries, then switch to valid-bit after that. The trick is to clear the ++ * bug-work-around boolean once the PI wraps around the ring for the first time. ++ * ++ * Note: this still carries a slight additional cost once the decrementer hits ++ * zero, so ideally the workaround should only be compiled in if the compiled ++ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for ++ * this. ++ */ ++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) ++{ ++ int ret; ++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); ++ ++ if (!p) ++ return NULL; ++ p->desc = d; ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_start; ++#endif ++ p->mc.valid_bit = QB_VALID_BIT; ++ p->sdq = 0; ++ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); ++ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); ++ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); ++ atomic_set(&p->vdq.busy, 1); ++ p->vdq.valid_bit = QB_VALID_BIT; ++ p->dqrr.next_idx = 0; ++ p->dqrr.valid_bit = QB_VALID_BIT; ++ /* TODO: should also read PI/CI type registers and check that they're on ++ * PoR values. If we're asked to initialise portals that aren't in reset ++ * state, bad things will follow. */ ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ p->dqrr.reset_bug = 1; ++#endif ++ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100) ++ p->dqrr.dqrr_size = 4; ++ else ++ p->dqrr.dqrr_size = 8; ++ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); ++ if (ret) { ++ kfree(p); ++ pr_err("qbman_swp_sys_init() failed %d\n", ret); ++ return NULL; ++ } ++ /* SDQCR needs to be initialized to 0 when no channels are ++ being dequeued from or else the QMan HW will indicate an ++ error. 
The values that were calculated above will be ++ applied when dequeues from a specific channel are enabled */ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); ++ return p; ++} ++ ++void qbman_swp_finish(struct qbman_swp *p) ++{ ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_start); ++#endif ++ qbman_swp_sys_finish(&p->sys); ++ kfree(p); ++} ++ ++const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) ++{ ++ return p->desc; ++} ++ ++/**************/ ++/* Interrupts */ ++/**************/ ++ ++uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); ++} ++ ++void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); ++} ++ ++uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); ++} ++ ++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); ++} ++ ++uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); ++} ++ ++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); ++} ++ ++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) ++{ ++ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); ++} ++ ++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) ++{ ++ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); ++} ++ ++/***********************/ ++/* Management commands */ ++/***********************/ ++ ++/* ++ * Internal code common to all types of management commands. ++ */ ++ ++void *qbman_swp_mc_start(struct qbman_swp *p) ++{ ++ void *ret; ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_start); ++#endif ++ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); ++#ifdef QBMAN_CHECKING ++ if (!ret) ++ p->mc.check = swp_mc_can_submit; ++#endif ++ return ret; ++} ++ ++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) ++{ ++ uint32_t *v = cmd; ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_submit); ++#endif ++ /* TBD: "|=" is going to hurt performance. Need to move as many fields ++ * out of word zero, and for those that remain, the "OR" needs to occur ++ * at the caller side. This debug check helps to catch cases where the ++ * caller wants to OR but has forgotten to do so.
*/ ++ BUG_ON((*v & cmd_verb) != *v); ++ *v = cmd_verb | p->mc.valid_bit; ++ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_poll; ++#endif ++} ++ ++void *qbman_swp_mc_result(struct qbman_swp *p) ++{ ++ uint32_t *ret, verb; ++#ifdef QBMAN_CHECKING ++ BUG_ON(p->mc.check != swp_mc_can_poll); ++#endif ++ qbman_cena_invalidate_prefetch(&p->sys, ++ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); ++ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); ++ /* Remove the valid-bit - command completed iff the rest is non-zero */ ++ verb = ret[0] & ~QB_VALID_BIT; ++ if (!verb) ++ return NULL; ++#ifdef QBMAN_CHECKING ++ p->mc.check = swp_mc_can_start; ++#endif ++ p->mc.valid_bit ^= QB_VALID_BIT; ++ return ret; ++} ++ ++/***********/ ++/* Enqueue */ ++/***********/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); ++static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); ++static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); ++static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); ++static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); ++static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1); ++static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); ++static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); ++static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); ++static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); ++static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); ++/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ ++static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); ++static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); ++static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); ++static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); ++static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); ++static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); ++ ++enum qbman_eq_cmd_e { ++ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ ++ qbman_eq_cmd_empty, ++ /* DMA an enqueue response once complete */ ++ qbman_eq_cmd_respond, ++ /* DMA an enqueue response only if the enqueue fails */ ++ qbman_eq_cmd_respond_reject ++}; ++ ++void qbman_eq_desc_clear(struct qbman_eq_desc *d) ++{ ++ memset(d, 0, sizeof(*d)); ++} ++ ++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 0); ++ qb_attr_code_encode(&code_eq_cmd, cl, ++ respond_success ? qbman_eq_cmd_respond : ++ qbman_eq_cmd_respond_reject); ++} ++ ++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, ++ uint32_t opr_id, uint32_t seqnum, int incomplete) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, ++ respond_success ? 
qbman_eq_cmd_respond : ++ qbman_eq_cmd_respond_reject); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); ++} ++ ++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); ++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); ++} ++ ++void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, ++ uint32_t seqnum) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_orp_en, cl, 1); ++ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); ++ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); ++ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); ++ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); ++ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); ++} ++ ++void qbman_eq_desc_set_response(struct qbman_eq_desc *d, ++ dma_addr_t storage_phys, ++ int stash) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); ++ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); ++} ++ ++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); ++} ++ ++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_qd_en, cl, 0); ++ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); ++} ++ ++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, ++ uint32_t qd_bin, uint32_t qd_prio) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_qd_en, cl, 1); ++ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); ++ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); ++ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); ++} ++ ++void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); ++} ++ ++void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, ++ uint32_t dqrr_idx, int park) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); ++ if (enable) { ++ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); ++ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); ++ } ++} ++ ++#define EQAR_IDX(eqar) ((eqar) & 0x7) ++#define EQAR_VB(eqar) ((eqar) & 0x80) ++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) ++ ++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, ++ const struct qbman_fd *fd) ++{ ++ uint32_t *p; ++ const uint32_t *cl = qb_cl(d); ++ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); ++ ++ pr_debug("EQAR=%08x\n", eqar); ++ if (!EQAR_SUCCESS(eqar)) ++ return -EBUSY; ++ p = qbman_cena_write_start(&s->sys, ++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); ++ word_copy(&p[1], &cl[1], 7); ++ word_copy(&p[8], fd, sizeof(*fd) >> 2); ++ /* Set the verb byte, have to substitute in the valid-bit */ ++ p[0] = cl[0] | EQAR_VB(eqar); ++ qbman_cena_write_complete(&s->sys, ++ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)), ++ p); ++ return 0; ++} ++ ++/*************************/ ++/* Static (push) dequeue */ ++/*************************/ ++ ++void 
qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) ++{ ++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); ++ ++ BUG_ON(channel_idx > 15); ++ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); ++} ++ ++void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) ++{ ++ uint16_t dqsrc; ++ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); ++ ++ BUG_ON(channel_idx > 15); ++ qb_attr_code_encode(&code, &s->sdq, !!enable); ++ /* Read back the complete src map. If no channels are enabled ++ the SDQCR must be 0 or else QMan will assert errors */ ++ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); ++ if (dqsrc != 0) ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); ++ else ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); ++} ++ ++/***************************/ ++/* Volatile (pull) dequeue */ ++/***************************/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); ++static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); ++static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); ++static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); ++static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); ++static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); ++static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); ++ ++enum qb_pull_dt_e { ++ qb_pull_dt_channel, ++ qb_pull_dt_workqueue, ++ qb_pull_dt_framequeue ++}; ++ ++void qbman_pull_desc_clear(struct qbman_pull_desc *d) ++{ ++ memset(d, 0, sizeof(*d)); ++} ++ ++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, ++ struct dpaa2_dq *storage, ++ dma_addr_t storage_phys, ++ int stash) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ /* Squiggle the pointer 'storage' into the extra 2 words of the ++ * descriptor (which aren't copied to the hw command) */ ++ *(void **)&cl[4] = storage; ++ if (!storage) { ++ qb_attr_code_encode(&code_pull_rls, cl, 0); ++ return; ++ } ++ qb_attr_code_encode(&code_pull_rls, cl, 1); ++ qb_attr_code_encode(&code_pull_stash, cl, !!stash); ++ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); ++} ++ ++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ BUG_ON(!numframes || (numframes > 16)); ++ qb_attr_code_encode(&code_pull_numframes, cl, ++ (uint32_t)(numframes - 1)); ++} ++ ++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_token, cl, token); ++} ++ ++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, 1); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); ++ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); ++} ++ ++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, ++ enum qbman_pull_type_e dct) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, dct); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); ++ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); ++} ++ ++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, ++ enum qbman_pull_type_e dct) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_pull_dct, cl, dct); ++ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); ++
qb_attr_code_encode(&code_pull_dqsource, cl, chid); ++}
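++
++/*
++ * Illustrative sketch only: issue a volatile (pull) dequeue of up to 4
++ * frames from an FQ into caller-provided storage, then poll for the result.
++ * 's', 'fqid', 'storage' (a DMA-mapped struct dpaa2_dq array) and
++ * 'storage_phys' are assumed to be set up by the caller.
++ *
++ *	struct qbman_pull_desc pd;
++ *
++ *	memset(storage, 0, sizeof(*storage));
++ *	qbman_pull_desc_clear(&pd);
++ *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
++ *	qbman_pull_desc_set_numframes(&pd, 4);
++ *	qbman_pull_desc_set_fq(&pd, fqid);
++ *	if (!qbman_swp_pull(s, &pd))
++ *		while (!qbman_result_has_new_result(s, storage))
++ *			cpu_relax();
++ */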
qb_attr_code_encode(&code_pull_dqsource, cl, chid); ++} ++ ++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) ++{ ++ uint32_t *p; ++ uint32_t *cl = qb_cl(d); ++ ++ if (!atomic_dec_and_test(&s->vdq.busy)) { ++ atomic_inc(&s->vdq.busy); ++ return -EBUSY; ++ } ++ s->vdq.storage = *(void **)&cl[4]; ++ qb_attr_code_encode(&code_pull_token, cl, 1); ++ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR); ++ word_copy(&p[1], &cl[1], 3); ++ /* Set the verb byte, have to substitute in the valid-bit */ ++ p[0] = cl[0] | s->vdq.valid_bit; ++ s->vdq.valid_bit ^= QB_VALID_BIT; ++ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p); ++ return 0; ++} ++ ++/****************/ ++/* Polling DQRR */ ++/****************/ ++ ++static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); ++static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7); ++static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); ++static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); ++static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); ++/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ ++static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); ++static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); ++static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); ++static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); ++ ++#define QBMAN_RESULT_DQ 0x60 ++#define QBMAN_RESULT_FQRN 0x21 ++#define QBMAN_RESULT_FQRNI 0x22 ++#define QBMAN_RESULT_FQPN 0x24 ++#define QBMAN_RESULT_FQDAN 0x25 ++#define QBMAN_RESULT_CDAN 0x26 ++#define QBMAN_RESULT_CSCN_MEM 0x27 ++#define QBMAN_RESULT_CGCU 0x28 ++#define QBMAN_RESULT_BPSCN 0x29 ++#define QBMAN_RESULT_CSCN_WQ 0x2a ++ ++static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); ++ ++/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry ++ * only once, so repeated calls can return a sequence of DQRR entries, without ++ * requiring they be consumed immediately or in any particular order. */ ++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) ++{ ++ uint32_t verb; ++ uint32_t response_verb; ++ uint32_t flags; ++ const struct dpaa2_dq *dq; ++ const uint32_t *p; ++ ++ /* Before using valid-bit to detect if something is there, we have to ++ * handle the case of the DQRR reset bug... */ ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ if (unlikely(s->dqrr.reset_bug)) { ++ /* We pick up new entries by cache-inhibited producer index, ++ * which means that a non-coherent mapping would require us to ++ * invalidate and read *only* once that PI has indicated that ++ * there's an entry here. The first trip around the DQRR ring ++ * will be much less efficient than all subsequent trips around ++ * it... ++ */ ++ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); ++ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); ++ /* there are new entries iff pi != next_idx */ ++ if (pi == s->dqrr.next_idx) ++ return NULL; ++ /* if next_idx is/was the last ring index, and 'pi' is ++ * different, we can disable the workaround as all the ring ++ * entries have now been DMA'd to so valid-bit checking is ++ * repaired. Note: this logic needs to be based on next_idx ++ * (which increments one at a time), rather than on pi (which ++ * can burst and wrap-around between our snapshots of it). 
++ */ ++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { ++ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", ++ s->dqrr.next_idx, pi); ++ s->dqrr.reset_bug = 0; ++ } ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ } ++#endif ++ ++ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ p = qb_cl(dq); ++ verb = qb_attr_code_decode(&code_dqrr_verb, p); ++ ++ /* If the valid-bit isn't of the expected polarity, nothing there. Note, ++ * in the DQRR reset bug workaround, we shouldn't need to skip these ++ * check, because we've already determined that a new entry is available ++ * and we've invalidated the cacheline before reading it, so the ++ * valid-bit behaviour is repaired and should tell us what we already ++ * knew from reading PI. ++ */ ++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ return NULL; ++ } ++ /* There's something there. Move "next_idx" attention to the next ring ++ * entry (and prefetch it) before returning what we found. */ ++ s->dqrr.next_idx++; ++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ ++ /* TODO: it's possible to do all this without conditionals, optimise it ++ * later. */ ++ if (!s->dqrr.next_idx) ++ s->dqrr.valid_bit ^= QB_VALID_BIT; ++ ++ /* If this is the final response to a volatile dequeue command ++ indicate that the vdq is no longer busy */ ++ flags = dpaa2_dq_flags(dq); ++ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); ++ if ((response_verb == QBMAN_RESULT_DQ) && ++ (flags & DPAA2_DQ_STAT_VOLATILE) && ++ (flags & DPAA2_DQ_STAT_EXPIRED)) ++ atomic_inc(&s->vdq.busy); ++ ++ qbman_cena_invalidate_prefetch(&s->sys, ++ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); ++ return dq; ++} ++ ++/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ ++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) ++{ ++ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); ++} ++ ++/*********************************/ ++/* Polling user-provided storage */ ++/*********************************/ ++ ++int qbman_result_has_new_result(struct qbman_swp *s, ++ const struct dpaa2_dq *dq) ++{ ++ /* To avoid converting the little-endian DQ entry to host-endian prior ++ * to us knowing whether there is a valid entry or not (and run the ++ * risk of corrupting the incoming hardware LE write), we detect in ++ * hardware endianness rather than host. This means we need a different ++ * "code" depending on whether we are BE or LE in software, which is ++ * where DQRR_TOK_OFFSET comes in... */ ++ static struct qb_attr_code code_dqrr_tok_detect = ++ QB_CODE(0, DQRR_TOK_OFFSET, 8); ++ /* The user trying to poll for a result treats "dq" as const. It is ++ * however the same address that was provided to us non-const in the ++ * first place, for directing hardware DMA to. So we can cast away the ++ * const because it is mutable from our perspective. */ ++ uint32_t *p = qb_cl((struct dpaa2_dq *)dq); ++ uint32_t token; ++ ++ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); ++ if (token != 1) ++ return 0; ++ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); ++ ++ /* Only now do we convert from hardware to host endianness. Also, as we ++ * are returning success, the user has promised not to call us again, so ++ * there's no risk of us converting the endianness twice... 
*/ ++ make_le32_n(p, 16); ++ ++ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the ++ * fact "VDQCR" shows busy doesn't mean that the result we're looking at ++ * is from the same command. Eg. we may be looking at our 10th dequeue ++ * result from our first VDQCR command, yet the second dequeue command ++ * could have been kicked off already, after seeing the 1st result. Ie. ++ * the result we're looking at is not necessarily proof that we can ++ * reset "busy". We instead base the decision on whether the current ++ * result is sitting at the first 'storage' location of the busy ++ * command. */ ++ if (s->vdq.storage == dq) { ++ s->vdq.storage = NULL; ++ atomic_inc(&s->vdq.busy); ++ } ++ return 1; ++} ++ ++/********************************/ ++/* Categorising qbman_result */ ++/********************************/ ++ ++static struct qb_attr_code code_result_in_mem = ++ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); ++ ++static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x) ++{ ++ const uint32_t *p = qb_cl(dq); ++ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); ++ ++ return response_verb == x; ++} ++ ++static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq, ++ uint32_t x) ++{ ++ const uint32_t *p = qb_cl(dq); ++ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); ++ ++ return (response_verb == x); ++} ++ ++int qbman_result_is_DQ(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); ++} ++ ++int qbman_result_is_FQDAN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); ++} ++ ++int qbman_result_is_CDAN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); ++} ++ ++int qbman_result_is_CSCN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || ++ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); ++} ++ ++int qbman_result_is_BPSCN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); ++} ++ ++int qbman_result_is_CGCU(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); ++} ++ ++int qbman_result_is_FQRN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); ++} ++ ++int qbman_result_is_FQRNI(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); ++} ++ ++int qbman_result_is_FQPN(const struct dpaa2_dq *dq) ++{ ++ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); ++} ++ ++/*********************************/ ++/* Parsing frame dequeue results */ ++/*********************************/ ++ ++/* These APIs assume qbman_result_is_DQ() is TRUE */ ++ ++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_stat, p); ++} ++ ++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); ++} ++ ++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); ++} ++ ++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_fqid, p); ++} ++ ++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_byte_count, p); ++} 
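For illustration only (not part of the patch): a minimal sketch of how a caller might drive the volatile-dequeue and result-parsing APIs above — build a pull descriptor targeting a frame queue, point it at caller-provided storage, issue the pull, then poll that storage with qbman_result_has_new_result() before reading the frame descriptor. The function name, my_fqid and the error handling are illustrative assumptions; qbman_test.c later in this patch exercises the same sequence against hardware.

/* Hypothetical usage sketch, not part of this patch. Assumes 'swp' is an
 * already-initialised software portal and 'my_fqid' names a valid FQ. */
static int example_pull_one(struct qbman_swp *swp, uint32_t my_fqid)
{
	static struct dpaa2_dq storage __aligned(64);	/* response lands here */
	dma_addr_t storage_phys = virt_to_phys(&storage);
	struct qbman_pull_desc pd;
	const struct dpaa2_fd *fd;
	int ret;

	qbman_pull_desc_clear(&pd);
	/* Write the dequeue response to our own memory rather than to DQRR */
	qbman_pull_desc_set_storage(&pd, &storage, storage_phys, 0);
	qbman_pull_desc_set_numframes(&pd, 1);
	qbman_pull_desc_set_fq(&pd, my_fqid);

	ret = qbman_swp_pull(swp, &pd);
	if (ret)
		return ret;	/* -EBUSY: previous volatile dequeue still owns VDQCR */

	/* Poll the user-provided storage until the token shows up */
	while (!qbman_result_has_new_result(swp, &storage))
		cpu_relax();

	if (!qbman_result_is_DQ(&storage))
		return -EIO;	/* illustrative handling of a non-DQ entry */

	/* A real caller would also check dpaa2_dq_flags() to see whether the
	 * pull expired without delivering a frame, and would then consume the
	 * frame descriptor. */
	fd = dpaa2_dq_fd(&storage);
	pr_info("pulled one frame from FQID %u\n", dpaa2_dq_fqid(&storage));
	(void)fd;
	return 0;
}

Dequeuing into main memory like this keeps the small DQRR ring free for notifications; the DQRR-based path would instead loop on qbman_swp_dqrr_next() and release each entry with qbman_swp_dqrr_consume(), as the push-dequeue test code below does.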
++ ++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return qb_attr_code_decode(&code_dqrr_frame_count, p); ++} ++ ++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq) ++{ ++ const uint64_t *p = (uint64_t *)qb_cl(dq); ++ ++ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); ++} ++EXPORT_SYMBOL(dpaa2_dq_fqd_ctx); ++ ++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) ++{ ++ const uint32_t *p = qb_cl(dq); ++ ++ return (const struct dpaa2_fd *)&p[8]; ++} ++EXPORT_SYMBOL(dpaa2_dq_fd); ++ ++/**************************************/ ++/* Parsing state-change notifications */ ++/**************************************/ ++ ++static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); ++static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); ++static struct qb_attr_code code_scn_state_in_mem = ++ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); ++static struct qb_attr_code code_scn_rid_in_mem = ++ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); ++static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); ++ ++uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); ++} ++ ++uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return qb_attr_code_decode(&code_scn_rid, p); ++} ++ ++uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn) ++{ ++ const uint64_t *p = (uint64_t *)qb_cl(scn); ++ ++ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); ++} ++ ++uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ ++ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); ++} ++ ++uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn) ++{ ++ const uint32_t *p = qb_cl(scn); ++ uint32_t result_rid; ++ ++ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); ++ return make_le24(result_rid); ++} ++ ++/*****************/ ++/* Parsing BPSCN */ ++/*****************/ ++uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn) ++{ ++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; ++} ++ ++int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn) ++{ ++ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); ++} ++ ++int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn) ++{ ++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); ++} ++ ++int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn) ++{ ++ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); ++} ++ ++uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn) ++{ ++ return qbman_result_SCN_ctx(scn); ++} ++ ++/*****************/ ++/* Parsing CGCU */ ++/*****************/ ++uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn) ++{ ++ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; ++} ++ ++uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn) ++{ ++ return qbman_result_SCN_ctx(scn) & 0xFFFFFFFFFF; ++} ++ ++/******************/ ++/* Buffer release */ ++/******************/ ++ ++/* These should be const, eventually */ ++/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ ++static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); ++static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); ++static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); ++ ++void qbman_release_desc_clear(struct qbman_release_desc *d) ++{ ++ uint32_t *cl; ++ ++ memset(d, 0, sizeof(*d)); 
++ cl = qb_cl(d); ++ qb_attr_code_encode(&code_release_set_me, cl, 1); ++} ++ ++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_release_bpid, cl, bpid); ++} ++ ++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) ++{ ++ uint32_t *cl = qb_cl(d); ++ ++ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); ++} ++ ++#define RAR_IDX(rar) ((rar) & 0x7) ++#define RAR_VB(rar) ((rar) & 0x80) ++#define RAR_SUCCESS(rar) ((rar) & 0x100) ++ ++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, ++ const uint64_t *buffers, unsigned int num_buffers) ++{ ++ uint32_t *p; ++ const uint32_t *cl = qb_cl(d); ++ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); ++ ++ pr_debug("RAR=%08x\n", rar); ++ if (!RAR_SUCCESS(rar)) ++ return -EBUSY; ++ BUG_ON(!num_buffers || (num_buffers > 7)); ++ /* Start the release command */ ++ p = qbman_cena_write_start(&s->sys, ++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); ++ /* Copy the caller's buffer pointers to the command */ ++ u64_to_le32_copy(&p[2], buffers, num_buffers); ++ /* Set the verb byte, have to substitute in the valid-bit and the number ++ * of buffers. */ ++ p[0] = cl[0] | RAR_VB(rar) | num_buffers; ++ qbman_cena_write_complete(&s->sys, ++ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)), ++ p); ++ return 0; ++} ++ ++/*******************/ ++/* Buffer acquires */ ++/*******************/ ++ ++/* These should be const, eventually */ ++static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); ++static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); ++static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); ++ ++int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, ++ unsigned int num_buffers) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt, num; ++ ++ BUG_ON(!num_buffers || (num_buffers > 7)); ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_acquire_bpid, p, bpid); ++ qb_attr_code_encode(&code_acquire_num, p, num_buffers); ++ ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ num = qb_attr_code_decode(&code_acquire_r_num, p); ++ BUG_ON(verb != QBMAN_MC_ACQUIRE); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", ++ bpid, rslt); ++ return -EIO; ++ } ++ BUG_ON(num > num_buffers); ++ /* Copy the acquired buffers to the caller's array */ ++ u64_from_le32_copy(buffers, &p[2], num); ++ return (int)num; ++} ++ ++/*****************/ ++/* FQ management */ ++/*****************/ ++ ++static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); ++ ++static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, ++ uint8_t alt_fq_verb) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != 
alt_fq_verb); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", ++ fqid, alt_fq_verb, rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); ++} ++ ++int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); ++} ++ ++int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); ++} ++ ++int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) ++{ ++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); ++} ++ ++/**********************/ ++/* Channel management */ ++/**********************/ ++ ++static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); ++static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); ++static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); ++static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); ++ ++/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it ++ * would be irresponsible to expose it. */ ++#define CODE_CDAN_WE_EN 0x1 ++#define CODE_CDAN_WE_CTX 0x4 ++ ++static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, ++ uint8_t we_mask, uint8_t cdan_en, ++ uint64_t ctx) ++{ ++ uint32_t *p; ++ uint32_t verb, rslt; ++ ++ /* Start the management command */ ++ p = qbman_swp_mc_start(s); ++ if (!p) ++ return -EBUSY; ++ ++ /* Encode the caller-provided attributes */ ++ qb_attr_code_encode(&code_cdan_cid, p, channelid); ++ qb_attr_code_encode(&code_cdan_we, p, we_mask); ++ qb_attr_code_encode(&code_cdan_en, p, cdan_en); ++ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); ++ /* Complete the management command */ ++ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); ++ ++ /* Decode the outcome */ ++ verb = qb_attr_code_decode(&code_generic_verb, p); ++ rslt = qb_attr_code_decode(&code_generic_rslt, p); ++ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE); ++ ++ /* Determine success or failure */ ++ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { ++ pr_err("CDAN cQID %d failed: code = 0x%02x\n", ++ channelid, rslt); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, ++ uint64_t ctx) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_CTX, ++ 0, ctx); ++} ++ ++int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN, ++ 1, 0); ++} ++int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN, ++ 0, 0); ++} ++int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, ++ uint64_t ctx) ++{ ++ return qbman_swp_CDAN_set(s, channelid, ++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, ++ 1, ctx); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h +new file mode 100644 +index 0000000..65ebf3f +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h +@@ -0,0 +1,261 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "qbman_private.h" ++#include "fsl_qbman_portal.h" ++#include "../../include/fsl_dpaa2_fd.h" ++ ++/* All QBMan command and result structures use this "valid bit" encoding */ ++#define QB_VALID_BIT ((uint32_t)0x80) ++ ++/* Management command result codes */ ++#define QBMAN_MC_RSLT_OK 0xf0 ++ ++/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ ++#define QBMAN_DQRR_SIZE 4 ++ ++/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */ ++#define WORKAROUND_DQRR_RESET_BUG ++ ++/* --------------------- */ ++/* portal data structure */ ++/* --------------------- */ ++ ++struct qbman_swp { ++ const struct qbman_swp_desc *desc; ++ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it ++ * needs in here. */ ++ struct qbman_swp_sys sys; ++ /* Management commands */ ++ struct { ++#ifdef QBMAN_CHECKING ++ enum swp_mc_check { ++ swp_mc_can_start, /* call __qbman_swp_mc_start() */ ++ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ ++ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ ++ } check; ++#endif ++ uint32_t valid_bit; /* 0x00 or 0x80 */ ++ } mc; ++ /* Push dequeues */ ++ uint32_t sdq; ++ /* Volatile dequeues */ ++ struct { ++ /* VDQCR supports a "1 deep pipeline", meaning that if you know ++ * the last-submitted command is already executing in the ++ * hardware (as evidenced by at least 1 valid dequeue result), ++ * you can write another dequeue command to the register, the ++ * hardware will start executing it as soon as the ++ * already-executing command terminates. (This minimises latency ++ * and stalls.) With that in mind, this "busy" variable refers ++ * to whether or not a command can be submitted, not whether or ++ * not a previously-submitted command is still executing. 
In ++ * other words, once proof is seen that the previously-submitted ++ * command is executing, "vdq" is no longer "busy". ++ */ ++ atomic_t busy; ++ uint32_t valid_bit; /* 0x00 or 0x80 */ ++ /* We need to determine when vdq is no longer busy. This depends ++ * on whether the "busy" (last-submitted) dequeue command is ++ * targeting DQRR or main-memory, and detected is based on the ++ * presence of the dequeue command's "token" showing up in ++ * dequeue entries in DQRR or main-memory (respectively). */ ++ struct dpaa2_dq *storage; /* NULL if DQRR */ ++ } vdq; ++ /* DQRR */ ++ struct { ++ uint32_t next_idx; ++ uint32_t valid_bit; ++ uint8_t dqrr_size; ++#ifdef WORKAROUND_DQRR_RESET_BUG ++ int reset_bug; ++#endif ++ } dqrr; ++}; ++ ++/* -------------------------- */ ++/* portal management commands */ ++/* -------------------------- */ ++ ++/* Different management commands all use this common base layer of code to issue ++ * commands and poll for results. The first function returns a pointer to where ++ * the caller should fill in their MC command (though they should ignore the ++ * verb byte), the second function commits merges in the caller-supplied command ++ * verb (which should not include the valid-bit) and submits the command to ++ * hardware, and the third function checks for a completed response (returns ++ * non-NULL if only if the response is complete). */ ++void *qbman_swp_mc_start(struct qbman_swp *p); ++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); ++void *qbman_swp_mc_result(struct qbman_swp *p); ++ ++/* Wraps up submit + poll-for-result */ ++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, ++ uint32_t cmd_verb) ++{ ++ int loopvar; ++ ++ qbman_swp_mc_submit(swp, cmd, cmd_verb); ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ cmd = qbman_swp_mc_result(swp); ++ } while (!cmd); ++ return cmd; ++} ++ ++/* ------------ */ ++/* qb_attr_code */ ++/* ------------ */ ++ ++/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which ++ * is either serving as a configuration command or a query result. The ++ * representation is inherently little-endian, as the indexing of the words is ++ * itself little-endian in nature and layerscape is little endian for anything ++ * that crosses a word boundary too (64-bit fields are the obvious examples). ++ */ ++struct qb_attr_code { ++ unsigned int word; /* which uint32_t[] array member encodes the field */ ++ unsigned int lsoffset; /* encoding offset from ls-bit */ ++ unsigned int width; /* encoding width. (bool must be 1.) */ ++}; ++ ++/* Some pre-defined codes */ ++extern struct qb_attr_code code_generic_verb; ++extern struct qb_attr_code code_generic_rslt; ++ ++/* Macros to define codes */ ++#define QB_CODE(a, b, c) { a, b, c} ++#define QB_CODE_NULL \ ++ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) ++ ++/* Rotate a code "ms", meaning that it moves from less-significant bytes to ++ * more-significant, from less-significant words to more-significant, etc. The ++ * "ls" version does the inverse, from more-significant towards ++ * less-significant. ++ */ ++static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, ++ unsigned int bits) ++{ ++ code->lsoffset += bits; ++ while (code->lsoffset > 31) { ++ code->word++; ++ code->lsoffset -= 32; ++ } ++} ++static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, ++ unsigned int bits) ++{ ++ /* Don't be fooled, this trick should work because the types are ++ * unsigned. 
So the case that interests the while loop (the rotate has ++ * gone too far and the word count needs to compensate for it), is ++ * manifested when lsoffset is negative. But that equates to a really ++ * large unsigned value, starting with lots of "F"s. As such, we can ++ * continue adding 32 back to it until it wraps back round above zero, ++ * to a value of 31 or less... ++ */ ++ code->lsoffset -= bits; ++ while (code->lsoffset > 31) { ++ code->word--; ++ code->lsoffset += 32; ++ } ++} ++/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */ ++#define qb_attr_code_for_ms(code, bits, expr) \ ++ for (; expr; qb_attr_code_rotate_ms(code, bits)) ++#define qb_attr_code_for_ls(code, bits, expr) \ ++ for (; expr; qb_attr_code_rotate_ls(code, bits)) ++ ++/* decode a field from a cacheline */ ++static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, ++ const uint32_t *cacheline) ++{ ++ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); ++} ++static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, ++ const uint64_t *cacheline) ++{ ++ uint64_t res; ++ u64_from_le32_copy(&res, &cacheline[code->word/2], 1); ++ return res; ++} ++ ++/* encode a field to a cacheline */ ++static inline void qb_attr_code_encode(const struct qb_attr_code *code, ++ uint32_t *cacheline, uint32_t val) ++{ ++ cacheline[code->word] = ++ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) ++ | e32_uint32_t(code->lsoffset, code->width, val); ++} ++static inline void qb_attr_code_encode_64(const struct qb_attr_code *code, ++ uint64_t *cacheline, uint64_t val) ++{ ++ u64_to_le32_copy(&cacheline[code->word/2], &val, 1); ++} ++ ++/* Small-width signed values (two's-complement) will decode into medium-width ++ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to ++ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value ++ * 249. Likewise -120 would decode as 136.) This function allows the caller to ++ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit ++ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). ++ */ ++static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, ++ uint32_t val) ++{ ++ BUG_ON(val >= (1 << code->width)); ++ /* If the high bit was set, it was encoding a negative */ ++ if (val >= (1 << (code->width - 1))) ++ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - ++ val); ++ /* Otherwise, it was encoding a positive */ ++ return (int32_t)val; ++} ++ ++/* ---------------------- */ ++/* Descriptors/cachelines */ ++/* ---------------------- */ ++ ++/* To avoid needless dynamic allocation, the driver API often gives the caller ++ * a "descriptor" type that the caller can instantiate however they like. ++ * Ultimately though, it is just a cacheline of binary storage (or something ++ * smaller when it is known that the descriptor doesn't need all 64 bytes) for ++ * holding pre-formatted pieces of hardware commands. The performance-critical ++ * code can then copy these descriptors directly into hardware command ++ * registers more efficiently than trying to construct/format commands ++ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in ++ * order for the compiler to know its size, but the internal details are not ++ * exposed. The following macro is used within the driver for converting *any* ++ * descriptor pointer to a usable array pointer. 
The use of a macro (instead of ++ * an inline) is necessary to work with different descriptor types and to work ++ * correctly with const and non-const inputs (and similarly-qualified outputs). ++ */ ++#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h +new file mode 100644 +index 0000000..e376b80 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h +@@ -0,0 +1,173 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++*/ ++ ++/* Perform extra checking */ ++#define QBMAN_CHECKING ++ ++/* To maximise the amount of logic that is common between the Linux driver and ++ * other targets (such as the embedded MC firmware), we pivot here between the ++ * inclusion of two platform-specific headers. ++ * ++ * The first, qbman_sys_decl.h, includes any and all required system headers as ++ * well as providing any definitions for the purposes of compatibility. The ++ * second, qbman_sys.h, is where platform-specific routines go. ++ * ++ * The point of the split is that the platform-independent code (including this ++ * header) may depend on platform-specific declarations, yet other ++ * platform-specific routines may depend on platform-independent definitions. ++ */ ++ ++#include "qbman_sys_decl.h" ++ ++#define QMAN_REV_4000 0x04000000 ++#define QMAN_REV_4100 0x04010000 ++#define QMAN_REV_4101 0x04010001 ++ ++/* When things go wrong, it is a convenient trick to insert a few FOO() ++ * statements in the code to trace progress. TODO: remove this once we are ++ * hacking the code less actively. 
++ */ ++#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) ++ ++/* Any time there is a register interface which we poll on, this provides a ++ * "break after x iterations" scheme for it. It's handy for debugging, eg. ++ * where you don't want millions of lines of log output from a polling loop ++ * that won't, because such things tend to drown out the earlier log output ++ * that might explain what caused the problem. (NB: put ";" after each macro!) ++ * TODO: we should probably remove this once we're done sanitising the ++ * simulator... ++ */ ++#define DBG_POLL_START(loopvar) (loopvar = 10) ++#define DBG_POLL_CHECK(loopvar) \ ++ do {if (!(loopvar--)) BUG_ON(1); } while (0) ++ ++/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets ++ * and widths, these macro-generated encode/decode/isolate/remove inlines can ++ * be used. ++ * ++ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), ++ * where the field is located 3 bits "up" from the least-significant bit of the ++ * register (ie. the field location within the 32-bit register corresponds to a ++ * mask of 0x0001fff8), you would do; ++ * uint16_t field = d32_uint16_t(3, 14, reg_value); ++ * ++ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, ++ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" ++ * operator) into a register at bit location 0x00080000 (19 bits "in" from the ++ * LS bit), do; ++ * reg_value |= e32_int(19, 1, !!field); ++ * ++ * If you wish to read-modify-write a register, such that you leave the 14-bit ++ * field as-is but have all other fields set to zero, then "i"solate the 14-bit ++ * value using; ++ * reg_value = i32_uint16_t(3, 14, reg_value); ++ * ++ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to ++ * zero) but leaving all other fields as-is; ++ * reg_val = r32_int(19, 1, reg_value); ++ * ++ */ ++#define MAKE_MASK32(width) (width == 32 ? 
0xffffffff : \ ++ (uint32_t)((1 << width) - 1)) ++#define DECLARE_CODEC32(t) \ ++static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ ++} \ ++static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ ++} \ ++static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ ++ uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ ++} \ ++static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ ++ uint32_t val) \ ++{ \ ++ BUG_ON(width > (sizeof(t) * 8)); \ ++ return ~(MAKE_MASK32(width) << lsoffset) & val; \ ++} ++DECLARE_CODEC32(uint32_t) ++DECLARE_CODEC32(uint16_t) ++DECLARE_CODEC32(uint8_t) ++DECLARE_CODEC32(int) ++ ++ /*********************/ ++ /* Debugging assists */ ++ /*********************/ ++ ++static inline void __hexdump(unsigned long start, unsigned long end, ++ unsigned long p, size_t sz, const unsigned char *c) ++{ ++ while (start < end) { ++ unsigned int pos = 0; ++ char buf[64]; ++ int nl = 0; ++ ++ pos += sprintf(buf + pos, "%08lx: ", start); ++ do { ++ if ((start < p) || (start >= (p + sz))) ++ pos += sprintf(buf + pos, ".."); ++ else ++ pos += sprintf(buf + pos, "%02x", *(c++)); ++ if (!(++start & 15)) { ++ buf[pos++] = '\n'; ++ nl = 1; ++ } else { ++ nl = 0; ++ if (!(start & 1)) ++ buf[pos++] = ' '; ++ if (!(start & 3)) ++ buf[pos++] = ' '; ++ } ++ } while (start & 15); ++ if (!nl) ++ buf[pos++] = '\n'; ++ buf[pos] = '\0'; ++ pr_info("%s", buf); ++ } ++} ++static inline void hexdump(const void *ptr, size_t sz) ++{ ++ unsigned long p = (unsigned long)ptr; ++ unsigned long start = p & ~(unsigned long)15; ++ unsigned long end = (p + sz + 15) & ~(unsigned long)15; ++ const unsigned char *c = ptr; ++ ++ __hexdump(start, end, p, sz, c); ++} ++ ++#include "qbman_sys.h" +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h +new file mode 100644 +index 0000000..4849212 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h +@@ -0,0 +1,307 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the ++ * driver. They are only included via qbman_private.h, which is itself a ++ * platform-independent file and is included by all the other driver source. ++ * ++ * qbman_sys_decl.h is included prior to all other declarations and logic, and ++ * it exists to provide compatibility with any linux interfaces our ++ * single-source driver code is dependent on (eg. kmalloc). Ie. this file ++ * provides linux compatibility. ++ * ++ * This qbman_sys.h header, on the other hand, is included *after* any common ++ * and platform-neutral declarations and logic in qbman_private.h, and exists to ++ * implement any platform-specific logic of the qbman driver itself. Ie. it is ++ * *not* to provide linux compatibility. ++ */ ++ ++/* Trace the 3 different classes of read/write access to QBMan. #undef as ++ * required. */ ++#undef QBMAN_CCSR_TRACE ++#undef QBMAN_CINH_TRACE ++#undef QBMAN_CENA_TRACE ++ ++static inline void word_copy(void *d, const void *s, unsigned int cnt) ++{ ++ uint32_t *dd = d; ++ const uint32_t *ss = s; ++ ++ while (cnt--) ++ *(dd++) = *(ss++); ++} ++ ++/* Currently, the CENA support code expects each 32-bit word to be written in ++ * host order, and these are converted to hardware (little-endian) order on ++ * command submission. However, 64-bit quantities are must be written (and read) ++ * as two 32-bit words with the least-significant word first, irrespective of ++ * host endianness. */ ++static inline void u64_to_le32_copy(void *d, const uint64_t *s, ++ unsigned int cnt) ++{ ++ uint32_t *dd = d; ++ const uint32_t *ss = (const uint32_t *)s; ++ ++ while (cnt--) { ++ /* TBD: the toolchain was choking on the use of 64-bit types up ++ * until recently so this works entirely with 32-bit variables. ++ * When 64-bit types become usable again, investigate better ++ * ways of doing this. 
*/ ++#if defined(__BIG_ENDIAN) ++ *(dd++) = ss[1]; ++ *(dd++) = ss[0]; ++ ss += 2; ++#else ++ *(dd++) = *(ss++); ++ *(dd++) = *(ss++); ++#endif ++ } ++} ++static inline void u64_from_le32_copy(uint64_t *d, const void *s, ++ unsigned int cnt) ++{ ++ const uint32_t *ss = s; ++ uint32_t *dd = (uint32_t *)d; ++ ++ while (cnt--) { ++#if defined(__BIG_ENDIAN) ++ dd[1] = *(ss++); ++ dd[0] = *(ss++); ++ dd += 2; ++#else ++ *(dd++) = *(ss++); ++ *(dd++) = *(ss++); ++#endif ++ } ++} ++ ++/* Convert a host-native 32bit value into little endian */ ++#if defined(__BIG_ENDIAN) ++static inline uint32_t make_le32(uint32_t val) ++{ ++ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | ++ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); ++} ++static inline uint32_t make_le24(uint32_t val) ++{ ++ return (((val & 0xff) << 16) | (val & 0xff00) | ++ ((val & 0xff0000) >> 16)); ++} ++#else ++#define make_le32(val) (val) ++#define make_le24(val) (val) ++#endif ++static inline void make_le32_n(uint32_t *val, unsigned int num) ++{ ++ while (num--) { ++ *val = make_le32(*val); ++ val++; ++ } ++} ++ ++ /******************/ ++ /* Portal access */ ++ /******************/ ++struct qbman_swp_sys { ++ /* On GPP, the sys support for qbman_swp is here. The CENA region isi ++ * not an mmap() of the real portal registers, but an allocated ++ * place-holder, because the actual writes/reads to/from the portal are ++ * marshalled from these allocated areas using QBMan's "MC access ++ * registers". CINH accesses are atomic so there's no need for a ++ * place-holder. */ ++ void *cena; ++ void __iomem *addr_cena; ++ void __iomem *addr_cinh; ++}; ++ ++/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal ++ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH) ++ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index ++ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) ++ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) ++ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) ++ */ ++ ++static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, ++ uint32_t val) ++{ ++ ++ writel_relaxed(val, s->addr_cinh + offset); ++#ifdef QBMAN_CINH_TRACE ++ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n", ++ s->addr_cinh, offset, val); ++#endif ++} ++ ++static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) ++{ ++ uint32_t reg = readl_relaxed(s->addr_cinh + offset); ++ ++#ifdef QBMAN_CINH_TRACE ++ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n", ++ s->addr_cinh, offset, reg); ++#endif ++ return reg; ++} ++ ++static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, ++ uint32_t offset) ++{ ++ void *shadow = s->cena + offset; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++#endif ++ BUG_ON(offset & 63); ++ dcbz(shadow); ++ return shadow; ++} ++ ++static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, ++ uint32_t offset, void *cmd) ++{ ++ const uint32_t *shadow = cmd; ++ int loop; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++ hexdump(cmd, 64); ++#endif ++ for (loop = 15; loop >= 1; loop--) ++ writel_relaxed(shadow[loop], s->addr_cena + ++ offset + loop * 4); ++ lwsync(); ++ writel_relaxed(shadow[0], s->addr_cena + offset); ++ dcbf(s->addr_cena + offset); ++} ++ ++static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) ++{ ++ uint32_t *shadow = s->cena + 
offset; ++ unsigned int loop; ++ ++#ifdef QBMAN_CENA_TRACE ++ pr_info("qbman_cena_read(%p:0x%03x) %p\n", ++ s->addr_cena, offset, shadow); ++#endif ++ ++ for (loop = 0; loop < 16; loop++) ++ shadow[loop] = readl_relaxed(s->addr_cena + offset ++ + loop * 4); ++#ifdef QBMAN_CENA_TRACE ++ hexdump(shadow, 64); ++#endif ++ return shadow; ++} ++ ++static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, ++ uint32_t offset) ++{ ++ dcivac(s->addr_cena + offset); ++ prefetch_for_load(s->addr_cena + offset); ++} ++ ++ /******************/ ++ /* Portal support */ ++ /******************/ ++ ++/* The SWP_CFG portal register is special, in that it is used by the ++ * platform-specific code rather than the platform-independent code in ++ * qbman_portal.c. So use of it is declared locally here. */ ++#define QBMAN_CINH_SWP_CFG 0xd00 ++ ++/* For MC portal use, we always configure with ++ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) ++ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0) ++ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) ++ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) ++ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3) ++ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE) ++ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) ++ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0) ++ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) ++ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0) ++ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE) ++ */ ++static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, ++ uint8_t est, uint8_t rpm, uint8_t dcm, ++ uint8_t epm, int sd, int sp, int se, ++ int dp, int de, int ep) ++{ ++ uint32_t reg; ++ ++ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | ++ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) | ++ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) | ++ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) | ++ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) | ++ e32_uint8_t(14, 1, wn); ++ return reg; ++} ++ ++static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, ++ const struct qbman_swp_desc *d, ++ uint8_t dqrr_size) ++{ ++ uint32_t reg; ++ ++ s->addr_cena = d->cena_bar; ++ s->addr_cinh = d->cinh_bar; ++ s->cena = (void *)get_zeroed_page(GFP_KERNEL); ++ if (!s->cena) { ++ pr_err("Could not allocate page for cena shadow\n"); ++ return -1; ++ } ++ ++#ifdef QBMAN_CHECKING ++ /* We should never be asked to initialise for a portal that isn't in ++ * the power-on state. (Ie. don't forget to reset portals when they are ++ * decommissioned!) ++ */ ++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); ++ BUG_ON(reg); ++#endif ++ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0); ++ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); ++ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); ++ if (!reg) { ++ pr_err("The portal is not enabled!\n"); ++ kfree(s->cena); ++ return -1; ++ } ++ return 0; ++} ++ ++static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) ++{ ++ free_page((unsigned long)s->cena); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h +new file mode 100644 +index 0000000..5b3a224 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h +@@ -0,0 +1,86 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "fsl_qbman_base.h" ++ ++/* The platform-independent code shouldn't need endianness, except for ++ * weird/fast-path cases like qbman_result_has_token(), which needs to ++ * perform a passive and endianness-specific test on a read-only data structure ++ * very quickly. It's an exception, and this symbol is used for that case. 
*/ ++#if defined(__BIG_ENDIAN) ++#define DQRR_TOK_OFFSET 0 ++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 ++#define SCN_STATE_OFFSET_IN_MEM 8 ++#define SCN_RID_OFFSET_IN_MEM 8 ++#else ++#define DQRR_TOK_OFFSET 24 ++#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 ++#define SCN_STATE_OFFSET_IN_MEM 16 ++#define SCN_RID_OFFSET_IN_MEM 0 ++#endif ++ ++/* Similarly-named functions */ ++#define upper32(a) upper_32_bits(a) ++#define lower32(a) lower_32_bits(a) ++ ++ /****************/ ++ /* arch assists */ ++ /****************/ ++ ++#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } ++#define lwsync() { asm volatile("dmb st" : : : "memory"); } ++#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } ++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } ++static inline void prefetch_for_load(void *p) ++{ ++ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); ++} ++static inline void prefetch_for_store(void *p) ++{ ++ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); ++} +diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_test.c b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c +new file mode 100644 +index 0000000..28396e7 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c +@@ -0,0 +1,664 @@ ++/* Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "qbman_private.h" ++#include "fsl_qbman_portal.h" ++#include "qbman_debug.h" ++#include "../../include/fsl_dpaa2_fd.h" ++ ++#define QBMAN_SWP_CENA_BASE 0x818000000 ++#define QBMAN_SWP_CINH_BASE 0x81c000000 ++ ++#define QBMAN_PORTAL_IDX 2 ++#define QBMAN_TEST_FQID 19 ++#define QBMAN_TEST_BPID 23 ++#define QBMAN_USE_QD ++#ifdef QBMAN_USE_QD ++#define QBMAN_TEST_QDID 1 ++#endif ++#define QBMAN_TEST_LFQID 0xf00010 ++ ++#define NUM_EQ_FRAME 10 ++#define NUM_DQ_FRAME 10 ++#define NUM_DQ_IN_DQRR 5 ++#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR) ++ ++static struct qbman_swp *swp; ++static struct qbman_eq_desc eqdesc; ++static struct qbman_pull_desc pulldesc; ++static struct qbman_release_desc releasedesc; ++static struct qbman_eq_response eq_storage[1]; ++static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64); ++static dma_addr_t eq_storage_phys; ++static dma_addr_t dq_storage_phys; ++ ++/* FQ ctx attribute values for the test code. */ ++#define FQCTX_HI 0xabbaf00d ++#define FQCTX_LO 0x98765432 ++#define FQ_VFQID 0x123456 ++ ++/* Sample frame descriptor */ ++static struct qbman_fd_simple fd = { ++ .addr_lo = 0xbabaf33d, ++ .addr_hi = 0x01234567, ++ .len = 0x7777, ++ .frc = 0xdeadbeef, ++ .flc_lo = 0xcafecafe, ++ .flc_hi = 0xbeadabba ++}; ++ ++static void fd_inc(struct qbman_fd_simple *_fd) ++{ ++ _fd->addr_lo += _fd->len; ++ _fd->flc_lo += 0x100; ++ _fd->frc += 0x10; ++} ++ ++static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb) ++{ ++ int i; ++ ++ for (i = 0; i < 8; i++) ++ if (fda->words[i] - fdb->words[i]) ++ return 1; ++ return 0; ++} ++ ++struct qbman_fd fd_eq[NUM_EQ_FRAME]; ++struct qbman_fd fd_dq[NUM_DQ_FRAME]; ++ ++/* "Buffers" to be released (and storage for buffers to be acquired) */ ++static uint64_t rbufs[320]; ++static uint64_t abufs[320]; ++ ++static void do_enqueue(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ ++#ifdef QBMAN_USE_QD ++ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n", ++ NUM_EQ_FRAME, QBMAN_TEST_QDID); ++#else ++ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n", ++ NUM_EQ_FRAME, QBMAN_TEST_FQID); ++#endif ++ for (i = 0; i < NUM_EQ_FRAME; i++) { ++ /*********************************/ ++ /* Prepare a enqueue descriptor */ ++ /*********************************/ ++ memset(eq_storage, 0, sizeof(eq_storage)); ++ eq_storage_phys = virt_to_phys(eq_storage); ++ qbman_eq_desc_clear(&eqdesc); ++ qbman_eq_desc_set_no_orp(&eqdesc, 0); ++ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0); ++ qbman_eq_desc_set_token(&eqdesc, 0x99); ++#ifdef QBMAN_USE_QD ++ /**********************************/ ++ /* Prepare a Queueing Destination */ ++ /**********************************/ ++ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3); ++#else ++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID); ++#endif ++ ++ /******************/ ++ /* Try an enqueue */ ++ /******************/ ++ ret = qbman_swp_enqueue(swp, &eqdesc, ++ (const struct qbman_fd *)&fd); ++ BUG_ON(ret); ++ for (j = 0; j < 8; j++) ++ fd_eq[i].words[j] = *((uint32_t *)&fd + j); ++ fd_inc(&fd); ++ } ++} ++ ++static void do_push_dequeue(struct qbman_swp *swp) ++{ ++ int i, j; ++ const struct dpaa2_dq *dq_storage1; ++ const struct qbman_fd *__fd; ++ int loopvar; ++ ++ pr_info("*****QBMan_test: Start push dequeue\n"); ++ for (i = 0; i < NUM_DQ_FRAME; i++) { ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ dq_storage1 = qbman_swp_dqrr_next(swp); ++ } while (!dq_storage1); ++ if (dq_storage1) { ++ __fd = (const struct 
qbman_fd *) ++ dpaa2_dq_fd(dq_storage1); ++ for (j = 0; j < 8; j++) ++ fd_dq[i].words[j] = __fd->words[j]; ++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i], 32); ++ } ++ qbman_swp_dqrr_consume(swp, dq_storage1); ++ } else { ++ pr_info("The push dequeue fails\n"); ++ } ++ } ++} ++ ++static void do_pull_dequeue(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ const struct dpaa2_dq *dq_storage1; ++ const struct qbman_fd *__fd; ++ int loopvar; ++ ++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n", ++ NUM_DQ_IN_DQRR); ++ for (i = 0; i < NUM_DQ_IN_DQRR; i++) { ++ qbman_pull_desc_clear(&pulldesc); ++ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0); ++ qbman_pull_desc_set_numframes(&pulldesc, 1); ++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); ++ ++ ret = qbman_swp_pull(swp, &pulldesc); ++ BUG_ON(ret); ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ dq_storage1 = qbman_swp_dqrr_next(swp); ++ } while (!dq_storage1); ++ ++ if (dq_storage1) { ++ __fd = (const struct qbman_fd *) ++ dpaa2_dq_fd(dq_storage1); ++ for (j = 0; j < 8; j++) ++ fd_dq[i].words[j] = __fd->words[j]; ++ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i], 32); ++ } ++ qbman_swp_dqrr_consume(swp, dq_storage1); ++ } else { ++ pr_info("Dequeue with dq entry in DQRR fails\n"); ++ } ++ } ++ ++ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n", ++ NUM_DQ_IN_MEM); ++ for (i = 0; i < NUM_DQ_IN_MEM; i++) { ++ dq_storage_phys = virt_to_phys(&dq_storage[i]); ++ qbman_pull_desc_clear(&pulldesc); ++ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i], ++ dq_storage_phys, 1); ++ qbman_pull_desc_set_numframes(&pulldesc, 1); ++ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); ++ ret = qbman_swp_pull(swp, &pulldesc); ++ BUG_ON(ret); ++ ++ DBG_POLL_START(loopvar); ++ do { ++ DBG_POLL_CHECK(loopvar); ++ ret = qbman_result_has_new_result(swp, ++ &dq_storage[i]); ++ } while (!ret); ++ ++ if (ret) { ++ for (j = 0; j < 8; j++) ++ fd_dq[i + NUM_DQ_IN_DQRR].words[j] = ++ dq_storage[i].dont_manipulate_directly[j + 8]; ++ j = i + NUM_DQ_IN_DQRR; ++ if (fd_cmp(&fd_eq[j], &fd_dq[j])) { ++ pr_info("enqueue FD is\n"); ++ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32); ++ pr_info("dequeue FD is\n"); ++ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32); ++ hexdump(&dq_storage[i], 64); ++ } ++ } else { ++ pr_info("Dequeue with dq entry in memory fails\n"); ++ } ++ } ++} ++ ++static void release_buffer(struct qbman_swp *swp, unsigned int num) ++{ ++ int ret; ++ unsigned int i, j; ++ ++ qbman_release_desc_clear(&releasedesc); ++ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID); ++ pr_info("*****QBMan_test: Release %d buffers to BP %d\n", ++ num, QBMAN_TEST_BPID); ++ for (i = 0; i < (num / 7 + 1); i++) { ++ j = ((num - i * 7) > 7) ? 7 : (num - i * 7); ++ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j); ++ BUG_ON(ret); ++ } ++} ++ ++static void acquire_buffer(struct qbman_swp *swp, unsigned int num) ++{ ++ int ret; ++ unsigned int i, j; ++ ++ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n", ++ num, QBMAN_TEST_BPID); ++ ++ for (i = 0; i < (num / 7 + 1); i++) { ++ j = ((num - i * 7) > 7) ? 
7 : (num - i * 7); ++ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j); ++ BUG_ON(ret != j); ++ } ++} ++ ++static void buffer_pool_test(struct qbman_swp *swp) ++{ ++ struct qbman_attr info; ++ struct dpaa2_dq *bpscn_message; ++ dma_addr_t bpscn_phys; ++ uint64_t bpscn_ctx; ++ uint64_t ctx = 0xbbccddaadeadbeefull; ++ int i, ret; ++ uint32_t hw_targ; ++ ++ pr_info("*****QBMan_test: test buffer pool management\n"); ++ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys); ++ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys, ++ virt_to_phys(&info)); ++ bpscn_message = phys_to_virt(bpscn_phys); ++ ++ for (i = 0; i < 320; i++) ++ rbufs[i] = 0xf00dabba01234567ull + i * 0x40; ++ ++ release_buffer(swp, 320); ++ ++ pr_info("QBMan_test: query the buffer pool\n"); ++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ hexdump(&info, 64); ++ qbman_bp_attr_get_hw_targ(&info, &hw_targ); ++ pr_info("hw_targ is %d\n", hw_targ); ++ ++ /* Acquire buffers to trigger BPSCN */ ++ acquire_buffer(swp, 300); ++ /* BPSCN should be written to the memory */ ++ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); ++ hexdump(&info, 64); ++ hexdump(bpscn_message, 64); ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is depleted */ ++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); ++ /* The ctx should match */ ++ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message); ++ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx); ++ BUG_ON(ctx != bpscn_ctx); ++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); ++ ++ /* Re-seed the buffer pool to trigger BPSCN */ ++ release_buffer(swp, 240); ++ /* BPSCN should be written to the memory */ ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is not depleted */ ++ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message)); ++ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); ++ ++ acquire_buffer(swp, 260); ++ /* BPSCN should be written to the memory */ ++ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); ++ /* There should be free buffers in the pool while BPSCN generated */ ++ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); ++ /* Buffer pool is depletion */ ++ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); ++} ++ ++static void ceetm_test(struct qbman_swp *swp) ++{ ++ int i, j, ret; ++ ++ qbman_eq_desc_clear(&eqdesc); ++ qbman_eq_desc_set_no_orp(&eqdesc, 0); ++ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID); ++ pr_info("*****QBMan_test: Enqueue to LFQID %x\n", ++ QBMAN_TEST_LFQID); ++ for (i = 0; i < NUM_EQ_FRAME; i++) { ++ ret = qbman_swp_enqueue(swp, &eqdesc, ++ (const struct qbman_fd *)&fd); ++ BUG_ON(ret); ++ for (j = 0; j < 8; j++) ++ fd_eq[i].words[j] = *((uint32_t *)&fd + j); ++ fd_inc(&fd); ++ } ++} ++ ++int qbman_test(void) ++{ ++ struct qbman_swp_desc pd; ++ uint32_t reg; ++ ++ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE + ++ QBMAN_PORTAL_IDX * 0x10000, 0x10000); ++ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE + ++ QBMAN_PORTAL_IDX * 0x10000, 0x10000); ++ ++ /* Detect whether the mc image is the test image with GPP setup */ ++ reg = readl_relaxed(pd.cena_bar + 0x4); ++ if (reg != 0xdeadbeef) { ++ pr_err("The MC image doesn't have GPP test setup, stop!\n"); ++ iounmap(pd.cena_bar); ++ iounmap(pd.cinh_bar); ++ return 
-1; ++ } ++ ++ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX); ++ swp = qbman_swp_init(&pd); ++ if (!swp) { ++ iounmap(pd.cena_bar); ++ iounmap(pd.cinh_bar); ++ return -1; ++ } ++ ++ /*******************/ ++ /* Enqueue frames */ ++ /*******************/ ++ do_enqueue(swp); ++ ++ /*******************/ ++ /* Do pull dequeue */ ++ /*******************/ ++ do_pull_dequeue(swp); ++ ++ /*******************/ ++ /* Enqueue frames */ ++ /*******************/ ++ qbman_swp_push_set(swp, 0, 1); ++ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID); ++ do_enqueue(swp); ++ ++ /*******************/ ++ /* Do push dequeue */ ++ /*******************/ ++ do_push_dequeue(swp); ++ ++ /**************************/ ++ /* Test buffer pool funcs */ ++ /**************************/ ++ buffer_pool_test(swp); ++ ++ /******************/ ++ /* CEETM test */ ++ /******************/ ++ ceetm_test(swp); ++ ++ qbman_swp_finish(swp); ++ pr_info("*****QBMan_test: Kernel test Passed\n"); ++ return 0; ++} ++ ++/* user-space test-case, definitions: ++ * ++ * 1 portal only, using portal index 3. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define QBMAN_TEST_US_SWP 3 /* portal index for user space */ ++ ++#define QBMAN_TEST_MAGIC 'q' ++struct qbman_test_swp_ioctl { ++ unsigned long portal1_cinh; ++ unsigned long portal1_cena; ++}; ++struct qbman_test_dma_ioctl { ++ unsigned long ptr; ++ uint64_t phys_addr; ++}; ++ ++struct qbman_test_priv { ++ int has_swp_map; ++ int has_dma_map; ++ unsigned long pgoff; ++}; ++ ++#define QBMAN_TEST_SWP_MAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl) ++#define QBMAN_TEST_SWP_UNMAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl) ++#define QBMAN_TEST_DMA_MAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl) ++#define QBMAN_TEST_DMA_UNMAP \ ++ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl) ++ ++#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \ ++ 0x10000) >> PAGE_SHIFT) ++#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \ ++ 0x10000) >> PAGE_SHIFT) ++ ++static int qbman_test_open(struct inode *inode, struct file *filp) ++{ ++ struct qbman_test_priv *priv; ++ ++ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL); ++ if (!priv) ++ return -EIO; ++ filp->private_data = priv; ++ priv->has_swp_map = 0; ++ priv->has_dma_map = 0; ++ priv->pgoff = 0; ++ return 0; ++} ++ ++static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ int ret; ++ struct qbman_test_priv *priv = filp->private_data; ++ ++ BUG_ON(!priv); ++ ++ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF) ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF) ++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); ++ else if (vma->vm_pgoff == priv->pgoff) ++ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); ++ else { ++ pr_err("Damn, unrecognised pg_off!!\n"); ++ return -EINVAL; ++ } ++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot); ++ return ret; ++} ++ ++static long qbman_test_ioctl(struct file *fp, unsigned int cmd, ++ unsigned long arg) ++{ ++ void __user *a = (void __user *)arg; ++ unsigned long longret, populate; ++ int ret = 0; ++ struct qbman_test_priv *priv = fp->private_data; ++ ++ BUG_ON(!priv); ++ ++ switch (cmd) { ++ case QBMAN_TEST_SWP_MAP: ++ { ++ struct qbman_test_swp_ioctl params; ++ ++ if (priv->has_swp_map) 
++ return -EINVAL; ++ down_write(&current->mm->mmap_sem); ++ /* Map portal1 CINH */ ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ TEST_PORTAL1_CINH_PGOFF, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ goto out; ++ } ++ params.portal1_cinh = longret; ++ /* Map portal1 CENA */ ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ TEST_PORTAL1_CENA_PGOFF, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ goto out; ++ } ++ params.portal1_cena = longret; ++ priv->has_swp_map = 1; ++out: ++ up_write(&current->mm->mmap_sem); ++ if (!ret && copy_to_user(a, &params, sizeof(params))) ++ return -EFAULT; ++ return ret; ++ } ++ case QBMAN_TEST_SWP_UNMAP: ++ { ++ struct qbman_test_swp_ioctl params; ++ ++ if (!priv->has_swp_map) ++ return -EINVAL; ++ ++ if (copy_from_user(&params, a, sizeof(params))) ++ return -EFAULT; ++ down_write(&current->mm->mmap_sem); ++ do_munmap(current->mm, params.portal1_cena, 0x10000); ++ do_munmap(current->mm, params.portal1_cinh, 0x10000); ++ up_write(&current->mm->mmap_sem); ++ priv->has_swp_map = 0; ++ return 0; ++ } ++ case QBMAN_TEST_DMA_MAP: ++ { ++ struct qbman_test_dma_ioctl params; ++ void *vaddr; ++ ++ if (priv->has_dma_map) ++ return -EINVAL; ++ vaddr = (void *)get_zeroed_page(GFP_KERNEL); ++ params.phys_addr = virt_to_phys(vaddr); ++ priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT; ++ down_write(&current->mm->mmap_sem); ++ longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE, ++ PROT_READ | PROT_WRITE, MAP_SHARED, ++ priv->pgoff, &populate); ++ if (longret & ~PAGE_MASK) { ++ ret = (int)longret; ++ return ret; ++ } ++ params.ptr = longret; ++ priv->has_dma_map = 1; ++ up_write(&current->mm->mmap_sem); ++ if (copy_to_user(a, &params, sizeof(params))) ++ return -EFAULT; ++ return 0; ++ } ++ case QBMAN_TEST_DMA_UNMAP: ++ { ++ struct qbman_test_dma_ioctl params; ++ ++ if (!priv->has_dma_map) ++ return -EINVAL; ++ if (copy_from_user(&params, a, sizeof(params))) ++ return -EFAULT; ++ down_write(&current->mm->mmap_sem); ++ do_munmap(current->mm, params.ptr, PAGE_SIZE); ++ up_write(&current->mm->mmap_sem); ++ free_page((unsigned long)phys_to_virt(params.phys_addr)); ++ priv->has_dma_map = 0; ++ return 0; ++ } ++ default: ++ pr_err("Bad ioctl cmd!\n"); ++ } ++ return -EINVAL; ++} ++ ++static const struct file_operations qbman_fops = { ++ .open = qbman_test_open, ++ .mmap = qbman_test_mmap, ++ .unlocked_ioctl = qbman_test_ioctl ++}; ++ ++static struct miscdevice qbman_miscdev = { ++ .name = "qbman-test", ++ .fops = &qbman_fops, ++ .minor = MISC_DYNAMIC_MINOR, ++}; ++ ++static int qbman_miscdev_init; ++ ++static int test_init(void) ++{ ++ int ret = qbman_test(); ++ ++ if (!ret) { ++ /* MC image supports the test cases, so instantiate the ++ * character device that the user-space test case will use to do ++ * its memory mappings.
*/ ++ ret = misc_register(&qbman_miscdev); ++ if (ret) { ++ pr_err("qbman-test: failed to register misc device\n"); ++ return ret; ++ } ++ pr_info("qbman-test: misc device registered!\n"); ++ qbman_miscdev_init = 1; ++ } ++ return 0; ++} ++ ++static void test_exit(void) ++{ ++ if (qbman_miscdev_init) { ++ misc_deregister(&qbman_miscdev); ++ qbman_miscdev_init = 0; ++ } ++} ++ ++module_init(test_init); ++module_exit(test_exit); +diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +new file mode 100644 +index 0000000..c9b52dd +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h +@@ -0,0 +1,56 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMCP_CMD_H ++#define _FSL_DPMCP_CMD_H ++ ++/* Minimal supported DPMCP Version */ ++#define DPMCP_MIN_VER_MAJOR 3 ++#define DPMCP_MIN_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPMCP_CMDID_CLOSE 0x800 ++#define DPMCP_CMDID_OPEN 0x80b ++#define DPMCP_CMDID_CREATE 0x90b ++#define DPMCP_CMDID_DESTROY 0x900 ++ ++#define DPMCP_CMDID_GET_ATTR 0x004 ++#define DPMCP_CMDID_RESET 0x005 ++ ++#define DPMCP_CMDID_SET_IRQ 0x010 ++#define DPMCP_CMDID_GET_IRQ 0x011 ++#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMCP_CMDID_SET_IRQ_MASK 0x014 ++#define DPMCP_CMDID_GET_IRQ_MASK 0x015 ++#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 ++ ++#endif /* _FSL_DPMCP_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c +new file mode 100644 +index 0000000..e23592a +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp.c +@@ -0,0 +1,318 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "dpmcp.h" ++#include "dpmcp-cmd.h" ++ ++int dpmcp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmcp_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, ++ cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, dpmcp_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return err; ++} ++ ++int dpmcp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmcp_cfg *cfg, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, ++ cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, cfg->portal_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++ ++int dpmcp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmcp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, irq_index); ++ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmcp_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ return 0; ++} ++ ++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct 
mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); ++ return 0; ++} ++ ++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ return 0; ++} ++ ++int dpmcp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmcp_attr *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->id = (int)mc_dec(cmd.params[0], 32, 32); ++ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); ++ return 0; ++} +diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h +new file mode 100644 +index 0000000..e434a24 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmcp.h +@@ -0,0 +1,323 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMCP_H ++#define __FSL_DPMCP_H ++ ++/* Data Path Management Command Portal API ++ * Contains initialization APIs and runtime control APIs for DPMCP ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmcp_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpmcp_id: DPMCP unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmcp_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpmcp_id, ++ uint16_t *token); ++ ++/* Get portal ID from pool */ ++#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) ++ ++/** ++ * dpmcp_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmcp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpmcp_cfg - Structure representing DPMCP configuration ++ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID ++ * from pool ++ */ ++struct dpmcp_cfg { ++ int portal_id; ++}; ++ ++/** ++ * dpmcp_create() - Create the DPMCP object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPMCP object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpmcp_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpmcp_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpmcp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/* IRQ */ ++/* IRQ Index */ ++#define DPMCP_IRQ_INDEX 0 ++/* irq event - Indicates that the link state changed */ ++#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 ++ ++/** ++ * struct dpmcp_irq_cfg - IRQ configuration ++ * @paddr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpmcp_irq_cfg { ++ uint64_t paddr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpmcp_irq_cfg *irq_cfg); ++ ++/** ++ * dpmcp_get_irq() - Get IRQ information from the DPMCP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmcp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpmcp_irq_cfg *irq_cfg); ++ ++/** ++ * dpmcp_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmcp_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmcp_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmcp_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * struct dpmcp_attr - Structure representing DPMCP attributes ++ * @id: DPMCP object ID ++ * @version: DPMCP version ++ */ ++struct dpmcp_attr { ++ int id; ++ /** ++ * struct version - Structure representing DPMCP version ++ * @major: DPMCP major version ++ * @minor: DPMCP minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmcp_get_attributes - Retrieve DPMCP attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPMCP object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmcp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpmcp_attr *attr); ++ ++#endif /* __FSL_DPMCP_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h +new file mode 100644 +index 0000000..ba8cfa9 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h +@@ -0,0 +1,47 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/*************************************************************************//* ++ dpmng-cmd.h ++ ++ defines portal commands ++ ++ *//**************************************************************************/ ++ ++#ifndef __FSL_DPMNG_CMD_H ++#define __FSL_DPMNG_CMD_H ++ ++/* Command IDs */ ++#define DPMNG_CMDID_GET_CONT_ID 0x830 ++#define DPMNG_CMDID_GET_VERSION 0x831 ++ ++#endif /* __FSL_DPMNG_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c +new file mode 100644 +index 0000000..387390b +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dpmng.c +@@ -0,0 +1,85 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. ++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. 
++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpmng.h" ++#include "dpmng-cmd.h" ++ ++int mc_get_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ struct mc_version *mc_ver_info) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32); ++ mc_ver_info->major = mc_dec(cmd.params[0], 32, 32); ++ mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(mc_get_version); ++ ++int dpmng_get_container_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int *container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, ++ cmd_flags, ++ 0); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *container_id = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++ +diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h +new file mode 100644 +index 0000000..9b854fa +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h +@@ -0,0 +1,87 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/*************************************************************************//* ++ dprc-cmd.h ++ ++ defines dprc portal commands ++ ++ *//**************************************************************************/ ++ ++#ifndef _FSL_DPRC_CMD_H ++#define _FSL_DPRC_CMD_H ++ ++/* Minimal supported DPRC Version */ ++#define DPRC_MIN_VER_MAJOR 5 ++#define DPRC_MIN_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPRC_CMDID_CLOSE 0x800 ++#define DPRC_CMDID_OPEN 0x805 ++#define DPRC_CMDID_CREATE 0x905 ++ ++#define DPRC_CMDID_GET_ATTR 0x004 ++#define DPRC_CMDID_RESET_CONT 0x005 ++ ++#define DPRC_CMDID_SET_IRQ 0x010 ++#define DPRC_CMDID_GET_IRQ 0x011 ++#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPRC_CMDID_SET_IRQ_MASK 0x014 ++#define DPRC_CMDID_GET_IRQ_MASK 0x015 ++#define DPRC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPRC_CMDID_CREATE_CONT 0x151 ++#define DPRC_CMDID_DESTROY_CONT 0x152 ++#define DPRC_CMDID_SET_RES_QUOTA 0x155 ++#define DPRC_CMDID_GET_RES_QUOTA 0x156 ++#define DPRC_CMDID_ASSIGN 0x157 ++#define DPRC_CMDID_UNASSIGN 0x158 ++#define DPRC_CMDID_GET_OBJ_COUNT 0x159 ++#define DPRC_CMDID_GET_OBJ 0x15A ++#define DPRC_CMDID_GET_RES_COUNT 0x15B ++#define DPRC_CMDID_GET_RES_IDS 0x15C ++#define DPRC_CMDID_GET_OBJ_REG 0x15E ++#define DPRC_CMDID_SET_OBJ_IRQ 0x15F ++#define DPRC_CMDID_GET_OBJ_IRQ 0x160 ++#define DPRC_CMDID_SET_OBJ_LABEL 0x161 ++#define DPRC_CMDID_GET_OBJ_DESC 0x162 ++ ++#define DPRC_CMDID_CONNECT 0x167 ++#define DPRC_CMDID_DISCONNECT 0x168 ++#define DPRC_CMDID_GET_POOL 0x169 ++#define DPRC_CMDID_GET_POOL_COUNT 0x16A ++ ++#define DPRC_CMDID_GET_CONNECTION 0x16C ++ ++#endif /* _FSL_DPRC_CMD_H */ +diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c +new file mode 100644 +index 0000000..f8d8cbe +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c +@@ -0,0 +1,1084 @@ ++/* ++ * Freescale data path resource container (DPRC) driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include "../include/mc-sys.h" ++#include ++#include ++#include ++#include "dprc-cmd.h" ++#include "dpmcp.h" ++ ++struct dprc_child_objs { ++ int child_count; ++ struct dprc_obj_desc *child_array; ++}; ++ ++static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data) ++{ ++ int i; ++ struct dprc_child_objs *objs; ++ struct fsl_mc_device *mc_dev; ++ ++ WARN_ON(!dev); ++ WARN_ON(!data); ++ mc_dev = to_fsl_mc_device(dev); ++ objs = data; ++ ++ for (i = 0; i < objs->child_count; i++) { ++ struct dprc_obj_desc *obj_desc = &objs->child_array[i]; ++ ++ if (strlen(obj_desc->type) != 0 && ++ FSL_MC_DEVICE_MATCH(mc_dev, obj_desc)) ++ break; ++ } ++ ++ if (i == objs->child_count) ++ fsl_mc_device_remove(mc_dev); ++ ++ return 0; ++} ++ ++static int __fsl_mc_device_remove(struct device *dev, void *data) ++{ ++ WARN_ON(!dev); ++ WARN_ON(data); ++ fsl_mc_device_remove(to_fsl_mc_device(dev)); ++ return 0; ++} ++ ++/** ++ * dprc_remove_devices - Removes devices for objects removed from a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @obj_desc_array: array of object descriptors for child objects currently ++ * present in the DPRC in the MC. 
++ * @num_child_objects_in_mc: number of entries in obj_desc_array ++ * ++ * Synchronizes the state of the Linux bus driver with the actual state of ++ * the MC by removing devices that represent MC objects that have ++ * been dynamically removed in the physical DPRC. ++ */ ++static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev, ++ struct dprc_obj_desc *obj_desc_array, ++ int num_child_objects_in_mc) ++{ ++ if (num_child_objects_in_mc != 0) { ++ /* ++ * Remove child objects that are in the DPRC in Linux, ++ * but not in the MC: ++ */ ++ struct dprc_child_objs objs; ++ ++ objs.child_count = num_child_objects_in_mc; ++ objs.child_array = obj_desc_array; ++ device_for_each_child(&mc_bus_dev->dev, &objs, ++ __fsl_mc_device_remove_if_not_in_mc); ++ } else { ++ /* ++ * There are no child objects for this DPRC in the MC. ++ * So, remove all the child devices from Linux: ++ */ ++ device_for_each_child(&mc_bus_dev->dev, NULL, ++ __fsl_mc_device_remove); ++ } ++} ++ ++static int __fsl_mc_device_match(struct device *dev, void *data) ++{ ++ struct dprc_obj_desc *obj_desc = data; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc); ++} ++ ++static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc ++ *obj_desc, ++ struct fsl_mc_device ++ *mc_bus_dev) ++{ ++ struct device *dev; ++ ++ dev = device_find_child(&mc_bus_dev->dev, obj_desc, ++ __fsl_mc_device_match); ++ ++ return dev ? to_fsl_mc_device(dev) : NULL; ++} ++ ++/** ++ * check_plugged_state_change - Check change in an MC object's plugged state ++ * ++ * @mc_dev: pointer to the fsl-mc device for a given MC object ++ * @obj_desc: pointer to the MC object's descriptor in the MC ++ * ++ * If the plugged state has changed from unplugged to plugged, the fsl-mc ++ * device is bound to the corresponding device driver. ++ * If the plugged state has changed from plugged to unplugged, the fsl-mc ++ * device is unbound from the corresponding device driver. ++ */ ++static void check_plugged_state_change(struct fsl_mc_device *mc_dev, ++ struct dprc_obj_desc *obj_desc) ++{ ++ int error; ++ uint32_t plugged_flag_at_mc = ++ (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); ++ ++ if (plugged_flag_at_mc != ++ (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { ++ if (plugged_flag_at_mc) { ++ mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED; ++ error = device_attach(&mc_dev->dev); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "device_attach() failed: %d\n", ++ error); ++ } ++ } else { ++ mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED; ++ device_release_driver(&mc_dev->dev); ++ } ++ } ++} ++ ++/** ++ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the DPRC, ++ * or NULL, if none. ++ * @obj_desc_array: array of device descriptors for child devices currently ++ * present in the physical DPRC. ++ * @num_child_objects_in_mc: number of entries in obj_desc_array ++ * ++ * Synchronizes the state of the Linux bus driver with the actual ++ * state of the MC by adding objects that have been newly discovered ++ * in the physical DPRC. 
++ */ ++static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ struct dprc_obj_desc *obj_desc_array, ++ int num_child_objects_in_mc) ++{ ++ int error; ++ int i; ++ ++ for (i = 0; i < num_child_objects_in_mc; i++) { ++ struct fsl_mc_device *child_dev; ++ struct dprc_obj_desc *obj_desc = &obj_desc_array[i]; ++ ++ if (strlen(obj_desc->type) == 0) ++ continue; ++ ++ /* ++ * Check if device is already known to Linux: ++ */ ++ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev); ++ if (child_dev) { ++ check_plugged_state_change(child_dev, obj_desc); ++ continue; ++ } ++ ++ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, ++ driver_override, &child_dev); ++ if (error < 0) ++ continue; ++ } ++} ++ ++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) ++{ ++ int pool_type; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[pool_type]; ++ ++ res_pool->type = pool_type; ++ res_pool->max_count = 0; ++ res_pool->free_count = 0; ++ res_pool->mc_bus = mc_bus; ++ INIT_LIST_HEAD(&res_pool->free_list); ++ mutex_init(&res_pool->mutex); ++ } ++} ++ ++static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev, ++ enum fsl_mc_pool_type pool_type) ++{ ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_resource *next; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[pool_type]; ++ int free_count = 0; ++ ++ WARN_ON(res_pool->type != pool_type); ++ WARN_ON(res_pool->free_count != res_pool->max_count); ++ ++ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) { ++ free_count++; ++ WARN_ON(resource->type != res_pool->type); ++ WARN_ON(resource->parent_pool != res_pool); ++ devm_kfree(&mc_bus_dev->dev, resource); ++ } ++ ++ WARN_ON(free_count != res_pool->free_count); ++} ++ ++/* ++ * Clean up all resource pools other than the IRQ pool ++ */ ++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) ++{ ++ int pool_type; ++ ++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { ++ if (pool_type != FSL_MC_POOL_IRQ) ++ dprc_cleanup_resource_pool(mc_bus_dev, pool_type); ++ } ++} ++ ++/** ++ * dprc_scan_objects - Discover objects in a DPRC ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * @driver_override: driver override to apply to new objects found in the DPRC, ++ * or NULL, if none. ++ * @total_irq_count: total number of IRQs needed by objects in the DPRC. ++ * ++ * Detects objects added and removed from a DPRC and synchronizes the ++ * state of the Linux bus driver, MC by adding and removing ++ * devices accordingly. ++ * Two types of devices can be found in a DPRC: allocatable objects (e.g., ++ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni). ++ * All allocatable devices needed to be probed before all non-allocatable ++ * devices, to ensure that device drivers for non-allocatable ++ * devices can allocate any type of allocatable devices. ++ * That is, we need to ensure that the corresponding resource pools are ++ * populated before they can get allocation requests from probe callbacks ++ * of the device drivers for the non-allocatable devices. 
++ */ ++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ unsigned int *total_irq_count) ++{ ++ int num_child_objects; ++ int dprc_get_obj_failures; ++ int error; ++ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; ++ struct dprc_obj_desc *child_obj_desc_array = NULL; ++ ++ error = dprc_get_obj_count(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ &num_child_objects); ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ if (num_child_objects != 0) { ++ int i; ++ ++ child_obj_desc_array = ++ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects, ++ sizeof(*child_obj_desc_array), ++ GFP_KERNEL); ++ if (!child_obj_desc_array) ++ return -ENOMEM; ++ ++ /* ++ * Discover objects currently present in the physical DPRC: ++ */ ++ dprc_get_obj_failures = 0; ++ for (i = 0; i < num_child_objects; i++) { ++ struct dprc_obj_desc *obj_desc = ++ &child_obj_desc_array[i]; ++ ++ error = dprc_get_obj(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ i, obj_desc); ++ ++ /* ++ * -ENXIO means object index was invalid. ++ * This is caused when the DPRC was changed at ++ * the MC during the scan. In this case, ++ * abort the current scan. ++ */ ++ if (error == -ENXIO) ++ return error; ++ ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, ++ "dprc_get_obj(i=%d) failed: %d\n", ++ i, error); ++ /* ++ * Mark the obj entry as "invalid", by using the ++ * empty string as obj type: ++ */ ++ obj_desc->type[0] = '\0'; ++ obj_desc->id = error; ++ dprc_get_obj_failures++; ++ continue; ++ } ++ ++ /* ++ * for DPRC versions that do not support the ++ * shareability attribute, make simplifying assumption ++ * that only SEC is not shareable. ++ */ ++ if ((strcmp(obj_desc->type, "dpseci") == 0) && ++ (obj_desc->ver_major < 4)) ++ obj_desc->flags |= ++ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY; ++ ++ irq_count += obj_desc->irq_count; ++ dev_dbg(&mc_bus_dev->dev, ++ "Discovered object: type %s, id %d\n", ++ obj_desc->type, obj_desc->id); ++ } ++ ++ if (dprc_get_obj_failures != 0) { ++ dev_err(&mc_bus_dev->dev, ++ "%d out of %d devices could not be retrieved\n", ++ dprc_get_obj_failures, num_child_objects); ++ } ++ } ++ ++ *total_irq_count = irq_count; ++ dprc_remove_devices(mc_bus_dev, child_obj_desc_array, ++ num_child_objects); ++ ++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, ++ num_child_objects); ++ ++ if (child_obj_desc_array) ++ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dprc_scan_objects); ++ ++/** ++ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state ++ * ++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object ++ * ++ * Scans the physical DPRC and synchronizes the state of the Linux ++ * bus driver with the actual state of the MC by adding and removing ++ * devices as appropriate. 
++ */ ++static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) ++{ ++ int error; ++ unsigned int irq_count; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ dprc_init_all_resource_pools(mc_bus_dev); ++ ++ /* ++ * Discover objects in the DPRC: ++ */ ++ mutex_lock(&mc_bus->scan_mutex); ++ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); ++ mutex_unlock(&mc_bus->scan_mutex); ++ if (error < 0) ++ goto error; ++ ++ if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { ++ irq_count += FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; ++ error = fsl_mc_populate_irq_pool(mc_bus, irq_count); ++ if (error < 0) ++ goto error; ++ } ++ ++ return 0; ++error: ++ device_for_each_child(&mc_bus_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_bus_dev); ++ return error; ++} ++ ++/** ++ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) ++{ ++ return IRQ_WAKE_THREAD; ++} ++ ++/** ++ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) ++{ ++ int error; ++ uint32_t status; ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ struct fsl_mc_io *mc_io = mc_dev->mc_io; ++ int irq_index = 0; ++ ++ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", ++ irq_num, smp_processor_id()); ++ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) ++ return IRQ_HANDLED; ++ ++ mutex_lock(&mc_bus->scan_mutex); ++ if (WARN_ON(mc_dev->irqs[irq_index]->irq_number != (uint32_t)irq_num)) ++ goto out; ++ ++ status = 0; ++ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, ++ &status); ++ if (error < 0) { ++ dev_err(dev, ++ "dprc_get_irq_status() failed: %d\n", error); ++ goto out; ++ } ++ ++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, ++ status); ++ if (error < 0) { ++ dev_err(dev, ++ "dprc_clear_irq_status() failed: %d\n", error); ++ goto out; ++ } ++ ++ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | ++ DPRC_IRQ_EVENT_OBJ_REMOVED | ++ DPRC_IRQ_EVENT_CONTAINER_DESTROYED | ++ DPRC_IRQ_EVENT_OBJ_DESTROYED | ++ DPRC_IRQ_EVENT_OBJ_CREATED)) { ++ unsigned int irq_count; ++ ++ error = dprc_scan_objects(mc_dev, NULL, &irq_count); ++ if (error < 0) { ++ if (error != -ENXIO) /* don't need to report aborted scan */ ++ dev_err(dev, "dprc_scan_objects() failed: %d\n", error); ++ goto out; ++ } ++ ++ WARN_ON((int16_t)irq_count < 0); ++ ++ if ((int16_t)irq_count > ++ mc_bus->resource_pools[FSL_MC_POOL_IRQ].max_count) { ++ dev_warn(dev, ++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", ++ irq_count, ++ mc_bus->resource_pools[FSL_MC_POOL_IRQ]. 
++ max_count); ++ } ++ } ++ ++out: ++ mutex_unlock(&mc_bus->scan_mutex); ++ return IRQ_HANDLED; ++} ++ ++/* ++ * Disable and clear interrupts for a given DPRC object ++ */ ++static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int error; ++ struct fsl_mc_io *mc_io = mc_dev->mc_io; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (WARN_ON(irq_count == 0)) ++ return -EINVAL; ++ ++ for (i = 0; i < irq_count; i++) { ++ /* ++ * Disable generation of interrupt i, while we configure it: ++ */ ++ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, i, 0); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Disable all interrupt causes for interrupt i: ++ */ ++ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, i, 0x0); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Clear any leftover interrupt i: ++ */ ++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, i, ++ ~0x0U); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Disabling DPRC IRQ %d failed: dprc_clear_irq_status() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ } ++ ++ return 0; ++} ++ ++static void unregister_dprc_irq_handlers(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ struct fsl_mc_device_irq *irq; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = mc_dev->irqs[i]; ++ devm_free_irq(&mc_dev->dev, irq->irq_number, ++ &mc_dev->dev); ++ } ++} ++ ++static int register_dprc_irq_handlers(struct fsl_mc_device *mc_dev) ++{ ++ static const struct irq_handler { ++ irq_handler_t irq_handler; ++ irq_handler_t irq_handler_thread; ++ const char *irq_name; ++ } irq_handlers[] = { ++ [0] = { ++ .irq_handler = dprc_irq0_handler, ++ .irq_handler_thread = dprc_irq0_handler_thread, ++ .irq_name = "FSL MC DPRC irq0", ++ }, ++ }; ++ ++ unsigned int i; ++ int error; ++ struct fsl_mc_device_irq *irq; ++ unsigned int num_irq_handlers_registered = 0; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (WARN_ON(irq_count != ARRAY_SIZE(irq_handlers))) ++ return -EINVAL; ++ ++ for (i = 0; i < ARRAY_SIZE(irq_handlers); i++) { ++ irq = mc_dev->irqs[i]; ++ ++ /* ++ * NOTE: devm_request_threaded_irq() invokes the device-specific ++ * function that programs the MSI physically in the device ++ */ ++ error = devm_request_threaded_irq(&mc_dev->dev, ++ irq->irq_number, ++ irq_handlers[i].irq_handler, ++ irq_handlers[i]. 
++ irq_handler_thread, ++ IRQF_NO_SUSPEND | ++ IRQF_ONESHOT, ++ irq_handlers[i].irq_name, ++ &mc_dev->dev); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "devm_request_threaded_irq() failed: %d\n", ++ error); ++ goto error_unregister_irq_handlers; ++ } ++ ++ num_irq_handlers_registered++; ++ } ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ for (i = 0; i < num_irq_handlers_registered; i++) { ++ irq = mc_dev->irqs[i]; ++ devm_free_irq(&mc_dev->dev, irq->irq_number, ++ &mc_dev->dev); ++ } ++ ++ return error; ++} ++ ++static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int error; ++ int irq_count = mc_dev->obj_desc.irq_count; ++ ++ for (i = 0; i < irq_count; i++) { ++ /* ++ * Enable all interrupt causes for the interrupt: ++ */ ++ error = dprc_set_irq_mask(mc_dev->mc_io, ++ 0, ++ mc_dev->mc_handle, ++ i, ++ ~0x0u); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Enabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ ++ /* ++ * Enable generation of the interrupt: ++ */ ++ error = dprc_set_irq_enable(mc_dev->mc_io, ++ 0, ++ mc_dev->mc_handle, ++ i, 1); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, ++ "Enabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", ++ i, error); ++ ++ return error; ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * Setup interrupts for a given DPRC device ++ */ ++static int dprc_setup_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ ++ error = fsl_mc_allocate_irqs(mc_dev); ++ if (error < 0) ++ return error; ++ ++ error = disable_dprc_irqs(mc_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = register_dprc_irq_handlers(mc_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = enable_dprc_irqs(mc_dev); ++ if (error < 0) ++ goto error_unregister_irq_handlers; ++ ++ return 0; ++ ++error_unregister_irq_handlers: ++ unregister_dprc_irq_handlers(mc_dev); ++ ++error_free_irqs: ++ fsl_mc_free_irqs(mc_dev); ++ return error; ++} ++ ++/* ++ * Creates a DPMCP for a DPRC's built-in MC portal ++ */ ++static int dprc_create_dpmcp(struct fsl_mc_device *dprc_dev) ++{ ++ int error; ++ struct dpmcp_cfg dpmcp_cfg; ++ uint16_t dpmcp_handle; ++ struct dprc_res_req res_req; ++ struct dpmcp_attr dpmcp_attr; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); ++ ++ dpmcp_cfg.portal_id = mc_bus->dprc_attr.portal_id; ++ error = dpmcp_create(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ &dpmcp_cfg, ++ &dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_create() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ /* ++ * Set the state of the newly created DPMCP object to be "plugged": ++ */ ++ ++ error = dpmcp_get_attributes(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle, ++ &dpmcp_attr); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_get_attributes() failed: %d\n", ++ error); ++ goto error_destroy_dpmcp; ++ } ++ ++ if (WARN_ON(dpmcp_attr.id != mc_bus->dprc_attr.portal_id)) { ++ error = -EINVAL; ++ goto error_destroy_dpmcp; ++ } ++ ++ strcpy(res_req.type, "dpmcp"); ++ res_req.num = 1; ++ res_req.options = ++ (DPRC_RES_REQ_OPT_EXPLICIT | DPRC_RES_REQ_OPT_PLUGGED); ++ res_req.id_base_align = dpmcp_attr.id; ++ ++ error = dprc_assign(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dprc_dev->mc_handle, ++ dprc_dev->obj_desc.id, ++ &res_req); ++ ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dprc_assign() failed: %d\n", error); ++ goto error_destroy_dpmcp; ++ } ++ ++ (void)dpmcp_close(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ return 0; 
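++ ++ /* ++ * Failure path: destroy the DPMCP object created by dpmcp_create() ++ * above, so that the MC is left in its original state. ++ */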
++ ++error_destroy_dpmcp: ++ (void)dpmcp_destroy(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ return error; ++} ++ ++/* ++ * Destroys the DPMCP for a DPRC's built-in MC portal ++ */ ++static void dprc_destroy_dpmcp(struct fsl_mc_device *dprc_dev) ++{ ++ int error; ++ uint16_t dpmcp_handle; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); ++ ++ if (WARN_ON(!dprc_dev->mc_io || dprc_dev->mc_io->dpmcp_dev)) ++ return; ++ ++ error = dpmcp_open(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ mc_bus->dprc_attr.portal_id, ++ &dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_open() failed: %d\n", ++ error); ++ return; ++ } ++ ++ error = dpmcp_destroy(dprc_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_handle); ++ if (error < 0) { ++ dev_err(&dprc_dev->dev, "dpmcp_destroy() failed: %d\n", ++ error); ++ return; ++ } ++} ++ ++/** ++ * dprc_probe - callback invoked when a DPRC is being bound to this driver ++ * ++ * @mc_dev: Pointer to fsl-mc device representing a DPRC ++ * ++ * It opens the physical DPRC in the MC. ++ * It scans the DPRC to discover the MC objects contained in it. ++ * It creates the interrupt pool for the MC bus associated with the DPRC. ++ * It configures the interrupts for the DPRC device itself. ++ */ ++static int dprc_probe(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ size_t region_size; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ bool mc_io_created = false; ++ bool dev_root_set = false; ++ ++ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) ++ return -EINVAL; ++ ++ if (mc_dev->mc_io) { ++ /* ++ * This is the root DPRC ++ */ ++ if (WARN_ON(fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ fsl_mc_bus_type.dev_root = &mc_dev->dev; ++ dev_root_set = true; ++ } else { ++ /* ++ * This is a child DPRC ++ */ ++ if (WARN_ON(!fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_dev->obj_desc.region_count == 0)) ++ return -EINVAL; ++ ++ region_size = mc_dev->regions[0].end - ++ mc_dev->regions[0].start + 1; ++ ++ error = fsl_create_mc_io(&mc_dev->dev, ++ mc_dev->regions[0].start, ++ region_size, ++ NULL, 0, &mc_dev->mc_io); ++ if (error < 0) ++ return error; ++ ++ mc_io_created = true; ++ } ++ ++ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, ++ &mc_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle, ++ &mc_bus->dprc_attr); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", ++ error); ++ goto error_cleanup_open; ++ } ++ ++ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || ++ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && ++ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { ++ dev_err(&mc_dev->dev, ++ "ERROR: DPRC version %d.%d not supported\n", ++ mc_bus->dprc_attr.version.major, ++ mc_bus->dprc_attr.version.minor); ++ error = -ENOTSUPP; ++ goto error_cleanup_open; ++ } ++ ++ if (fsl_mc_interrupts_supported()) { ++ /* ++ * Create DPMCP for the DPRC's built-in portal: ++ */ ++ error = dprc_create_dpmcp(mc_dev); ++ if (error < 0) ++ goto error_cleanup_open; ++ } ++ ++ mutex_init(&mc_bus->scan_mutex); ++ ++ /* ++ * Discover MC objects in the DPRC object: ++ */ ++ error = dprc_scan_container(mc_dev); ++ if (error < 0) ++ goto error_destroy_dpmcp; ++ ++ if (fsl_mc_interrupts_supported()) { ++ /* ++ * The fsl_mc_device object associated with the DPMCP object ++ * created above was created as part 
of the ++ * dprc_scan_container() call above: ++ */ ++ if (WARN_ON(!mc_dev->mc_io->dpmcp_dev)) { ++ error = -EINVAL; ++ goto error_cleanup_dprc_scan; ++ } ++ ++ /* ++ * Allocate MC portal to be used in atomic context ++ * (e.g., to program MSIs from program_msi_at_mc()) ++ */ ++ error = fsl_mc_portal_allocate(NULL, ++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, ++ &mc_bus->atomic_mc_io); ++ if (error < 0) ++ goto error_cleanup_dprc_scan; ++ ++ pr_info("fsl-mc: Allocated dpmcp.%d to dprc.%d for atomic MC I/O\n", ++ mc_bus->atomic_mc_io->dpmcp_dev->obj_desc.id, ++ mc_dev->obj_desc.id); ++ ++ /* ++ * Open DPRC handle to be used with mc_bus->atomic_mc_io: ++ */ ++ error = dprc_open(mc_bus->atomic_mc_io, 0, mc_dev->obj_desc.id, ++ &mc_bus->atomic_dprc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", ++ error); ++ goto error_cleanup_atomic_mc_io; ++ } ++ ++ /* ++ * Configure interrupt for the DPMCP object associated with the ++ * DPRC object's built-in portal: ++ * ++ * NOTE: We have to do this after calling dprc_scan_container(), ++ * since dprc_scan_container() populates the IRQ pool for ++ * this DPRC. ++ */ ++ error = fsl_mc_io_setup_dpmcp_irq(mc_dev->mc_io); ++ if (error < 0) ++ goto error_cleanup_atomic_dprc_handle; ++ ++ /* ++ * Configure interrupts for the DPRC object associated with ++ * this MC bus: ++ */ ++ error = dprc_setup_irqs(mc_dev); ++ if (error < 0) ++ goto error_cleanup_atomic_dprc_handle; ++ } ++ ++ dev_info(&mc_dev->dev, "DPRC device bound to driver"); ++ return 0; ++ ++error_cleanup_atomic_dprc_handle: ++ (void)dprc_close(mc_bus->atomic_mc_io, 0, mc_bus->atomic_dprc_handle); ++ ++error_cleanup_atomic_mc_io: ++ fsl_mc_portal_free(mc_bus->atomic_mc_io); ++ ++error_cleanup_dprc_scan: ++ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); ++ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_dev); ++ if (fsl_mc_interrupts_supported()) ++ fsl_mc_cleanup_irq_pool(mc_bus); ++ ++error_destroy_dpmcp: ++ dprc_destroy_dpmcp(mc_dev); ++ ++error_cleanup_open: ++ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++ ++error_cleanup_mc_io: ++ if (mc_io_created) { ++ fsl_destroy_mc_io(mc_dev->mc_io); ++ mc_dev->mc_io = NULL; ++ } ++ ++ if (dev_root_set) ++ fsl_mc_bus_type.dev_root = NULL; ++ ++ return error; ++} ++ ++/* ++ * Tear down interrupts for a given DPRC object ++ */ ++static void dprc_teardown_irqs(struct fsl_mc_device *mc_dev) ++{ ++ (void)disable_dprc_irqs(mc_dev); ++ unregister_dprc_irq_handlers(mc_dev); ++ fsl_mc_free_irqs(mc_dev); ++} ++ ++/** ++ * dprc_remove - callback invoked when a DPRC is being unbound from this driver ++ * ++ * @mc_dev: Pointer to fsl-mc device representing the DPRC ++ * ++ * It removes the DPRC's child objects from Linux (not from the MC) and ++ * closes the DPRC device in the MC. ++ * It tears down the interrupts that were configured for the DPRC device. ++ * It destroys the interrupt pool associated with this MC bus. 
++ */ ++static int dprc_remove(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) ++ return -EINVAL; ++ if (WARN_ON(!mc_dev->mc_io)) ++ return -EINVAL; ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return -EINVAL; ++ ++ if (fsl_mc_interrupts_supported()) { ++ dprc_teardown_irqs(mc_dev); ++ error = dprc_close(mc_bus->atomic_mc_io, 0, ++ mc_bus->atomic_dprc_handle); ++ if (error < 0) { ++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", ++ error); ++ } ++ ++ fsl_mc_portal_free(mc_bus->atomic_mc_io); ++ } ++ ++ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); ++ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); ++ dprc_cleanup_all_resource_pools(mc_dev); ++ dprc_destroy_dpmcp(mc_dev); ++ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); ++ if (error < 0) ++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); ++ ++ if (fsl_mc_interrupts_supported()) ++ fsl_mc_cleanup_irq_pool(mc_bus); ++ ++ fsl_destroy_mc_io(mc_dev->mc_io); ++ mc_dev->mc_io = NULL; ++ ++ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) ++ fsl_mc_bus_type.dev_root = NULL; ++ ++ dev_info(&mc_dev->dev, "DPRC device unbound from driver"); ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dprc"}, ++ {.vendor = 0x0}, ++}; ++ ++static struct fsl_mc_driver dprc_driver = { ++ .driver = { ++ .name = FSL_MC_DPRC_DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ }, ++ .match_id_table = match_id_table, ++ .probe = dprc_probe, ++ .remove = dprc_remove, ++}; ++ ++int __init dprc_driver_init(void) ++{ ++ return fsl_mc_driver_register(&dprc_driver); ++} ++ ++void __exit dprc_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&dprc_driver); ++} +diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c +new file mode 100644 +index 0000000..4d86438 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/dprc.c +@@ -0,0 +1,1218 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++* ++* Redistribution and use in source and binary forms, with or without ++* modification, are permitted provided that the following conditions are met: ++* * Redistributions of source code must retain the above copyright ++* notice, this list of conditions and the following disclaimer. ++* * Redistributions in binary form must reproduce the above copyright ++* notice, this list of conditions and the following disclaimer in the ++* documentation and/or other materials provided with the distribution. ++* * Neither the name of the above-listed copyright holders nor the ++* names of any contributors may be used to endorse or promote products ++* derived from this software without specific prior written permission. ++* ++* ++* ALTERNATIVELY, this software may be distributed under the terms of the ++* GNU General Public License ("GPL") as published by the Free Software ++* Foundation, either version 2 of that License or (at your option) any ++* later version. ++* ++* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++* POSSIBILITY OF SUCH DAMAGE. ++*/ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dprc.h" ++#include "dprc-cmd.h" ++ ++int dprc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int container_id, ++ uint16_t *token) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, ++ 0); ++ cmd.params[0] |= mc_enc(0, 32, container_id); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_open); ++ ++int dprc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_close); ++ ++int dprc_create_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_cfg *cfg, ++ int *child_container_id, ++ uint64_t *child_portal_offset) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.params[0] |= mc_enc(32, 16, cfg->icid); ++ cmd.params[0] |= mc_enc(0, 32, cfg->options); ++ cmd.params[1] |= mc_enc(32, 32, cfg->portal_id); ++ cmd.params[2] |= mc_enc(0, 8, cfg->label[0]); ++ cmd.params[2] |= mc_enc(8, 8, cfg->label[1]); ++ cmd.params[2] |= mc_enc(16, 8, cfg->label[2]); ++ cmd.params[2] |= mc_enc(24, 8, cfg->label[3]); ++ cmd.params[2] |= mc_enc(32, 8, cfg->label[4]); ++ cmd.params[2] |= mc_enc(40, 8, cfg->label[5]); ++ cmd.params[2] |= mc_enc(48, 8, cfg->label[6]); ++ cmd.params[2] |= mc_enc(56, 8, cfg->label[7]); ++ cmd.params[3] |= mc_enc(0, 8, cfg->label[8]); ++ cmd.params[3] |= mc_enc(8, 8, cfg->label[9]); ++ cmd.params[3] |= mc_enc(16, 8, cfg->label[10]); ++ cmd.params[3] |= mc_enc(24, 8, cfg->label[11]); ++ cmd.params[3] |= mc_enc(32, 8, cfg->label[12]); ++ cmd.params[3] |= mc_enc(40, 8, cfg->label[13]); ++ cmd.params[3] |= mc_enc(48, 8, cfg->label[14]); ++ cmd.params[3] |= mc_enc(56, 8, cfg->label[15]); ++ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *child_container_id = mc_dec(cmd.params[1], 0, 32); ++ *child_portal_offset = mc_dec(cmd.params[2], 0, 64); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_create_container); ++ ++int dprc_destroy_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, 
&cmd); ++} ++EXPORT_SYMBOL(dprc_destroy_container); ++ ++int dprc_reset_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_reset_container); ++ ++int dprc_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32); ++ *type = mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq); ++ ++int dprc_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq); ++ ++int dprc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *en = mc_dec(cmd.params[0], 0, 8); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_enable); ++ ++int dprc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 8, en); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq_enable); ++ ++int dprc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *mask = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_mask); ++ ++int dprc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, 
++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, mask); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_irq_mask); ++ ++int dprc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, *status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *status = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_irq_status); ++ ++int dprc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, status); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_clear_irq_status); ++ ++int dprc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_attributes *attr) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ attr->container_id = mc_dec(cmd.params[0], 0, 32); ++ attr->icid = mc_dec(cmd.params[0], 32, 16); ++ attr->options = mc_dec(cmd.params[1], 0, 32); ++ attr->portal_id = mc_dec(cmd.params[1], 32, 32); ++ attr->version.major = mc_dec(cmd.params[2], 0, 16); ++ attr->version.minor = mc_dec(cmd.params[2], 16, 16); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_attributes); ++ ++int dprc_set_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t quota) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[0] |= mc_enc(32, 16, quota); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} 
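++ ++/* ++ * Note: as the callers above illustrate, mc_enc(offset, width, val) encodes ++ * 'val' as a 'width'-bit field starting at bit 'offset' of a 64-bit command ++ * word, and mc_dec() extracts such a field from a response word; fixed-size ++ * strings, such as the 16-character resource and object type names, are ++ * therefore packed and unpacked one character per byte. ++ */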
++EXPORT_SYMBOL(dprc_set_res_quota); ++ ++int dprc_get_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t *quota) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *quota = mc_dec(cmd.params[0], 32, 16); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_quota); ++ ++int dprc_assign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int container_id, ++ struct dprc_res_req *res_req) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, container_id); ++ cmd.params[0] |= mc_enc(32, 32, res_req->options); ++ cmd.params[1] |= mc_enc(0, 32, res_req->num); ++ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); ++ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); ++ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_assign); ++ ++int dprc_unassign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ struct dprc_res_req *res_req) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, child_container_id); ++ cmd.params[0] |= mc_enc(32, 32, res_req->options); ++ cmd.params[1] |= mc_enc(0, 32, res_req->num); ++ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); ++ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); ++ cmd.params[2] 
|= mc_enc(32, 8, res_req->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_unassign); ++ ++int dprc_get_pool_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *pool_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *pool_count = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_pool_count); ++ ++int dprc_get_pool(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int pool_index, ++ char *type) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, pool_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ type[0] = mc_dec(cmd.params[1], 0, 8); ++ type[1] = mc_dec(cmd.params[1], 8, 8); ++ type[2] = mc_dec(cmd.params[1], 16, 8); ++ type[3] = mc_dec(cmd.params[1], 24, 8); ++ type[4] = mc_dec(cmd.params[1], 32, 8); ++ type[5] = mc_dec(cmd.params[1], 40, 8); ++ type[6] = mc_dec(cmd.params[1], 48, 8); ++ type[7] = mc_dec(cmd.params[1], 56, 8); ++ type[8] = mc_dec(cmd.params[2], 0, 8); ++ type[9] = mc_dec(cmd.params[2], 8, 8); ++ type[10] = mc_dec(cmd.params[2], 16, 8); ++ type[11] = mc_dec(cmd.params[2], 24, 8); ++ type[12] = mc_dec(cmd.params[2], 32, 8); ++ type[13] = mc_dec(cmd.params[2], 40, 8); ++ type[14] = mc_dec(cmd.params[2], 48, 8); ++ type[15] = '\0'; ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_pool); ++ ++int dprc_get_obj_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *obj_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, ++ cmd_flags, ++ token); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *obj_count = mc_dec(cmd.params[0], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_count); ++ ++int dprc_get_obj(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int obj_index, ++ struct dprc_obj_desc *obj_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_index); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ obj_desc->id = mc_dec(cmd.params[0], 32, 32); ++ obj_desc->vendor = mc_dec(cmd.params[1], 0, 16); ++ 
obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8); ++ obj_desc->region_count = mc_dec(cmd.params[1], 24, 8); ++ obj_desc->state = mc_dec(cmd.params[1], 32, 32); ++ obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16); ++ obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); ++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); ++ obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); ++ obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); ++ obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); ++ obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8); ++ obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8); ++ obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8); ++ obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8); ++ obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8); ++ obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8); ++ obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8); ++ obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8); ++ obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8); ++ obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8); ++ obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8); ++ obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8); ++ obj_desc->type[15] = '\0'; ++ obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8); ++ obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8); ++ obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8); ++ obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8); ++ obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8); ++ obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8); ++ obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8); ++ obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8); ++ obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8); ++ obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8); ++ obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8); ++ obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8); ++ obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8); ++ obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8); ++ obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8); ++ obj_desc->label[15] = '\0'; ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj); ++ ++int dprc_get_obj_desc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ struct dprc_obj_desc *obj_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32); ++ obj_desc->vendor = (uint16_t)mc_dec(cmd.params[1], 0, 16); ++ obj_desc->irq_count = (uint8_t)mc_dec(cmd.params[1], 16, 8); ++ obj_desc->region_count = (uint8_t)mc_dec(cmd.params[1],
24, 8); ++ obj_desc->state = (uint32_t)mc_dec(cmd.params[1], 32, 32); ++ obj_desc->ver_major = (uint16_t)mc_dec(cmd.params[2], 0, 16); ++ obj_desc->ver_minor = (uint16_t)mc_dec(cmd.params[2], 16, 16); ++ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); ++ obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); ++ obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); ++ obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); ++ obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8); ++ obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8); ++ obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8); ++ obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8); ++ obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8); ++ obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8); ++ obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8); ++ obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8); ++ obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8); ++ obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8); ++ obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8); ++ obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8); ++ obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8); ++ obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8); ++ obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8); ++ obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8); ++ obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8); ++ obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8); ++ obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8); ++ obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8); ++ obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8); ++ obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8); ++ obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8); ++ obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8); ++ obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8); ++ obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8); ++ obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8); ++ obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8); ++ obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_desc); ++ ++int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); ++ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); ++ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); ++ cmd.params[2] |= mc_enc(32, 32, obj_id); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, 
obj_type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_obj_irq); ++ ++int dprc_get_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[0] |= mc_enc(32, 8, irq_index); ++ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); ++ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); ++ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); ++ *type = (int)mc_dec(cmd.params[2], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_irq); ++ ++int dprc_get_res_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ int *res_count) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ *res_count = 0; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, ++ cmd_flags, ++ token); ++ cmd.params[1] |= mc_enc(0, 8, type[0]); ++ cmd.params[1] |= mc_enc(8, 8, type[1]); ++ cmd.params[1] |= mc_enc(16, 8, type[2]); ++ cmd.params[1] |= mc_enc(24, 8, type[3]); ++ cmd.params[1] |= mc_enc(32, 8, type[4]); ++ cmd.params[1] |= mc_enc(40, 8, type[5]); ++ cmd.params[1] |= mc_enc(48, 8, type[6]); ++ cmd.params[1] |= mc_enc(56, 8, type[7]); ++ cmd.params[2] |= mc_enc(0, 8, type[8]); ++ cmd.params[2] |= mc_enc(8, 8, type[9]); ++ cmd.params[2] |= mc_enc(16, 8, type[10]); ++ cmd.params[2] |= mc_enc(24, 8, type[11]); ++ cmd.params[2] |= mc_enc(32, 8, type[12]); ++ cmd.params[2] |= mc_enc(40, 8, type[13]); ++ cmd.params[2] |= mc_enc(48, 8, type[14]); ++ cmd.params[2] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ *res_count = mc_dec(cmd.params[0], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_count); ++ ++int dprc_get_res_ids(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ struct dprc_res_ids_range_desc *range_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status); ++ cmd.params[1] |= mc_enc(0, 32, range_desc->base_id); ++ cmd.params[1] |= mc_enc(32, 32, range_desc->last_id); ++ cmd.params[2] |= mc_enc(0, 8, type[0]); ++ 
cmd.params[2] |= mc_enc(8, 8, type[1]); ++ cmd.params[2] |= mc_enc(16, 8, type[2]); ++ cmd.params[2] |= mc_enc(24, 8, type[3]); ++ cmd.params[2] |= mc_enc(32, 8, type[4]); ++ cmd.params[2] |= mc_enc(40, 8, type[5]); ++ cmd.params[2] |= mc_enc(48, 8, type[6]); ++ cmd.params[2] |= mc_enc(56, 8, type[7]); ++ cmd.params[3] |= mc_enc(0, 8, type[8]); ++ cmd.params[3] |= mc_enc(8, 8, type[9]); ++ cmd.params[3] |= mc_enc(16, 8, type[10]); ++ cmd.params[3] |= mc_enc(24, 8, type[11]); ++ cmd.params[3] |= mc_enc(32, 8, type[12]); ++ cmd.params[3] |= mc_enc(40, 8, type[13]); ++ cmd.params[3] |= mc_enc(48, 8, type[14]); ++ cmd.params[3] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ range_desc->iter_status = mc_dec(cmd.params[0], 42, 7); ++ range_desc->base_id = mc_dec(cmd.params[1], 0, 32); ++ range_desc->last_id = mc_dec(cmd.params[1], 32, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_res_ids); ++ ++int dprc_get_obj_region(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t region_index, ++ struct dprc_region_desc *region_desc) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[0] |= mc_enc(48, 8, region_index); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, '\0'); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ region_desc->base_offset = mc_dec(cmd.params[1], 0, 64); ++ region_desc->size = mc_dec(cmd.params[2], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_obj_region); ++ ++int dprc_set_obj_label(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ char *label) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, ++ cmd_flags, ++ token); ++ ++ cmd.params[0] |= mc_enc(0, 32, obj_id); ++ cmd.params[1] |= mc_enc(0, 8, label[0]); ++ cmd.params[1] |= mc_enc(8, 8, label[1]); ++ cmd.params[1] |= mc_enc(16, 8, label[2]); ++ cmd.params[1] |= mc_enc(24, 8, label[3]); ++ cmd.params[1] |= mc_enc(32, 8, label[4]); ++ cmd.params[1] |= mc_enc(40, 8, label[5]); ++ cmd.params[1] |= mc_enc(48, 8, label[6]); ++ cmd.params[1] |= mc_enc(56, 8, label[7]); ++ cmd.params[2] |= mc_enc(0, 8, label[8]); ++ cmd.params[2] |= mc_enc(8, 8, label[9]); ++ cmd.params[2] |= mc_enc(16, 8, label[10]); ++ cmd.params[2] |= mc_enc(24, 8, label[11]); ++ cmd.params[2] |= mc_enc(32, 8, label[12]); ++ cmd.params[2] |= mc_enc(40, 8, label[13]); ++ cmd.params[2] |= mc_enc(48, 8, 
label[14]); ++ cmd.params[2] |= mc_enc(56, 8, label[15]); ++ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); ++ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); ++ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); ++ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); ++ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); ++ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); ++ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); ++ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); ++ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); ++ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); ++ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); ++ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); ++ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); ++ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); ++ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); ++ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_set_obj_label); ++ ++int dprc_connect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ const struct dprc_endpoint *endpoint2, ++ const struct dprc_connection_cfg *cfg) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); ++ cmd.params[1] |= mc_enc(0, 32, endpoint2->id); ++ cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id); ++ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]); ++ cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]); ++ cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]); ++ cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]); ++ cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]); ++ cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]); ++ cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]); ++ cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]); ++ cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]); ++ cmd.params[4] |= mc_enc(0, 32, cfg->max_rate); ++ cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate); ++ cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]); ++ cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]); ++ cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]); ++ cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]); ++ cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]); ++ cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]); ++ cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]); ++ cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]); ++ cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]); ++ cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]); ++ cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]); ++ cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]); ++ cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]); ++ cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]); ++ cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]); ++ cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_connect); ++ ++int dprc_disconnect(struct fsl_mc_io *mc_io, ++ uint32_t 
cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint) ++{ ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint->if_id); ++ cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]); ++ cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]); ++ cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]); ++ cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]); ++ cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]); ++ cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]); ++ cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]); ++ cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]); ++ cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++EXPORT_SYMBOL(dprc_disconnect); ++ ++int dprc_get_connection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ struct dprc_endpoint *endpoint2, ++ int *state) ++{ ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, ++ cmd_flags, ++ token); ++ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); ++ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); ++ cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]); ++ cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]); ++ cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]); ++ cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]); ++ cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]); ++ cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]); ++ cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]); ++ cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]); ++ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]); ++ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]); ++ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]); ++ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]); ++ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]); ++ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]); ++ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]); ++ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]); ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ /* retrieve response parameters */ ++ endpoint2->id = mc_dec(cmd.params[3], 0, 32); ++ endpoint2->if_id = mc_dec(cmd.params[3], 32, 32); ++ endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8); ++ endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8); ++ endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8); ++ endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8); ++ endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8); ++ endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8); ++ endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8); ++ endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8); ++ endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8); ++ endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8); ++ endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8); ++ endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8); ++ endpoint2->type[12] = 
mc_dec(cmd.params[5], 32, 8); ++ endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8); ++ endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8); ++ endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8); ++ *state = mc_dec(cmd.params[6], 0, 32); ++ ++ return 0; ++} ++EXPORT_SYMBOL(dprc_get_connection); +diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c +new file mode 100644 +index 0000000..a3940a0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-allocator.c +@@ -0,0 +1,716 @@ ++/* ++ * Freescale MC object device allocator driver ++ * ++ * Copyright (C) 2013 Freescale Semiconductor, Inc. ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include "../include/mc-sys.h" ++#include ++#include "../include/dpbp-cmd.h" ++#include "../include/dpcon-cmd.h" ++#include "dpmcp-cmd.h" ++#include "dpmcp.h" ++ ++/** ++ * fsl_mc_resource_pool_add_device - add allocatable device to a resource ++ * pool of a given MC bus ++ * ++ * @mc_bus: pointer to the MC bus ++ * @pool_type: MC bus pool type ++ * @mc_dev: Pointer to allocatable MC object device ++ * ++ * It adds an allocatable MC object device to a container's resource pool of ++ * the given resource type ++ */ ++static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus ++ *mc_bus, ++ enum fsl_mc_pool_type ++ pool_type, ++ struct fsl_mc_device ++ *mc_dev) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) ++ goto out; ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto out; ++ if (WARN_ON(mc_dev->resource)) ++ goto out; ++ ++ res_pool = &mc_bus->resource_pools[pool_type]; ++ if (WARN_ON(res_pool->type != pool_type)) ++ goto out; ++ if (WARN_ON(res_pool->mc_bus != mc_bus)) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ ++ if (WARN_ON(res_pool->max_count < 0)) ++ goto out; ++ if (WARN_ON(res_pool->free_count < 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto out; ++ ++ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), ++ GFP_KERNEL); ++ if (!resource) { ++ error = -ENOMEM; ++ dev_err(&mc_bus_dev->dev, ++ "Failed to allocate memory for fsl_mc_resource\n"); ++ goto out; ++ } ++ ++ resource->type = pool_type; ++ resource->id = mc_dev->obj_desc.id; ++ resource->data = mc_dev; ++ resource->parent_pool = res_pool; ++ INIT_LIST_HEAD(&resource->node); ++ list_add_tail(&resource->node, &res_pool->free_list); ++ mc_dev->resource = resource; ++ res_pool->free_count++; ++ res_pool->max_count++; ++ error = 0; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a ++ * resource pool ++ * ++ * @mc_dev: Pointer to allocatable MC object device ++ * ++ * It permanently removes an allocatable MC object device from the resource ++ * pool, the device is currently in, as long as it is in the pool's free list. 
++ */ ++static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device ++ *mc_dev) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto out; ++ ++ resource = mc_dev->resource; ++ if (WARN_ON(!resource || resource->data != mc_dev)) ++ goto out; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ res_pool = resource->parent_pool; ++ if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type])) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ ++ if (WARN_ON(res_pool->max_count <= 0)) ++ goto out; ++ if (WARN_ON(res_pool->free_count <= 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto out; ++ ++ /* ++ * If the device is currently allocated, its resource is not ++ * in the free list and thus, the device cannot be removed. ++ */ ++ if (list_empty(&resource->node)) { ++ error = -EBUSY; ++ dev_err(&mc_bus_dev->dev, ++ "Device %s cannot be removed from resource pool\n", ++ dev_name(&mc_dev->dev)); ++ goto out; ++ } ++ ++ list_del(&resource->node); ++ INIT_LIST_HEAD(&resource->node); ++ res_pool->free_count--; ++ res_pool->max_count--; ++ ++ devm_kfree(&mc_bus_dev->dev, resource); ++ mc_dev->resource = NULL; ++ error = 0; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++ ++static const char *const fsl_mc_pool_type_strings[] = { ++ [FSL_MC_POOL_DPMCP] = "dpmcp", ++ [FSL_MC_POOL_DPBP] = "dpbp", ++ [FSL_MC_POOL_DPCON] = "dpcon", ++ [FSL_MC_POOL_IRQ] = "irq", ++}; ++ ++static int __must_check object_type_to_pool_type(const char *object_type, ++ enum fsl_mc_pool_type ++ *pool_type) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) { ++ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) { ++ *pool_type = i; ++ return 0; ++ } ++ } ++ ++ return -EINVAL; ++} ++ ++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_resource **new_resource) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc_resource *resource; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ int error = -EINVAL; ++ bool mutex_locked = false; ++ ++ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != ++ FSL_MC_NUM_POOL_TYPES); ++ ++ *new_resource = NULL; ++ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) ++ goto error; ++ ++ res_pool = &mc_bus->resource_pools[pool_type]; ++ if (WARN_ON(res_pool->mc_bus != mc_bus)) ++ goto error; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ resource = list_first_entry_or_null(&res_pool->free_list, ++ struct fsl_mc_resource, node); ++ ++ if (!resource) { ++ WARN_ON(res_pool->free_count != 0); ++ error = -ENXIO; ++ dev_err(&mc_bus_dev->dev, ++ "No more resources of type %s left\n", ++ fsl_mc_pool_type_strings[pool_type]); ++ goto error; ++ } ++ ++ if (WARN_ON(resource->type != pool_type)) ++ goto error; ++ if (WARN_ON(resource->parent_pool != res_pool)) ++ goto error; ++ if (WARN_ON(res_pool->free_count <= 0 || ++ res_pool->free_count > res_pool->max_count)) ++ goto error; ++ ++ list_del(&resource->node); ++ INIT_LIST_HEAD(&resource->node); ++ ++ res_pool->free_count--; ++ mutex_unlock(&res_pool->mutex); ++ *new_resource = resource; ++ return 0; ++error: ++ if (mutex_locked) ++ 
mutex_unlock(&res_pool->mutex); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); ++ ++void fsl_mc_resource_free(struct fsl_mc_resource *resource) ++{ ++ struct fsl_mc_resource_pool *res_pool; ++ bool mutex_locked = false; ++ ++ res_pool = resource->parent_pool; ++ if (WARN_ON(resource->type != res_pool->type)) ++ goto out; ++ ++ mutex_lock(&res_pool->mutex); ++ mutex_locked = true; ++ if (WARN_ON(res_pool->free_count < 0 || ++ res_pool->free_count >= res_pool->max_count)) ++ goto out; ++ ++ if (WARN_ON(!list_empty(&resource->node))) ++ goto out; ++ ++ list_add_tail(&resource->node, &res_pool->free_list); ++ res_pool->free_count++; ++out: ++ if (mutex_locked) ++ mutex_unlock(&res_pool->mutex); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_resource_free); ++ ++/** ++ * fsl_mc_portal_allocate - Allocates an MC portal ++ * ++ * @mc_dev: MC device for which the MC portal is to be allocated ++ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated ++ * MC portal. ++ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object ++ * that wraps the allocated MC portal is to be returned ++ * ++ * This function allocates an MC portal from the device's parent DPRC, ++ * from the corresponding MC bus' pool of MC portals and wraps ++ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the ++ * portal is allocated from its own MC bus. ++ */ ++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, ++ uint16_t mc_io_flags, ++ struct fsl_mc_io **new_mc_io) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ phys_addr_t mc_portal_phys_addr; ++ size_t mc_portal_size; ++ struct fsl_mc_device *dpmcp_dev; ++ int error = -EINVAL; ++ struct fsl_mc_resource *resource = NULL; ++ struct fsl_mc_io *mc_io = NULL; ++ ++ if (!mc_dev) { ++ if (WARN_ON(!fsl_mc_bus_type.dev_root)) ++ return error; ++ ++ mc_bus_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ } else if (mc_dev->flags & FSL_MC_IS_DPRC) { ++ mc_bus_dev = mc_dev; ++ } else { ++ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) ++ return error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ } ++ ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ *new_mc_io = NULL; ++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource); ++ if (error < 0) ++ return error; ++ ++ error = -EINVAL; ++ dpmcp_dev = resource->data; ++ if (WARN_ON(!dpmcp_dev || ++ strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) ++ goto error_cleanup_resource; ++ ++ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR || ++ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR && ++ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) { ++ dev_err(&dpmcp_dev->dev, ++ "ERROR: Version %d.%d of DPMCP not supported.\n", ++ dpmcp_dev->obj_desc.ver_major, ++ dpmcp_dev->obj_desc.ver_minor); ++ error = -ENOTSUPP; ++ goto error_cleanup_resource; ++ } ++ ++ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) ++ goto error_cleanup_resource; ++ ++ mc_portal_phys_addr = dpmcp_dev->regions[0].start; ++ mc_portal_size = dpmcp_dev->regions[0].end - ++ dpmcp_dev->regions[0].start + 1; ++ ++ if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) ++ goto error_cleanup_resource; ++ ++ error = fsl_create_mc_io(&mc_bus_dev->dev, ++ mc_portal_phys_addr, ++ mc_portal_size, dpmcp_dev, ++ mc_io_flags, &mc_io); ++ if (error < 0) ++ goto error_cleanup_resource; ++ ++ *new_mc_io = mc_io; ++ return 0; ++ ++error_cleanup_resource: ++ fsl_mc_resource_free(resource); ++ return error; ++} 
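For orientation, a minimal sketch (not part of the patch) of how a client object driver would be expected to consume the portal allocator above; the example_* names and the drvdata bookkeeping are illustrative assumptions, not APIs added by this patch:

/* Hypothetical consumer of the DPMCP portal pool (sketch only). */
static int example_probe(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_io *mc_io;
	int error;

	/* Borrow a DPMCP-backed portal from the parent DPRC's pool */
	error = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);
	if (error < 0)
		return error;

	/* Start from a clean command state before issuing MC commands */
	error = fsl_mc_portal_reset(mc_io);
	if (error < 0) {
		fsl_mc_portal_free(mc_io);
		return error;
	}

	dev_set_drvdata(&mc_dev->dev, mc_io);
	return 0;
}

static int example_remove(struct fsl_mc_device *mc_dev)
{
	/* Return the portal (and its DPMCP) to the parent DPRC's pool */
	fsl_mc_portal_free(dev_get_drvdata(&mc_dev->dev));
	return 0;
}

On remove, fsl_mc_portal_free() below both destroys the fsl_mc_io wrapper and puts the DPMCP resource back on its pool's free list, so a consumer never calls fsl_mc_resource_free() directly.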
++EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate); ++ ++/** ++ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals ++ * of a given MC bus ++ * ++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free ++ */ ++void fsl_mc_portal_free(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev; ++ struct fsl_mc_resource *resource; ++ ++ /* ++ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed ++ * to have a DPMCP object associated with it. ++ */ ++ dpmcp_dev = mc_io->dpmcp_dev; ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ if (WARN_ON(strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) ++ return; ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return; ++ ++ resource = dpmcp_dev->resource; ++ if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP)) ++ return; ++ ++ if (WARN_ON(resource->data != dpmcp_dev)) ++ return; ++ ++ fsl_destroy_mc_io(mc_io); ++ fsl_mc_resource_free(resource); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_portal_free); ++ ++/** ++ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object ++ * ++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free ++ */ ++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error); ++ return error; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_portal_reset); ++ ++/** ++ * fsl_mc_object_allocate - Allocates an MC object device of the given ++ * pool type from a given MC bus ++ * ++ * @mc_dev: MC device for which the MC object device is to be allocated ++ * @pool_type: MC bus resource pool type ++ * @new_mc_adev: Pointer to area where the pointer to the allocated ++ * MC object device is to be returned ++ * ++ * This function allocates an MC object device from the device's parent DPRC, ++ * from the corresponding MC bus' pool of allocatable MC object devices of ++ * the given resource type. mc_dev cannot be a DPRC itself. ++ * ++ * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC ++ * portals are allocated using fsl_mc_portal_allocate(), instead of ++ * this function. ++ */ ++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_device **new_mc_adev) ++{ ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_device *mc_adev; ++ int error = -EINVAL; ++ struct fsl_mc_resource *resource = NULL; ++ ++ *new_mc_adev = NULL; ++ if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC)) ++ goto error; ++ ++ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) ++ goto error; ++ ++ if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP)) ++ goto error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource); ++ if (error < 0) ++ goto error; ++ ++ mc_adev = resource->data; ++ if (WARN_ON(!mc_adev)) ++ goto error; ++ ++ *new_mc_adev = mc_adev; ++ return 0; ++error: ++ if (resource) ++ fsl_mc_resource_free(resource); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); ++ ++/** ++ * fsl_mc_object_free - Returns an allocatable MC object device to the ++ * corresponding resource pool of a given MC bus.
++ * ++ * @mc_adev: Pointer to the MC object device ++ */ ++void fsl_mc_object_free(struct fsl_mc_device *mc_adev) ++{ ++ struct fsl_mc_resource *resource; ++ ++ resource = mc_adev->resource; ++ if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP)) ++ return; ++ if (WARN_ON(resource->data != mc_adev)) ++ return; ++ ++ fsl_mc_resource_free(resource); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_object_free); ++ ++/** ++ * It allocates the IRQs required by a given MC object device. The ++ * IRQs are allocated from the interrupt pool associated with the ++ * MC bus that contains the device, if the device is not a DPRC device. ++ * Otherwise, the IRQs are allocated from the interrupt pool associated ++ * with the MC bus that represents the DPRC device itself. ++ */ ++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int irq_count; ++ int res_allocated_count = 0; ++ int error = -EINVAL; ++ struct fsl_mc_device_irq **irqs = NULL; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_resource_pool *res_pool; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ if (!mc->gic_supported) ++ return -ENOTSUPP; ++ ++ if (WARN_ON(mc_dev->irqs)) ++ goto error; ++ ++ irq_count = mc_dev->obj_desc.irq_count; ++ if (WARN_ON(irq_count == 0)) ++ goto error; ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ else ++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ goto error; ++ ++ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ if (res_pool->free_count < irq_count) { ++ dev_err(&mc_dev->dev, ++ "Not able to allocate %u irqs for device\n", irq_count); ++ error = -ENOSPC; ++ goto error; ++ } ++ ++ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), ++ GFP_KERNEL); ++ if (!irqs) { ++ error = -ENOMEM; ++ dev_err(&mc_dev->dev, "No memory to allocate irqs[]\n"); ++ goto error; ++ } ++ ++ for (i = 0; i < irq_count; i++) { ++ struct fsl_mc_resource *resource; ++ ++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, ++ &resource); ++ if (error < 0) ++ goto error; ++ ++ irqs[i] = to_fsl_mc_irq(resource); ++ res_allocated_count++; ++ ++ WARN_ON(irqs[i]->mc_dev); ++ irqs[i]->mc_dev = mc_dev; ++ irqs[i]->dev_irq_index = i; ++ } ++ ++ mc_dev->irqs = irqs; ++ return 0; ++error: ++ for (i = 0; i < res_allocated_count; i++) { ++ irqs[i]->mc_dev = NULL; ++ fsl_mc_resource_free(&irqs[i]->resource); ++ } ++ ++ if (irqs) ++ devm_kfree(&mc_dev->dev, irqs); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); ++ ++/* ++ * It frees the IRQs that were allocated for a MC object device, by ++ * returning them to the corresponding interrupt pool. 
++ */ ++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) ++{ ++ int i; ++ int irq_count; ++ struct fsl_mc_bus *mc_bus; ++ struct fsl_mc_device_irq **irqs = mc_dev->irqs; ++ ++ if (WARN_ON(!irqs)) ++ return; ++ ++ irq_count = mc_dev->obj_desc.irq_count; ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ else ++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return; ++ ++ for (i = 0; i < irq_count; i++) { ++ WARN_ON(!irqs[i]->mc_dev); ++ irqs[i]->mc_dev = NULL; ++ fsl_mc_resource_free(&irqs[i]->resource); ++ } ++ ++ devm_kfree(&mc_dev->dev, mc_dev->irqs); ++ mc_dev->irqs = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); ++ ++/** ++ * fsl_mc_allocator_probe - callback invoked when an allocatable device is ++ * being added to the system ++ */ ++static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) ++{ ++ enum fsl_mc_pool_type pool_type; ++ struct fsl_mc_device *mc_bus_dev; ++ struct fsl_mc_bus *mc_bus; ++ int error = -EINVAL; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ goto error; ++ ++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); ++ if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type)) ++ goto error; ++ ++ mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ ++ /* ++ * If mc_dev is the DPMCP object for the parent DPRC's built-in ++ * portal, we don't add this DPMCP to the DPMCP object pool, ++ * but instead allocate it directly to the parent DPRC (mc_bus_dev): ++ */ ++ if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0 && ++ mc_dev->obj_desc.id == mc_bus->dprc_attr.portal_id) { ++ error = fsl_mc_io_set_dpmcp(mc_bus_dev->mc_io, mc_dev); ++ if (error < 0) ++ goto error; ++ } else { ++ error = object_type_to_pool_type(mc_dev->obj_desc.type, ++ &pool_type); ++ if (error < 0) ++ goto error; ++ ++ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, ++ mc_dev); ++ if (error < 0) ++ goto error; ++ } ++ ++ dev_dbg(&mc_dev->dev, ++ "Allocatable MC object device bound to fsl_mc_allocator driver"); ++ return 0; ++error: ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_allocator_remove - callback invoked when an allocatable device is ++ * being removed from the system ++ */ ++static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) ++{ ++ int error; ++ ++ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) ++ return -EINVAL; ++ ++ if (mc_dev->resource) { ++ error = fsl_mc_resource_pool_remove_device(mc_dev); ++ if (error < 0) ++ return error; ++ } ++ ++ dev_dbg(&mc_dev->dev, ++ "Allocatable MC object device unbound from fsl_mc_allocator driver"); ++ return 0; ++} ++ ++static const struct fsl_mc_device_match_id match_id_table[] = { ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpbp", ++ }, ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpmcp", ++ }, ++ { ++ .vendor = FSL_MC_VENDOR_FREESCALE, ++ .obj_type = "dpcon", ++ }, ++ {.vendor = 0x0}, ++}; ++ ++static struct fsl_mc_driver fsl_mc_allocator_driver = { ++ .driver = { ++ .name = "fsl_mc_allocator", ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ }, ++ .match_id_table = match_id_table, ++ .probe = fsl_mc_allocator_probe, ++ .remove = fsl_mc_allocator_remove, ++}; ++ ++int __init fsl_mc_allocator_driver_init(void) ++{ ++ return fsl_mc_driver_register(&fsl_mc_allocator_driver); ++} ++ ++void __exit fsl_mc_allocator_driver_exit(void) ++{ ++ fsl_mc_driver_unregister(&fsl_mc_allocator_driver); ++} +diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c +new file 
mode 100644 +index 0000000..f173b35 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-bus.c +@@ -0,0 +1,1347 @@ ++/* ++ * Freescale Management Complex (MC) bus driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../include/dpmng.h" ++#include "../include/mc-sys.h" ++#include "dprc-cmd.h" ++ ++/* ++ * IOMMU stream ID flags ++ */ ++#define STREAM_ID_PL_MASK BIT(9) /* privilege level */ ++#define STREAM_ID_BMT_MASK BIT(8) /* bypass memory translation */ ++#define STREAM_ID_VA_MASK BIT(7) /* virtual address translation ++ * (two-stage translation) */ ++#define STREAM_ID_ICID_MASK (BIT(7) - 1) /* isolation context ID ++ * (translation context) */ ++ ++#define MAX_STREAM_ID_ICID STREAM_ID_ICID_MASK ++ ++static struct kmem_cache *mc_dev_cache; ++ ++/** ++ * fsl_mc_bus_match - device to driver matching callback ++ * @dev: the MC object device structure to match against ++ * @drv: the device driver to search for matching MC object device id ++ * structures ++ * ++ * Returns 1 on success, 0 otherwise. ++ */ ++static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) ++{ ++ const struct fsl_mc_device_match_id *id; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); ++ bool found = false; ++ ++ /* When driver_override is set, only bind to the matching driver */ ++ if (mc_dev->driver_override) { ++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); ++ goto out; ++ } ++ ++ if (!mc_drv->match_id_table) ++ goto out; ++ ++ /* ++ * If the object is not 'plugged' don't match. ++ * Only exception is the root DPRC, which is a special case. ++ * ++ * NOTE: Only when this function is invoked for the root DPRC, ++ * mc_dev->mc_io is not NULL ++ */ ++ if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 && ++ !mc_dev->mc_io) ++ goto out; ++ ++ /* ++ * Traverse the match_id table of the given driver, trying to find ++ * a matching for the given MC object device. ++ */ ++ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { ++ if (id->vendor == mc_dev->obj_desc.vendor && ++ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { ++ found = true; ++ ++ break; ++ } ++ } ++ ++out: ++ dev_dbg(dev, "%smatched\n", found ? 
"" : "not "); ++ return found; ++} ++ ++/** ++ * fsl_mc_bus_uevent - callback invoked when a device is added ++ */ ++static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) ++{ ++ pr_debug("%s invoked\n", __func__); ++ return 0; ++} ++ ++static ssize_t driver_override_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ const char *driver_override, *old = mc_dev->driver_override; ++ char *cp; ++ ++ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) ++ return -EINVAL; ++ ++ if (count > PATH_MAX) ++ return -EINVAL; ++ ++ driver_override = kstrndup(buf, count, GFP_KERNEL); ++ if (!driver_override) ++ return -ENOMEM; ++ ++ cp = strchr(driver_override, '\n'); ++ if (cp) ++ *cp = '\0'; ++ ++ if (strlen(driver_override)) { ++ mc_dev->driver_override = driver_override; ++ } else { ++ kfree(driver_override); ++ mc_dev->driver_override = NULL; ++ } ++ ++ kfree(old); ++ ++ return count; ++} ++ ++static ssize_t driver_override_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ return sprintf(buf, "%s\n", mc_dev->driver_override); ++} ++ ++static DEVICE_ATTR_RW(driver_override); ++ ++static ssize_t rescan_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (!is_root_dprc(dev)) ++ return -EINVAL; ++ ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) { ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return count; ++} ++ ++static DEVICE_ATTR_WO(rescan); ++ ++static struct attribute *fsl_mc_dev_attrs[] = { ++ &dev_attr_driver_override.attr, ++ &dev_attr_rescan.attr, ++ NULL, ++}; ++ ++static const struct attribute_group fsl_mc_dev_group = { ++ .attrs = fsl_mc_dev_attrs, ++}; ++ ++static const struct attribute_group *fsl_mc_dev_groups[] = { ++ &fsl_mc_dev_group, ++ NULL, ++}; ++ ++static int scan_fsl_mc_bus(struct device *dev, void *data) ++{ ++ unsigned int irq_count; ++ struct fsl_mc_device *root_mc_dev; ++ struct fsl_mc_bus *root_mc_bus; ++ ++ if (is_root_dprc(dev)) { ++ root_mc_dev = to_fsl_mc_device(dev); ++ root_mc_bus = to_fsl_mc_bus(root_mc_dev); ++ mutex_lock(&root_mc_bus->scan_mutex); ++ dprc_scan_objects(root_mc_dev, NULL, &irq_count); ++ mutex_unlock(&root_mc_bus->scan_mutex); ++ } ++ ++ return 0; ++} ++ ++static ssize_t bus_rescan_store(struct bus_type *bus, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ ++ if (kstrtoul(buf, 0, &val) < 0) ++ return -EINVAL; ++ ++ if (val) ++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); ++ ++ return count; ++} ++static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); ++ ++static struct attribute *fsl_mc_bus_attrs[] = { ++ &bus_attr_rescan.attr, ++ NULL, ++}; ++ ++static const struct attribute_group fsl_mc_bus_group = { ++ .attrs = fsl_mc_bus_attrs, ++}; ++ ++static const struct attribute_group *fsl_mc_bus_groups[] = { ++ &fsl_mc_bus_group, ++ NULL, ++}; ++ ++struct bus_type fsl_mc_bus_type = { ++ .name = "fsl-mc", ++ .match = fsl_mc_bus_match, ++ .uevent = fsl_mc_bus_uevent, ++ .dev_groups = fsl_mc_dev_groups, ++ .bus_groups = fsl_mc_bus_groups, ++}; 
++EXPORT_SYMBOL_GPL(fsl_mc_bus_type); ++ ++static int fsl_mc_driver_probe(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv; ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ int error; ++ ++ if (WARN_ON(!dev->driver)) ++ return -EINVAL; ++ ++ mc_drv = to_fsl_mc_driver(dev->driver); ++ if (WARN_ON(!mc_drv->probe)) ++ return -EINVAL; ++ ++ error = mc_drv->probe(mc_dev); ++ if (error < 0) { ++ dev_err(dev, "MC object device probe callback failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static int fsl_mc_driver_remove(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ int error; ++ ++ if (WARN_ON(!dev->driver)) ++ return -EINVAL; ++ ++ error = mc_drv->remove(mc_dev); ++ if (error < 0) { ++ dev_err(dev, ++ "MC object device remove callback failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static void fsl_mc_driver_shutdown(struct device *dev) ++{ ++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); ++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); ++ ++ mc_drv->shutdown(mc_dev); ++} ++ ++/** ++ * __fsl_mc_driver_register - registers a child device driver with the ++ * MC bus ++ * ++ * This function is implicitly invoked from the registration function of ++ * fsl_mc device drivers, which is generated by the ++ * module_fsl_mc_driver() macro. ++ */ ++int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver, ++ struct module *owner) ++{ ++ int error; ++ ++ mc_driver->driver.owner = owner; ++ mc_driver->driver.bus = &fsl_mc_bus_type; ++ ++ if (mc_driver->probe) ++ mc_driver->driver.probe = fsl_mc_driver_probe; ++ ++ if (mc_driver->remove) ++ mc_driver->driver.remove = fsl_mc_driver_remove; ++ ++ if (mc_driver->shutdown) ++ mc_driver->driver.shutdown = fsl_mc_driver_shutdown; ++ ++ error = driver_register(&mc_driver->driver); ++ if (error < 0) { ++ pr_err("driver_register() failed for %s: %d\n", ++ mc_driver->driver.name, error); ++ return error; ++ } ++ ++ pr_info("MC object device driver %s registered\n", ++ mc_driver->driver.name); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); ++ ++/** ++ * fsl_mc_driver_unregister - unregisters a device driver from the ++ * MC bus ++ */ ++void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver) ++{ ++ driver_unregister(&mc_driver->driver); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister); ++ ++bool fsl_mc_interrupts_supported(void) ++{ ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ return mc->gic_supported; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_interrupts_supported); ++ ++static int get_dprc_attr(struct fsl_mc_io *mc_io, ++ int container_id, struct dprc_attributes *attr) ++{ ++ uint16_t dprc_handle; ++ int error; ++ ++ error = dprc_open(mc_io, 0, container_id, &dprc_handle); ++ if (error < 0) { ++ pr_err("dprc_open() failed: %d\n", error); ++ return error; ++ } ++ ++ memset(attr, 0, sizeof(struct dprc_attributes)); ++ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr); ++ if (error < 0) { ++ pr_err("dprc_get_attributes() failed: %d\n", error); ++ goto common_cleanup; ++ } ++ ++ error = 0; ++ ++common_cleanup: ++ (void)dprc_close(mc_io, 0, dprc_handle); ++ return error; ++} ++ ++static int get_dprc_icid(struct fsl_mc_io *mc_io, ++ int container_id, uint16_t *icid) ++{ ++ struct dprc_attributes attr; ++ int error; ++ ++ error = get_dprc_attr(mc_io, container_id, &attr); ++ if (error == 0) ++ *icid = attr.icid; ++ ++ return 
error; ++} ++ ++static int get_dprc_version(struct fsl_mc_io *mc_io, ++ int container_id, uint16_t *major, uint16_t *minor) ++{ ++ struct dprc_attributes attr; ++ int error; ++ ++ error = get_dprc_attr(mc_io, container_id, &attr); ++ if (error == 0) { ++ *major = attr.version.major; ++ *minor = attr.version.minor; ++ } ++ ++ return error; ++} ++ ++static int translate_mc_addr(enum fsl_mc_region_types mc_region_type, ++ uint64_t mc_offset, phys_addr_t *phys_addr) ++{ ++ int i; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ ++ if (mc->num_translation_ranges == 0) { ++ /* ++ * Do identity mapping: ++ */ ++ *phys_addr = mc_offset; ++ return 0; ++ } ++ ++ for (i = 0; i < mc->num_translation_ranges; i++) { ++ struct fsl_mc_addr_translation_range *range = ++ &mc->translation_ranges[i]; ++ ++ if (mc_region_type == range->mc_region_type && ++ mc_offset >= range->start_mc_offset && ++ mc_offset < range->end_mc_offset) { ++ *phys_addr = range->start_phys_addr + ++ (mc_offset - range->start_mc_offset); ++ return 0; ++ } ++ } ++ ++ return -EFAULT; ++} ++ ++static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, ++ struct fsl_mc_device *mc_bus_dev) ++{ ++ int i; ++ int error; ++ struct resource *regions; ++ struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc; ++ struct device *parent_dev = mc_dev->dev.parent; ++ enum fsl_mc_region_types mc_region_type; ++ ++ if (strcmp(obj_desc->type, "dprc") == 0 || ++ strcmp(obj_desc->type, "dpmcp") == 0) { ++ mc_region_type = FSL_MC_PORTAL; ++ } else if (strcmp(obj_desc->type, "dpio") == 0) { ++ mc_region_type = FSL_QBMAN_PORTAL; ++ } else { ++ /* ++ * This function should not have been called for this MC object ++ * type, as this object type is not supposed to have MMIO ++ * regions ++ */ ++ WARN_ON(true); ++ return -EINVAL; ++ } ++ ++ regions = kmalloc_array(obj_desc->region_count, ++ sizeof(regions[0]), GFP_KERNEL); ++ if (!regions) ++ return -ENOMEM; ++ ++ for (i = 0; i < obj_desc->region_count; i++) { ++ struct dprc_region_desc region_desc; ++ ++ error = dprc_get_obj_region(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, ++ obj_desc->type, ++ obj_desc->id, i, ®ion_desc); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "dprc_get_obj_region() failed: %d\n", error); ++ goto error_cleanup_regions; ++ } ++ ++ WARN_ON(region_desc.size == 0); ++ error = translate_mc_addr(mc_region_type, ++ region_desc.base_offset, ++ ®ions[i].start); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", ++ region_desc.base_offset, ++ obj_desc->type, obj_desc->id, i); ++ goto error_cleanup_regions; ++ } ++ ++ regions[i].end = regions[i].start + region_desc.size - 1; ++ regions[i].name = "fsl-mc object MMIO region"; ++ regions[i].flags = IORESOURCE_IO; ++ if (region_desc.flags & DPRC_REGION_CACHEABLE) ++ regions[i].flags |= IORESOURCE_CACHEABLE; ++ } ++ ++ mc_dev->regions = regions; ++ return 0; ++ ++error_cleanup_regions: ++ kfree(regions); ++ return error; ++} ++ ++/** ++ * Add a newly discovered MC object device to be visible in Linux ++ */ ++int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, ++ struct fsl_mc_io *mc_io, ++ struct device *parent_dev, ++ const char *driver_override, ++ struct fsl_mc_device **new_mc_dev) ++{ ++ int error; ++ struct fsl_mc_device *mc_dev = NULL; ++ struct fsl_mc_bus *mc_bus = NULL; ++ struct fsl_mc_device *parent_mc_dev; ++ ++ if (parent_dev->bus == &fsl_mc_bus_type) ++ parent_mc_dev = to_fsl_mc_device(parent_dev); ++ else ++ parent_mc_dev = NULL; ++ ++ if 
(strcmp(obj_desc->type, "dprc") == 0) { ++ /* ++ * Allocate an MC bus device object: ++ */ ++ mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL); ++ if (!mc_bus) ++ return -ENOMEM; ++ ++ mc_dev = &mc_bus->mc_dev; ++ } else { ++ /* ++ * Allocate a regular fsl_mc_device object: ++ */ ++ mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL); ++ if (!mc_dev) ++ return -ENOMEM; ++ } ++ ++ mc_dev->obj_desc = *obj_desc; ++ mc_dev->mc_io = mc_io; ++ if (driver_override) { ++ /* ++ * We trust driver_override, so we don't need to use ++ * kstrndup() here ++ */ ++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); ++ if (!mc_dev->driver_override) { ++ error = -ENOMEM; ++ goto error_cleanup_dev; ++ } ++ } ++ ++ device_initialize(&mc_dev->dev); ++ INIT_LIST_HEAD(&mc_dev->dev.msi_list); ++ mc_dev->dev.parent = parent_dev; ++ mc_dev->dev.bus = &fsl_mc_bus_type; ++ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id); ++ ++ if (strcmp(obj_desc->type, "dprc") == 0) { ++ struct fsl_mc_io *mc_io2; ++ ++ mc_dev->flags |= FSL_MC_IS_DPRC; ++ ++ /* ++ * To get the DPRC's ICID, we need to open the DPRC ++ * in get_dprc_icid(). For child DPRCs, we do so using the ++ * parent DPRC's MC portal instead of the child DPRC's MC ++ * portal, in case the child DPRC is already opened with ++ * its own portal (e.g., the DPRC used by AIOP). ++ * ++ * NOTE: There cannot be more than one active open for a ++ * given MC object, using the same MC portal. ++ */ ++ if (parent_mc_dev) { ++ /* ++ * device being added is a child DPRC device ++ */ ++ mc_io2 = parent_mc_dev->mc_io; ++ } else { ++ /* ++ * device being added is the root DPRC device ++ */ ++ if (WARN_ON(!mc_io)) { ++ error = -EINVAL; ++ goto error_cleanup_dev; ++ } ++ ++ mc_io2 = mc_io; ++ } ++ ++ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid); ++ if (error < 0) ++ goto error_cleanup_dev; ++ } else { ++ /* ++ * A non-DPRC MC object device has to be a child of another ++ * MC object (specifically a DPRC object) ++ */ ++ mc_dev->icid = parent_mc_dev->icid; ++ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; ++ mc_dev->dev.dma_mask = &mc_dev->dma_mask; ++ } ++ ++ /* ++ * Get MMIO regions for the device from the MC: ++ * ++ * NOTE: the root DPRC is a special case as its MMIO region is ++ * obtained from the device tree ++ */ ++ if (parent_mc_dev && obj_desc->region_count != 0) { ++ error = fsl_mc_device_get_mmio_regions(mc_dev, ++ parent_mc_dev); ++ if (error < 0) ++ goto error_cleanup_dev; ++ } ++ ++ /* ++ * Objects are coherent, unless 'no shareability' flag set. 
++ * FIXME: fill up @dma_base, @size, @iommu ++ */ ++ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) ++ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); ++ ++ /* ++ * The device-specific probe callback will get invoked by device_add() ++ */ ++ error = device_add(&mc_dev->dev); ++ if (error < 0) { ++ dev_err(parent_dev, ++ "device_add() failed for device %s: %d\n", ++ dev_name(&mc_dev->dev), error); ++ goto error_cleanup_dev; ++ } ++ ++ (void)get_device(&mc_dev->dev); ++ dev_dbg(parent_dev, "Added MC object device %s\n", ++ dev_name(&mc_dev->dev)); ++ ++ *new_mc_dev = mc_dev; ++ return 0; ++ ++error_cleanup_dev: ++ kfree(mc_dev->regions); ++ if (mc_bus) ++ devm_kfree(parent_dev, mc_bus); ++ else ++ kmem_cache_free(mc_dev_cache, mc_dev); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_device_add); ++ ++/** ++ * fsl_mc_device_remove - Remove an MC object device from being visible to ++ * Linux ++ * ++ * @mc_dev: Pointer to an MC object device object ++ */ ++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) ++{ ++ struct fsl_mc_bus *mc_bus = NULL; ++ ++ kfree(mc_dev->regions); ++ ++ /* ++ * The device-specific remove callback will get invoked by device_del() ++ */ ++ device_del(&mc_dev->dev); ++ put_device(&mc_dev->dev); ++ ++ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { ++ mc_bus = to_fsl_mc_bus(mc_dev); ++ ++ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) ++ fsl_mc_bus_type.dev_root = NULL; ++ } else ++ WARN_ON(mc_dev->mc_io != NULL); ++ ++ kfree(mc_dev->driver_override); ++ mc_dev->driver_override = NULL; ++ if (mc_bus) ++ devm_kfree(mc_dev->dev.parent, mc_bus); ++ else ++ kmem_cache_free(mc_dev_cache, mc_dev); ++} ++EXPORT_SYMBOL_GPL(fsl_mc_device_remove); ++ ++static int mc_bus_msi_prepare(struct irq_domain *domain, struct device *dev, ++ int nvec, msi_alloc_info_t *info) ++{ ++ int error; ++ u32 its_dev_id; ++ struct dprc_attributes dprc_attr; ++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(dev); ++ ++ if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) ++ return -EINVAL; ++ ++ error = dprc_get_attributes(mc_bus_dev->mc_io, ++ 0, ++ mc_bus_dev->mc_handle, &dprc_attr); ++ if (error < 0) { ++ dev_err(&mc_bus_dev->dev, ++ "dprc_get_attributes() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ /* ++ * Build the device Id to be passed to the GIC-ITS: ++ * ++ * NOTE: This device id corresponds to the IOMMU stream ID ++ * associated with the DPRC object. ++ */ ++ its_dev_id = mc_bus_dev->icid; ++ if (its_dev_id > STREAM_ID_ICID_MASK) { ++ dev_err(&mc_bus_dev->dev, ++ "Invalid ICID: %#x\n", its_dev_id); ++ return -ERANGE; ++ } ++ ++ if (dprc_attr.options & DPRC_CFG_OPT_AIOP) ++ its_dev_id |= STREAM_ID_PL_MASK | STREAM_ID_BMT_MASK; ++ ++ return __its_msi_prepare(domain, its_dev_id, dev, nvec, info); ++} ++ ++static void mc_bus_mask_msi_irq(struct irq_data *d) ++{ ++ /* Bus specific mask */ ++ irq_chip_mask_parent(d); ++} ++ ++static void mc_bus_unmask_msi_irq(struct irq_data *d) ++{ ++ /* Bus specific unmask */ ++ irq_chip_unmask_parent(d); ++} ++ ++static void program_msi_at_mc(struct fsl_mc_device *mc_bus_dev, ++ struct fsl_mc_device_irq *irq) ++{ ++ int error; ++ struct fsl_mc_device *owner_mc_dev = irq->mc_dev; ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct dprc_irq_cfg irq_cfg; ++ ++ /* ++ * irq->msi_paddr is 0x0 when this function is invoked in the ++ * free_irq() code path. In this case, for the MC, we don't ++ * really need to "unprogram" the MSI, so we just return.
++ * This helps avoid subtle ordering problems in the MC ++ * bus IRQ teardown logic. ++ * FIXME: evaluate whether there is a better way to address ++ * the underlying issue (upstreamability concern) ++ */ ++ if (irq->msi_paddr == 0x0) ++ return; ++ ++ if (WARN_ON(!owner_mc_dev)) ++ return; ++ ++ irq_cfg.paddr = irq->msi_paddr; ++ irq_cfg.val = irq->msi_value; ++ irq_cfg.irq_num = irq->irq_number; ++ ++ if (owner_mc_dev == mc_bus_dev) { ++ /* ++ * IRQ is for the mc_bus_dev's DPRC itself ++ */ ++ error = dprc_set_irq(mc_bus->atomic_mc_io, ++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, ++ mc_bus->atomic_dprc_handle, ++ irq->dev_irq_index, ++ &irq_cfg); ++ if (error < 0) { ++ dev_err(&owner_mc_dev->dev, ++ "dprc_set_irq() failed: %d\n", error); ++ } ++ } else { ++ error = dprc_set_obj_irq(mc_bus->atomic_mc_io, ++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, ++ mc_bus->atomic_dprc_handle, ++ owner_mc_dev->obj_desc.type, ++ owner_mc_dev->obj_desc.id, ++ irq->dev_irq_index, ++ &irq_cfg); ++ if (error < 0) { ++ dev_err(&owner_mc_dev->dev, ++ "dprc_set_obj_irq() failed: %d\n", error); ++ } ++ } ++} ++ ++/* ++ * This function is invoked from devm_request_irq(), ++ * devm_request_threaded_irq(), devm_free_irq() ++ */ ++static void mc_bus_msi_domain_write_msg(struct irq_data *irq_data, ++ struct msi_msg *msg) ++{ ++ struct msi_desc *msi_entry = irq_data->msi_desc; ++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_entry->dev); ++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); ++ struct fsl_mc_device_irq *irq_res = ++ &mc_bus->irq_resources[msi_entry->msi_attrib.entry_nr]; ++ ++ /* ++ * NOTE: This function is invoked with interrupts disabled ++ */ ++ ++ if (irq_res->irq_number == irq_data->irq) { ++ irq_res->msi_paddr = ++ ((u64)msg->address_hi << 32) | msg->address_lo; ++ ++ irq_res->msi_value = msg->data; ++ ++ /* ++ * Program the MSI (paddr, value) pair in the device: ++ */ ++ program_msi_at_mc(mc_bus_dev, irq_res); ++ } ++} ++ ++static struct irq_chip mc_bus_msi_irq_chip = { ++ .name = "fsl-mc-bus-msi", ++ .irq_unmask = mc_bus_unmask_msi_irq, ++ .irq_mask = mc_bus_mask_msi_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_write_msi_msg = mc_bus_msi_domain_write_msg, ++}; ++ ++static struct msi_domain_ops mc_bus_msi_ops = { ++ .msi_prepare = mc_bus_msi_prepare, ++}; ++ ++static struct msi_domain_info mc_bus_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), ++ .ops = &mc_bus_msi_ops, ++ .chip = &mc_bus_msi_irq_chip, ++}; ++ ++static int create_mc_irq_domain(struct platform_device *mc_pdev, ++ struct irq_domain **new_irq_domain) ++{ ++ int error; ++ struct device_node *its_of_node; ++ struct irq_domain *its_domain; ++ struct irq_domain *irq_domain; ++ struct device_node *mc_of_node = mc_pdev->dev.of_node; ++ ++ its_of_node = of_parse_phandle(mc_of_node, "msi-parent", 0); ++ if (!its_of_node) { ++ dev_err(&mc_pdev->dev, ++ "msi-parent phandle missing for %s\n", ++ mc_of_node->full_name); ++ return -ENOENT; ++ } ++ ++ /* ++ * Extract MSI parent node: ++ */ ++ its_domain = irq_find_host(its_of_node); ++ if (!its_domain) { ++ dev_err(&mc_pdev->dev, "Unable to find parent domain\n"); ++ error = -ENOENT; ++ goto cleanup_its_of_node; ++ } ++ ++ irq_domain = msi_create_irq_domain(mc_of_node, &mc_bus_msi_domain_info, ++ its_domain->parent); ++ if (!irq_domain) { ++ dev_err(&mc_pdev->dev, "Failed to allocate msi_domain\n"); ++ error = -ENOMEM; ++ goto cleanup_its_of_node; ++ } ++ ++ dev_dbg(&mc_pdev->dev, "Allocated MSI
domain\n"); ++ *new_irq_domain = irq_domain; ++ return 0; ++ ++cleanup_its_of_node: ++ of_node_put(its_of_node); ++ return error; ++} ++ ++/* ++ * Initialize the interrupt pool associated with a MC bus. ++ * It allocates a block of IRQs from the GIC-ITS ++ */ ++int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, ++ unsigned int irq_count) ++{ ++ unsigned int i; ++ struct msi_desc *msi_entry; ++ struct msi_desc *next_msi_entry; ++ struct fsl_mc_device_irq *irq_resources; ++ struct fsl_mc_device_irq *irq_res; ++ int error; ++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ ++ /* ++ * Detect duplicate invocations of this function: ++ */ ++ if (WARN_ON(!list_empty(&mc_bus_dev->dev.msi_list))) ++ return -EINVAL; ++ ++ if (WARN_ON(irq_count == 0 || ++ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) ++ return -EINVAL; ++ ++ irq_resources = ++ devm_kzalloc(&mc_bus_dev->dev, ++ sizeof(*irq_resources) * irq_count, ++ GFP_KERNEL); ++ if (!irq_resources) ++ return -ENOMEM; ++ ++ for (i = 0; i < irq_count; i++) { ++ irq_res = &irq_resources[i]; ++ msi_entry = alloc_msi_entry(&mc_bus_dev->dev); ++ if (!msi_entry) { ++ dev_err(&mc_bus_dev->dev, "Failed to allocate msi entry\n"); ++ error = -ENOMEM; ++ goto cleanup_msi_entries; ++ } ++ ++ msi_entry->msi_attrib.is_msix = 1; ++ msi_entry->msi_attrib.is_64 = 1; ++ msi_entry->msi_attrib.entry_nr = i; ++ msi_entry->nvec_used = 1; ++ list_add_tail(&msi_entry->list, &mc_bus_dev->dev.msi_list); ++ ++ /* ++ * NOTE: irq_res->msi_paddr will be set by the ++ * mc_bus_msi_domain_write_msg() callback ++ */ ++ irq_res->resource.type = res_pool->type; ++ irq_res->resource.data = irq_res; ++ irq_res->resource.parent_pool = res_pool; ++ INIT_LIST_HEAD(&irq_res->resource.node); ++ list_add_tail(&irq_res->resource.node, &res_pool->free_list); ++ } ++ ++ /* ++ * NOTE: Calling this function will trigger the invocation of the ++ * mc_bus_msi_prepare() callback ++ */ ++ error = msi_domain_alloc_irqs(mc->irq_domain, ++ &mc_bus_dev->dev, irq_count); ++ ++ if (error) { ++ dev_err(&mc_bus_dev->dev, "Failed to allocate IRQs\n"); ++ goto cleanup_msi_entries; ++ } ++ ++ for_each_msi_entry(msi_entry, &mc_bus_dev->dev) { ++ u32 irq_num = msi_entry->irq; ++ ++ irq_res = &irq_resources[msi_entry->msi_attrib.entry_nr]; ++ irq_res->irq_number = irq_num; ++ irq_res->resource.id = irq_num; ++ } ++ ++ res_pool->max_count = irq_count; ++ res_pool->free_count = irq_count; ++ mc_bus->irq_resources = irq_resources; ++ return 0; ++ ++cleanup_msi_entries: ++ list_for_each_entry_safe(msi_entry, next_msi_entry, ++ &mc_bus_dev->dev.msi_list, list) { ++ list_del(&msi_entry->list); ++ kfree(msi_entry); ++ } ++ ++ devm_kfree(&mc_bus_dev->dev, irq_resources); ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); ++ ++/** ++ * Teardown the interrupt pool associated with an MC bus. ++ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. 
++ */ ++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) ++{ ++ struct msi_desc *msi_entry; ++ struct msi_desc *next_msi_entry; ++ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); ++ struct fsl_mc_resource_pool *res_pool = ++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; ++ ++ if (WARN_ON(!mc_bus->irq_resources)) ++ return; ++ ++ if (WARN_ON(res_pool->max_count == 0)) ++ return; ++ ++ if (WARN_ON(res_pool->free_count != res_pool->max_count)) ++ return; ++ ++ msi_domain_free_irqs(mc->irq_domain, &mc_bus->mc_dev.dev); ++ list_for_each_entry_safe(msi_entry, next_msi_entry, ++ &mc_bus->mc_dev.dev.msi_list, list) { ++ list_del(&msi_entry->list); ++ kfree(msi_entry); ++ } ++ ++ devm_kfree(&mc_bus->mc_dev.dev, mc_bus->irq_resources); ++ res_pool->max_count = 0; ++ res_pool->free_count = 0; ++ mc_bus->irq_resources = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); ++ ++static int parse_mc_ranges(struct device *dev, ++ int *paddr_cells, ++ int *mc_addr_cells, ++ int *mc_size_cells, ++ const __be32 **ranges_start, ++ uint8_t *num_ranges) ++{ ++ const __be32 *prop; ++ int range_tuple_cell_count; ++ int ranges_len; ++ int tuple_len; ++ struct device_node *mc_node = dev->of_node; ++ ++ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len); ++ if (!(*ranges_start) || !ranges_len) { ++ dev_warn(dev, ++ "missing or empty ranges property for device tree node '%s'\n", ++ mc_node->name); ++ ++ *num_ranges = 0; ++ return 0; ++ } ++ ++ *paddr_cells = of_n_addr_cells(mc_node); ++ ++ prop = of_get_property(mc_node, "#address-cells", NULL); ++ if (prop) ++ *mc_addr_cells = be32_to_cpup(prop); ++ else ++ *mc_addr_cells = *paddr_cells; ++ ++ prop = of_get_property(mc_node, "#size-cells", NULL); ++ if (prop) ++ *mc_size_cells = be32_to_cpup(prop); ++ else ++ *mc_size_cells = of_n_size_cells(mc_node); ++ ++ range_tuple_cell_count = *paddr_cells + *mc_addr_cells + ++ *mc_size_cells; ++ ++ tuple_len = range_tuple_cell_count * sizeof(__be32); ++ if (ranges_len % tuple_len != 0) { ++ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name); ++ return -EINVAL; ++ } ++ ++ *num_ranges = ranges_len / tuple_len; ++ return 0; ++} ++ ++static int get_mc_addr_translation_ranges(struct device *dev, ++ struct fsl_mc_addr_translation_range ++ **ranges, ++ uint8_t *num_ranges) ++{ ++ int error; ++ int paddr_cells; ++ int mc_addr_cells; ++ int mc_size_cells; ++ int i; ++ const __be32 *ranges_start; ++ const __be32 *cell; ++ ++ error = parse_mc_ranges(dev, ++ &paddr_cells, ++ &mc_addr_cells, ++ &mc_size_cells, ++ &ranges_start, ++ num_ranges); ++ if (error < 0) ++ return error; ++ ++ if (!(*num_ranges)) { ++ /* ++ * Missing or empty ranges property ("ranges;") for the ++ * 'fsl,qoriq-mc' node. In this case, identity mapping ++ * will be used. 
++ */ ++ *ranges = NULL; ++ return 0; ++ } ++ ++ *ranges = devm_kcalloc(dev, *num_ranges, ++ sizeof(struct fsl_mc_addr_translation_range), ++ GFP_KERNEL); ++ if (!(*ranges)) ++ return -ENOMEM; ++ ++ cell = ranges_start; ++ for (i = 0; i < *num_ranges; ++i) { ++ struct fsl_mc_addr_translation_range *range = &(*ranges)[i]; ++ ++ range->mc_region_type = of_read_number(cell, 1); ++ range->start_mc_offset = of_read_number(cell + 1, ++ mc_addr_cells - 1); ++ cell += mc_addr_cells; ++ range->start_phys_addr = of_read_number(cell, paddr_cells); ++ cell += paddr_cells; ++ range->end_mc_offset = range->start_mc_offset + ++ of_read_number(cell, mc_size_cells); ++ ++ cell += mc_size_cells; ++ } ++ ++ return 0; ++} ++ ++/** ++ * fsl_mc_bus_probe - callback invoked when the root MC bus is being ++ * added ++ */ ++static int fsl_mc_bus_probe(struct platform_device *pdev) ++{ ++ struct dprc_obj_desc obj_desc; ++ int error; ++ struct fsl_mc *mc; ++ struct fsl_mc_device *mc_bus_dev = NULL; ++ struct fsl_mc_io *mc_io = NULL; ++ int container_id; ++ phys_addr_t mc_portal_phys_addr; ++ uint32_t mc_portal_size; ++ struct mc_version mc_version; ++ struct resource res; ++ ++ dev_info(&pdev->dev, "Root MC bus device probed"); ++ ++ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); ++ if (!mc) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, mc); ++ error = create_mc_irq_domain(pdev, &mc->irq_domain); ++ if (error < 0) { ++ dev_warn(&pdev->dev, ++ "WARNING: MC bus driver will run without interrupt support\n"); ++ } else { ++ mc->gic_supported = true; ++ } ++ ++ /* ++ * Get physical address of MC portal for the root DPRC: ++ */ ++ error = of_address_to_resource(pdev->dev.of_node, 0, &res); ++ if (error < 0) { ++ dev_err(&pdev->dev, ++ "of_address_to_resource() failed for %s\n", ++ pdev->dev.of_node->full_name); ++ goto error_cleanup_irq_domain; ++ } ++ ++ mc_portal_phys_addr = res.start; ++ mc_portal_size = resource_size(&res); ++ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr, ++ mc_portal_size, NULL, 0, &mc_io); ++ if (error < 0) ++ goto error_cleanup_irq_domain; ++ ++ error = mc_get_version(mc_io, 0, &mc_version); ++ if (error != 0) { ++ dev_err(&pdev->dev, ++ "mc_get_version() failed with error %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ dev_info(&pdev->dev, ++ "Freescale Management Complex Firmware version: %u.%u.%u\n", ++ mc_version.major, mc_version.minor, mc_version.revision); ++ ++ error = get_mc_addr_translation_ranges(&pdev->dev, ++ &mc->translation_ranges, ++ &mc->num_translation_ranges); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ error = dpmng_get_container_id(mc_io, 0, &container_id); ++ if (error < 0) { ++ dev_err(&pdev->dev, ++ "dpmng_get_container_id() failed: %d\n", error); ++ goto error_cleanup_mc_io; ++ } ++ ++ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); ++ error = get_dprc_version(mc_io, container_id, ++ &obj_desc.ver_major, &obj_desc.ver_minor); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; ++ strcpy(obj_desc.type, "dprc"); ++ obj_desc.id = container_id; ++ obj_desc.irq_count = 1; ++ obj_desc.region_count = 0; ++ ++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, ++ &mc_bus_dev); ++ if (error < 0) ++ goto error_cleanup_mc_io; ++ ++ mc->root_mc_bus_dev = mc_bus_dev; ++ return 0; ++ ++error_cleanup_mc_io: ++ fsl_destroy_mc_io(mc_io); ++ ++error_cleanup_irq_domain: ++ if (mc->gic_supported) ++ irq_domain_remove(mc->irq_domain); ++ ++ return error; ++} ++ ++/** ++ * fsl_mc_bus_remove - 
callback invoked when the root MC bus is being ++ * removed ++ */ ++static int fsl_mc_bus_remove(struct platform_device *pdev) ++{ ++ struct fsl_mc *mc = platform_get_drvdata(pdev); ++ ++ if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root)) ++ return -EINVAL; ++ ++ if (mc->gic_supported) ++ irq_domain_remove(mc->irq_domain); ++ ++ fsl_mc_device_remove(mc->root_mc_bus_dev); ++ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); ++ mc->root_mc_bus_dev->mc_io = NULL; ++ ++ dev_info(&pdev->dev, "Root MC bus device removed"); ++ return 0; ++} ++ ++static const struct of_device_id fsl_mc_bus_match_table[] = { ++ {.compatible = "fsl,qoriq-mc",}, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table); ++ ++static struct platform_driver fsl_mc_bus_driver = { ++ .driver = { ++ .name = "fsl_mc_bus", ++ .owner = THIS_MODULE, ++ .pm = NULL, ++ .of_match_table = fsl_mc_bus_match_table, ++ }, ++ .probe = fsl_mc_bus_probe, ++ .remove = fsl_mc_bus_remove, ++}; ++ ++static int __init fsl_mc_bus_driver_init(void) ++{ ++ int error; ++ ++ mc_dev_cache = kmem_cache_create("fsl_mc_device", ++ sizeof(struct fsl_mc_device), 0, 0, ++ NULL); ++ if (!mc_dev_cache) { ++ pr_err("Could not create fsl_mc_device cache\n"); ++ return -ENOMEM; ++ } ++ ++ error = bus_register(&fsl_mc_bus_type); ++ if (error < 0) { ++ pr_err("fsl-mc bus type registration failed: %d\n", error); ++ goto error_cleanup_cache; ++ } ++ ++ pr_info("fsl-mc bus type registered\n"); ++ ++ error = platform_driver_register(&fsl_mc_bus_driver); ++ if (error < 0) { ++ pr_err("platform_driver_register() failed: %d\n", error); ++ goto error_cleanup_bus; ++ } ++ ++ error = dprc_driver_init(); ++ if (error < 0) ++ goto error_cleanup_driver; ++ ++ error = fsl_mc_allocator_driver_init(); ++ if (error < 0) ++ goto error_cleanup_dprc_driver; ++ ++ return 0; ++ ++error_cleanup_dprc_driver: ++ dprc_driver_exit(); ++ ++error_cleanup_driver: ++ platform_driver_unregister(&fsl_mc_bus_driver); ++ ++error_cleanup_bus: ++ bus_unregister(&fsl_mc_bus_type); ++ ++error_cleanup_cache: ++ kmem_cache_destroy(mc_dev_cache); ++ return error; ++} ++ ++postcore_initcall(fsl_mc_bus_driver_init); ++ ++static void __exit fsl_mc_bus_driver_exit(void) ++{ ++ if (WARN_ON(!mc_dev_cache)) ++ return; ++ ++ fsl_mc_allocator_driver_exit(); ++ dprc_driver_exit(); ++ platform_driver_unregister(&fsl_mc_bus_driver); ++ bus_unregister(&fsl_mc_bus_type); ++ kmem_cache_destroy(mc_dev_cache); ++ pr_info("MC bus unregistered\n"); ++} ++ ++module_exit(fsl_mc_bus_driver_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor Inc."); ++MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h +new file mode 100644 +index 0000000..d5c1bc3 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h +@@ -0,0 +1,25 @@ ++/* ++ * Freescale Management Complex (MC) ioctl interface ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * Lijun Pan ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied.
++ */ ++#ifndef _FSL_MC_IOCTL_H_ ++#define _FSL_MC_IOCTL_H_ ++ ++#include ++ ++#define RESTOOL_IOCTL_TYPE 'R' ++ ++#define RESTOOL_GET_ROOT_DPRC_INFO \ ++ _IOR(RESTOOL_IOCTL_TYPE, 0x1, uint32_t) ++ ++#define RESTOOL_SEND_MC_COMMAND \ ++ _IOWR(RESTOOL_IOCTL_TYPE, 0x4, struct mc_command) ++ ++#endif /* _FSL_MC_IOCTL_H_ */ +diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c +new file mode 100644 +index 0000000..d261c1a +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-restool.c +@@ -0,0 +1,312 @@ ++/* ++ * Freescale Management Complex (MC) restool driver ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * Lijun Pan ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include "../include/mc-private.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mc-ioctl.h" ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/dpmng.h" ++ ++/** ++ * Maximum number of DPRCs that can be opened at the same time ++ */ ++#define MAX_DPRC_HANDLES 64 ++ ++/** ++ * struct fsl_mc_restool - Management Complex (MC) resource manager object ++ * @tool_mc_io: pointer to the MC I/O object used by the restool ++ */ ++struct fsl_mc_restool { ++ struct fsl_mc_io *tool_mc_io; ++}; ++ ++/** ++ * struct global_state - indicating the number of static and dynamic instances ++ * @dynamic_instance_count - number of dynamically created instances ++ * @static_instance_in_use - static instance is in use or not ++ * @mutex - mutex lock to serialize the operations ++ */ ++struct global_state { ++ uint32_t dynamic_instance_count; ++ bool static_instance_in_use; ++ struct mutex mutex; ++}; ++ ++static struct fsl_mc_restool fsl_mc_restool = { 0 }; ++static struct global_state global_state = { 0 }; ++ ++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep) ++{ ++ struct fsl_mc_device *root_mc_dev; ++ int error = 0; ++ struct fsl_mc_restool *fsl_mc_restool_new = NULL; ++ ++ mutex_lock(&global_state.mutex); ++ ++ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) { ++ error = -EINVAL; ++ goto error; ++ } ++ ++ if (!global_state.static_instance_in_use) { ++ global_state.static_instance_in_use = true; ++ filep->private_data = &fsl_mc_restool; ++ } else { ++ fsl_mc_restool_new = kmalloc(sizeof(struct fsl_mc_restool), ++ GFP_KERNEL); ++ if (fsl_mc_restool_new == NULL) { ++ error = -ENOMEM; ++ goto error; ++ } ++ memset(fsl_mc_restool_new, 0, sizeof(*fsl_mc_restool_new)); ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, ++ &fsl_mc_restool_new->tool_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto error; ++ } ++ ++global_state.dynamic_instance_count; ++ filep->private_data = fsl_mc_restool_new; ++ } ++ ++ mutex_unlock(&global_state.mutex); ++ return 0; ++error: ++ if (fsl_mc_restool_new != NULL && ++ fsl_mc_restool_new->tool_mc_io != NULL) { ++ fsl_mc_portal_free(fsl_mc_restool_new->tool_mc_io); ++ fsl_mc_restool_new->tool_mc_io = NULL; ++ } ++ ++ kfree(fsl_mc_restool_new); ++ mutex_unlock(&global_state.mutex); ++ return error; ++} ++ ++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) ++{ ++ struct fsl_mc_restool *fsl_mc_restool_local = filep->private_data; ++ ++ if (WARN_ON(filep->private_data ==
NULL)) ++ return -EINVAL; ++ ++ mutex_lock(&global_state.mutex); ++ ++ if (WARN_ON(global_state.dynamic_instance_count == 0 && ++ !global_state.static_instance_in_use)) { ++ mutex_unlock(&global_state.mutex); ++ return -EINVAL; ++ } ++ ++ /* Globally clean up opened/untracked handles */ ++ fsl_mc_portal_reset(fsl_mc_restool_local->tool_mc_io); ++ ++ pr_debug("dynamic instance count: %d\n", ++ global_state.dynamic_instance_count); ++ pr_debug("static instance count: %d\n", ++ global_state.static_instance_in_use); ++ ++ /* ++ * must check ++ * whether fsl_mc_restool_local is dynamic or global instance ++ * Otherwise it will free up the reserved portal by accident ++ * or even not free up the dynamic allocated portal ++ * if 2 or more instances running concurrently ++ */ ++ if (fsl_mc_restool_local == &fsl_mc_restool) { ++ pr_debug("this is reserved portal"); ++ pr_debug("reserved portal not in use\n"); ++ global_state.static_instance_in_use = false; ++ } else { ++ pr_debug("this is dynamically allocated portal"); ++ pr_debug("free one dynamically allocated portal\n"); ++ fsl_mc_portal_free(fsl_mc_restool_local->tool_mc_io); ++ kfree(filep->private_data); ++ --global_state.dynamic_instance_count; ++ } ++ ++ filep->private_data = NULL; ++ mutex_unlock(&global_state.mutex); ++ return 0; ++} ++ ++static int restool_get_root_dprc_info(unsigned long arg) ++{ ++ int error = -EINVAL; ++ uint32_t root_dprc_id; ++ struct fsl_mc_device *root_mc_dev; ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ root_dprc_id = root_mc_dev->obj_desc.id; ++ error = copy_to_user((void __user *)arg, &root_dprc_id, ++ sizeof(root_dprc_id)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ return 0; ++error: ++ return error; ++} ++ ++static int restool_send_mc_command(unsigned long arg, ++ struct fsl_mc_restool *fsl_mc_restool) ++{ ++ int error = -EINVAL; ++ struct mc_command mc_cmd; ++ ++ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ /* ++ * Send MC command to the MC: ++ */ ++ error = mc_send_command(fsl_mc_restool->tool_mc_io, &mc_cmd); ++ if (error < 0) ++ goto error; ++ ++ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)); ++ if (error < 0) { ++ pr_err("copy_to_user() failed with error %d\n", error); ++ goto error; ++ } ++ ++ return 0; ++error: ++ return error; ++} ++ ++static long ++fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ int error = -EINVAL; ++ ++ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) ++ goto out; ++ ++ switch (cmd) { ++ case RESTOOL_GET_ROOT_DPRC_INFO: ++ error = restool_get_root_dprc_info(arg); ++ break; ++ ++ case RESTOOL_SEND_MC_COMMAND: ++ error = restool_send_mc_command(arg, file->private_data); ++ break; ++ default: ++ error = -EINVAL; ++ } ++out: ++ return error; ++} ++ ++static const struct file_operations fsl_mc_restool_dev_fops = { ++ .owner = THIS_MODULE, ++ .open = fsl_mc_restool_dev_open, ++ .release = fsl_mc_restool_dev_release, ++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, ++ .compat_ioctl = fsl_mc_restool_dev_ioctl, ++}; ++ ++static struct miscdevice fsl_mc_restool_dev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "mc_restool", ++ .fops = &fsl_mc_restool_dev_fops ++}; ++ ++static int __init fsl_mc_restool_driver_init(void) ++{ ++ struct fsl_mc_device *root_mc_dev; ++ int error = -EINVAL; ++ bool restool_dev_registered = false; 
++ ++ mutex_init(&global_state.mutex); ++ ++ if (WARN_ON(fsl_mc_restool.tool_mc_io != NULL)) ++ goto error; ++ ++ if (WARN_ON(global_state.dynamic_instance_count != 0)) ++ goto error; ++ ++ if (WARN_ON(global_state.static_instance_in_use)) ++ goto error; ++ ++ if (fsl_mc_bus_type.dev_root == NULL) { ++ pr_err("fsl-mc bus not found, restool driver registration failed\n"); ++ goto error; ++ } ++ ++ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); ++ error = fsl_mc_portal_allocate(root_mc_dev, 0, ++ &fsl_mc_restool.tool_mc_io); ++ if (error < 0) { ++ pr_err("Not able to allocate MC portal\n"); ++ goto error; ++ } ++ ++ error = misc_register(&fsl_mc_restool_dev); ++ if (error < 0) { ++ pr_err("misc_register() failed: %d\n", error); ++ goto error; ++ } ++ ++ restool_dev_registered = true; ++ pr_info("%s driver registered\n", fsl_mc_restool_dev.name); ++ return 0; ++error: ++ if (restool_dev_registered) ++ misc_deregister(&fsl_mc_restool_dev); ++ ++ if (fsl_mc_restool.tool_mc_io != NULL) { ++ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); ++ fsl_mc_restool.tool_mc_io = NULL; ++ } ++ ++ return error; ++} ++ ++module_init(fsl_mc_restool_driver_init); ++ ++static void __exit fsl_mc_restool_driver_exit(void) ++{ ++ if (WARN_ON(fsl_mc_restool.tool_mc_io == NULL)) ++ return; ++ ++ if (WARN_ON(global_state.dynamic_instance_count != 0)) ++ return; ++ ++ if (WARN_ON(global_state.static_instance_in_use)) ++ return; ++ ++ misc_deregister(&fsl_mc_restool_dev); ++ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); ++ fsl_mc_restool.tool_mc_io = NULL; ++ pr_info("%s driver unregistered\n", fsl_mc_restool_dev.name); ++} ++ ++module_exit(fsl_mc_restool_driver_exit); ++ ++MODULE_AUTHOR("Freescale Semiconductor Inc."); ++MODULE_DESCRIPTION("Freescale's MC restool driver"); ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c +new file mode 100644 +index 0000000..d3b6940 +--- /dev/null ++++ b/drivers/staging/fsl-mc/bus/mc-sys.c +@@ -0,0 +1,677 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * I/O services to send MC commands to the MC hardware ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "../include/mc-sys.h" ++#include "../include/mc-cmd.h" ++#include "../include/mc.h" ++#include ++#include ++#include ++#include ++#include ++#include "dpmcp.h" ++ ++/** ++ * Timeout in milliseconds to wait for the completion of an MC command ++ * 5000 ms is barely enough for dpsw/dpdmux creation ++ * TODO: if MC firmware could response faster, we should decrease this value ++ */ ++#define MC_CMD_COMPLETION_TIMEOUT_MS 5000 ++ ++/* ++ * usleep_range() min and max values used to throttle down polling ++ * iterations while waiting for MC command completion ++ */ ++#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10 ++#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500 ++ ++#define MC_CMD_HDR_READ_CMDID(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S)) ++ ++/** ++ * dpmcp_irq0_handler - Regular ISR for DPMCP interrupt 0 ++ * ++ * @irq: IRQ number of the interrupt being handled ++ * @arg: Pointer to device structure ++ */ ++static irqreturn_t dpmcp_irq0_handler(int irq_num, void *arg) ++{ ++ struct device *dev = (struct device *)arg; ++ struct fsl_mc_device *dpmcp_dev = to_fsl_mc_device(dev); ++ struct fsl_mc_io *mc_io = dpmcp_dev->mc_io; ++ ++ dev_dbg(dev, "DPMCP IRQ %d triggered on CPU %u\n", irq_num, ++ smp_processor_id()); ++ ++ if (WARN_ON(dpmcp_dev->irqs[0]->irq_number != (uint32_t)irq_num)) ++ goto out; ++ ++ if (WARN_ON(!mc_io)) ++ goto out; ++ ++ complete(&mc_io->mc_command_done_completion); ++out: ++ return IRQ_HANDLED; ++} ++ ++/* ++ * Disable and clear interrupts for a given DPMCP object ++ */ ++static int disable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ /* ++ * Disable generation of the DPMCP interrupt: ++ */ ++ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 0); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_enable() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ /* ++ * Disable all DPMCP interrupt causes: ++ */ ++ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 0x0); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_mask() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ return 0; ++} ++ ++static void unregister_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) ++{ ++ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; ++ ++ devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); ++} ++ ++static int register_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; ++ ++ error = devm_request_irq(&dpmcp_dev->dev, ++ irq->irq_number, ++ dpmcp_irq0_handler, ++ IRQF_NO_SUSPEND | IRQF_ONESHOT, ++ "FSL MC DPMCP irq0", ++ &dpmcp_dev->dev); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "devm_request_irq() failed: %d\n", ++ error); ++ return error; ++ } ++ ++ return 0; ++} ++ ++static 
int enable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ /* ++ * Enable MC command completion event to trigger DPMCP interrupt: ++ */ ++ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, ++ DPMCP_IRQ_EVENT_CMD_DONE); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_mask() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ /* ++ * Enable generation of the interrupt: ++ */ ++ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, ++ MC_CMD_FLAG_INTR_DIS, ++ dpmcp_dev->mc_handle, ++ DPMCP_IRQ_INDEX, 1); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, ++ "dpmcp_set_irq_enable() failed: %d\n", error); ++ ++ return error; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Setup MC command completion interrupt for the DPMCP device associated with a ++ * given fsl_mc_io object ++ */ ++int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ return -EINVAL; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(!fsl_mc_interrupts_supported())) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->obj_desc.irq_count != 1)) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return -EINVAL; ++ ++ error = fsl_mc_allocate_irqs(dpmcp_dev); ++ if (error < 0) ++ return error; ++ ++ error = disable_dpmcp_irq(dpmcp_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = register_dpmcp_irq_handler(dpmcp_dev); ++ if (error < 0) ++ goto error_free_irqs; ++ ++ error = enable_dpmcp_irq(dpmcp_dev); ++ if (error < 0) ++ goto error_unregister_irq_handler; ++ ++ mc_io->mc_command_done_irq_armed = true; ++ return 0; ++ ++error_unregister_irq_handler: ++ unregister_dpmcp_irq_handler(dpmcp_dev); ++ ++error_free_irqs: ++ fsl_mc_free_irqs(dpmcp_dev); ++ ++ return error; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_setup_dpmcp_irq); ++ ++/* ++ * Tear down interrupts for the DPMCP device associated with a given fsl_mc_io ++ * object ++ */ ++static void teardown_dpmcp_irq(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ if (WARN_ON(!fsl_mc_interrupts_supported())) ++ return; ++ if (WARN_ON(!dpmcp_dev->irqs)) ++ return; ++ ++ mc_io->mc_command_done_irq_armed = false; ++ (void)disable_dpmcp_irq(dpmcp_dev); ++ unregister_dpmcp_irq_handler(dpmcp_dev); ++ fsl_mc_free_irqs(dpmcp_dev); ++} ++ ++/** ++ * Creates an MC I/O object ++ * ++ * @dev: device to be associated with the MC I/O object ++ * @mc_portal_phys_addr: physical address of the MC portal to use ++ * @mc_portal_size: size in bytes of the MC portal ++ * @resource: Pointer to MC bus object allocator resource associated ++ * with this MC I/O object or NULL if none. ++ * @flags: flags for the new MC I/O object ++ * @new_mc_io: Area to return pointer to newly created MC I/O object ++ * ++ * Returns '0' on Success; Error code otherwise. 
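[Editor's note] As a quick illustration of how the create/destroy pair declared just below is meant to be used, here is a minimal sketch, not part of this patch: the example_* names are made up, the portal address and size are placeholders supplied by the caller, and no DPMCP object or special flags are passed, so the portal operates in plain polling mode.

/* Illustrative sketch only; example_* names are hypothetical. */
#include "../include/mc-sys.h"

static int example_setup_mc_io(struct device *dev,
                               phys_addr_t portal_addr,
                               uint32_t portal_size,
                               struct fsl_mc_io **new_mc_io)
{
        /* No DPMCP device and no flags: a plain polling portal */
        return fsl_create_mc_io(dev, portal_addr, portal_size,
                                NULL, 0, new_mc_io);
}

static void example_teardown_mc_io(struct fsl_mc_io *mc_io)
{
        /* Unmaps the portal and frees the fsl_mc_io object */
        fsl_destroy_mc_io(mc_io);
}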
++ */ ++int __must_check fsl_create_mc_io(struct device *dev, ++ phys_addr_t mc_portal_phys_addr, ++ uint32_t mc_portal_size, ++ struct fsl_mc_device *dpmcp_dev, ++ uint32_t flags, struct fsl_mc_io **new_mc_io) ++{ ++ int error; ++ struct fsl_mc_io *mc_io; ++ void __iomem *mc_portal_virt_addr; ++ struct resource *res; ++ ++ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL); ++ if (!mc_io) ++ return -ENOMEM; ++ ++ mc_io->dev = dev; ++ mc_io->flags = flags; ++ mc_io->portal_phys_addr = mc_portal_phys_addr; ++ mc_io->portal_size = mc_portal_size; ++ mc_io->mc_command_done_irq_armed = false; ++ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) { ++ spin_lock_init(&mc_io->spinlock); ++ } else { ++ mutex_init(&mc_io->mutex); ++ init_completion(&mc_io->mc_command_done_completion); ++ } ++ ++ res = devm_request_mem_region(dev, ++ mc_portal_phys_addr, ++ mc_portal_size, ++ "mc_portal"); ++ if (!res) { ++ dev_err(dev, ++ "devm_request_mem_region failed for MC portal %#llx\n", ++ mc_portal_phys_addr); ++ return -EBUSY; ++ } ++ ++ mc_portal_virt_addr = devm_ioremap_nocache(dev, ++ mc_portal_phys_addr, ++ mc_portal_size); ++ if (!mc_portal_virt_addr) { ++ dev_err(dev, ++ "devm_ioremap_nocache failed for MC portal %#llx\n", ++ mc_portal_phys_addr); ++ return -ENXIO; ++ } ++ ++ mc_io->portal_virt_addr = mc_portal_virt_addr; ++ if (dpmcp_dev) { ++ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); ++ if (error < 0) ++ goto error_destroy_mc_io; ++ ++ if (!(flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) && ++ fsl_mc_interrupts_supported()) { ++ error = fsl_mc_io_setup_dpmcp_irq(mc_io); ++ if (error < 0) ++ goto error_destroy_mc_io; ++ } ++ } ++ ++ *new_mc_io = mc_io; ++ return 0; ++ ++error_destroy_mc_io: ++ fsl_destroy_mc_io(mc_io); ++ return error; ++ ++} ++EXPORT_SYMBOL_GPL(fsl_create_mc_io); ++ ++/** ++ * Destroys an MC I/O object ++ * ++ * @mc_io: MC I/O object to destroy ++ */ ++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) ++{ ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (dpmcp_dev) ++ fsl_mc_io_unset_dpmcp(mc_io); ++ ++ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); ++ devm_release_mem_region(mc_io->dev, ++ mc_io->portal_phys_addr, ++ mc_io->portal_size); ++ ++ mc_io->portal_virt_addr = NULL; ++ devm_kfree(mc_io->dev, mc_io); ++} ++EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); ++ ++int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, ++ struct fsl_mc_device *dpmcp_dev) ++{ ++ int error; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_io->dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(dpmcp_dev->mc_io)) ++ return -EINVAL; ++ ++ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { ++ error = dpmcp_open(mc_io, ++ 0, ++ dpmcp_dev->obj_desc.id, ++ &dpmcp_dev->mc_handle); ++ if (error < 0) ++ return error; ++ } ++ ++ mc_io->dpmcp_dev = dpmcp_dev; ++ dpmcp_dev->mc_io = mc_io; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); ++ ++void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) ++{ ++ int error; ++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ ++ if (WARN_ON(!dpmcp_dev)) ++ return; ++ ++ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) ++ return; ++ ++ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { ++ if (dpmcp_dev->irqs) ++ teardown_dpmcp_irq(mc_io); ++ ++ error = dpmcp_close(mc_io, ++ 0, ++ dpmcp_dev->mc_handle); ++ if (error < 0) { ++ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n", ++ error); ++ } ++ } ++ ++ mc_io->dpmcp_dev = NULL; ++ dpmcp_dev->mc_io = NULL; ++} ++EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); ++ ++static int 
mc_status_to_error(enum mc_cmd_status status) ++{ ++ static const int mc_status_to_error_map[] = { ++ [MC_CMD_STATUS_OK] = 0, ++ [MC_CMD_STATUS_AUTH_ERR] = -EACCES, ++ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM, ++ [MC_CMD_STATUS_DMA_ERR] = -EIO, ++ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO, ++ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT, ++ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL, ++ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM, ++ [MC_CMD_STATUS_BUSY] = -EBUSY, ++ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP, ++ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV, ++ }; ++ ++ if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map))) ++ return -EINVAL; ++ ++ return mc_status_to_error_map[status]; ++} ++ ++static const char *mc_status_to_string(enum mc_cmd_status status) ++{ ++ static const char *const status_strings[] = { ++ [MC_CMD_STATUS_OK] = "Command completed successfully", ++ [MC_CMD_STATUS_READY] = "Command ready to be processed", ++ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error", ++ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege", ++ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error", ++ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error", ++ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out", ++ [MC_CMD_STATUS_NO_RESOURCE] = "No resources", ++ [MC_CMD_STATUS_NO_MEMORY] = "No memory available", ++ [MC_CMD_STATUS_BUSY] = "Device is busy", ++ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation", ++ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state" ++ }; ++ ++ if ((unsigned int)status >= ARRAY_SIZE(status_strings)) ++ return "Unknown MC error"; ++ ++ return status_strings[status]; ++} ++ ++/** ++ * mc_write_command - writes a command to a Management Complex (MC) portal ++ * ++ * @portal: pointer to an MC portal ++ * @cmd: pointer to a filled command ++ */ ++static inline void mc_write_command(struct mc_command __iomem *portal, ++ struct mc_command *cmd) ++{ ++ int i; ++ ++ /* copy command parameters into the portal */ ++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) ++ writeq(cmd->params[i], &portal->params[i]); ++ ++ /* submit the command by writing the header */ ++ writeq(cmd->header, &portal->header); ++} ++ ++/** ++ * mc_read_response - reads the response for the last MC command from a ++ * Management Complex (MC) portal ++ * ++ * @portal: pointer to an MC portal ++ * @resp: pointer to command response buffer ++ * ++ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. 
++ */ ++static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * ++ portal, ++ struct mc_command *resp) ++{ ++ int i; ++ enum mc_cmd_status status; ++ ++ /* Copy command response header from MC portal: */ ++ resp->header = readq(&portal->header); ++ status = MC_CMD_HDR_READ_STATUS(resp->header); ++ if (status != MC_CMD_STATUS_OK) ++ return status; ++ ++ /* Copy command response data from MC portal: */ ++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) ++ resp->params[i] = readq(&portal->params[i]); ++ ++ return status; ++} ++ ++static int mc_completion_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long jiffies_left; ++ unsigned long timeout_jiffies = ++ msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); ++ ++ if (WARN_ON(!mc_io->dpmcp_dev)) ++ return -EINVAL; ++ ++ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ return -EINVAL; ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ jiffies_left = wait_for_completion_timeout( ++ &mc_io->mc_command_done_completion, ++ timeout_jiffies); ++ if (jiffies_left == 0) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, ++ struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long jiffies_until_timeout = ++ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, ++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); ++ ++ if (time_after_eq(jiffies, jiffies_until_timeout)) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, ++ struct mc_command *cmd, ++ enum mc_cmd_status *mc_status) ++{ ++ enum mc_cmd_status status; ++ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000; ++ ++ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) % ++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0); ++ ++ for (;;) { ++ status = mc_read_response(mc_io->portal_virt_addr, cmd); ++ if (status != MC_CMD_STATUS_READY) ++ break; ++ ++ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); ++ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; ++ if (timeout_usecs == 0) ++ return -ETIMEDOUT; ++ } ++ ++ *mc_status = status; ++ return 0; ++} ++ ++/** ++ * Sends a command to the MC device using the given MC I/O object ++ * ++ * @mc_io: MC I/O object to be used ++ * @cmd: command to be sent ++ * ++ * Returns '0' on Success; Error code otherwise. 
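[Editor's note] To make the calling convention concrete, here is a minimal caller sketch, not part of this patch. It assumes the mc_encode_cmd_header() helper and the header/params layout of struct mc_command from mc-cmd.h, which the dpXXX.c files in this patch rely on; the command ID and the example_* name are hypothetical.

/* Illustrative sketch only; EXAMPLE_CMDID and example_* are hypothetical. */
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"

#define EXAMPLE_CMDID 0x999     /* made-up command ID, for illustration only */

static int example_query(struct fsl_mc_io *mc_io, uint16_t token,
                         uint64_t *out_param)
{
        struct mc_command cmd = { 0 };
        int error;

        /* Build the command header; request parameters would go into cmd.params[] */
        cmd.header = mc_encode_cmd_header(EXAMPLE_CMDID, 0, token);

        error = mc_send_command(mc_io, &cmd);
        if (error < 0)
                return error;

        /* On success the MC response overwrites cmd.params[] */
        *out_param = cmd.params[0];
        return 0;
}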
++ */ ++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) ++{ ++ int error; ++ enum mc_cmd_status status; ++ unsigned long irq_flags = 0; ++ bool dpmcp_completion_intr_disabled = ++ (MC_CMD_HDR_READ_FLAGS(cmd->header) & MC_CMD_FLAG_INTR_DIS); ++ ++ if (WARN_ON(in_irq() && ++ (!dpmcp_completion_intr_disabled || ++ !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))) ++ return -EINVAL; ++ ++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) ++ spin_lock_irqsave(&mc_io->spinlock, irq_flags); ++ else ++ mutex_lock(&mc_io->mutex); ++ ++ /* ++ * Send command to the MC hardware: ++ */ ++ mc_write_command(mc_io->portal_virt_addr, cmd); ++ ++ /* ++ * Wait for response from the MC hardware: ++ */ ++ if (mc_io->mc_command_done_irq_armed && !dpmcp_completion_intr_disabled) ++ error = mc_completion_wait(mc_io, cmd, &status); ++ else if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) ++ error = mc_polling_wait_preemptible(mc_io, cmd, &status); ++ else ++ error = mc_polling_wait_atomic(mc_io, cmd, &status); ++ ++ if (error < 0) { ++ if (error == -ETIMEDOUT) { ++ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", ++ mc_io->portal_phys_addr, ++ (unsigned int) ++ MC_CMD_HDR_READ_TOKEN(cmd->header), ++ (unsigned int) ++ MC_CMD_HDR_READ_CMDID(cmd->header)); ++ } ++ goto common_exit; ++ ++ } ++ ++ if (status != MC_CMD_STATUS_OK) { ++ pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", ++ mc_io->portal_phys_addr, ++ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), ++ (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), ++ mc_status_to_string(status), ++ (unsigned int)status); ++ ++ error = mc_status_to_error(status); ++ goto common_exit; ++ } ++ ++ error = 0; ++ ++common_exit: ++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) ++ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); ++ else ++ mutex_unlock(&mc_io->mutex); ++ ++ return error; ++} ++EXPORT_SYMBOL(mc_send_command); +diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h +new file mode 100644 +index 0000000..1ec04e4 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h +@@ -0,0 +1,62 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPBP_CMD_H ++#define _FSL_DPBP_CMD_H ++ ++/* DPBP Version */ ++#define DPBP_VER_MAJOR 2 ++#define DPBP_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPBP_CMDID_CLOSE 0x800 ++#define DPBP_CMDID_OPEN 0x804 ++#define DPBP_CMDID_CREATE 0x904 ++#define DPBP_CMDID_DESTROY 0x900 ++ ++#define DPBP_CMDID_ENABLE 0x002 ++#define DPBP_CMDID_DISABLE 0x003 ++#define DPBP_CMDID_GET_ATTR 0x004 ++#define DPBP_CMDID_RESET 0x005 ++#define DPBP_CMDID_IS_ENABLED 0x006 ++ ++#define DPBP_CMDID_SET_IRQ 0x010 ++#define DPBP_CMDID_GET_IRQ 0x011 ++#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPBP_CMDID_SET_IRQ_MASK 0x014 ++#define DPBP_CMDID_GET_IRQ_MASK 0x015 ++#define DPBP_CMDID_GET_IRQ_STATUS 0x016 ++#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 ++#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 ++#endif /* _FSL_DPBP_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h +new file mode 100644 +index 0000000..9856bb8 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpbp.h +@@ -0,0 +1,438 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef __FSL_DPBP_H ++#define __FSL_DPBP_H ++ ++/* Data Path Buffer Pool API ++ * Contains initialization APIs and runtime control APIs for DPBP ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpbp_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpbp_id: DPBP unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpbp_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpbp_id, ++ uint16_t *token); ++ ++/** ++ * dpbp_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpbp_cfg - Structure representing DPBP configuration ++ * @options: place holder ++ */ ++struct dpbp_cfg { ++ uint32_t options; ++}; ++ ++/** ++ * dpbp_create() - Create the DPBP object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPBP object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpbp_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpbp_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpbp_destroy() - Destroy the DPBP object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpbp_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_enable() - Enable the DPBP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_disable() - Disable the DPBP. 
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpbp_is_enabled() - Check if the DPBP is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpbp_reset() - Reset the DPBP, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpbp_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpbp_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpbp_irq_cfg *irq_cfg); ++ ++/** ++ * dpbp_get_irq() - Get IRQ information from the DPBP. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpbp_irq_cfg *irq_cfg); ++ ++/** ++ * dpbp_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable control's the ++ * overall interrupt state. if the interrupt is disabled no causes will cause ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpbp_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpbp_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpbp_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpbp_get_irq_status() - Get the current status of any pending interrupts. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpbp_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. 
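[Editor's note] Taken together, the DPBP IRQ calls above are typically used in a program-mask-then-enable sequence. A minimal sketch follows, not part of this patch: the example_* name is made up, and the MSI address/data pair and the all-ones cause mask are placeholders that would normally come from the MC bus interrupt allocation code.

/* Illustrative sketch only; example_* is hypothetical. */
#include "../include/dpbp.h"

static int example_arm_dpbp_irq(struct fsl_mc_io *mc_io, uint16_t token,
                                uint64_t msi_addr, uint32_t msi_data,
                                int irq_num)
{
        struct dpbp_irq_cfg irq_cfg = {
                .addr = msi_addr,
                .val = msi_data,
                .irq_num = irq_num,
        };
        int error;

        /* Program the message-based interrupt for IRQ index 0 */
        error = dpbp_set_irq(mc_io, 0, token, 0, &irq_cfg);
        if (error)
                return error;

        /* Consider every cause for asserting the IRQ... */
        error = dpbp_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
        if (error)
                return error;

        /* ...and only then turn overall interrupt generation on */
        return dpbp_set_irq_enable(mc_io, 0, token, 0, 1);
}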
++ */ ++int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpbp_attr - Structure representing DPBP attributes ++ * @id: DPBP object ID ++ * @version: DPBP version ++ * @bpid: Hardware buffer pool ID; should be used as an argument in ++ * acquire/release operations on buffers ++ */ ++struct dpbp_attr { ++ int id; ++ /** ++ * struct version - Structure representing DPBP version ++ * @major: DPBP major version ++ * @minor: DPBP minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint16_t bpid; ++}; ++ ++/** ++ * dpbp_get_attributes - Retrieve DPBP attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_attr *attr); ++ ++/** ++ * DPBP notifications options ++ */ ++ ++/** ++ * BPSCN write will attempt to allocate into a cache (coherent write) ++ */ ++#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 ++ ++/** ++ * struct dpbp_notification_cfg - Structure representing DPBP notifications ++ * towards software ++ * @depletion_entry: below this threshold the pool is "depleted"; ++ * set it to '0' to disable it ++ * @depletion_exit: greater than or equal to this threshold the pool exit its ++ * "depleted" state ++ * @surplus_entry: above this threshold the pool is in "surplus" state; ++ * set it to '0' to disable it ++ * @surplus_exit: less than or equal to this threshold the pool exit its ++ * "surplus" state ++ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' ++ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), ++ * must be 16B aligned. ++ * @message_ctx: The context that will be part of the BPSCN message and will ++ * be written to 'message_iova' ++ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values ++ */ ++struct dpbp_notification_cfg { ++ uint32_t depletion_entry; ++ uint32_t depletion_exit; ++ uint32_t surplus_entry; ++ uint32_t surplus_exit; ++ uint64_t message_iova; ++ uint64_t message_ctx; ++ uint16_t options; ++}; ++ ++/** ++ * dpbp_set_notifications() - Set notifications towards software ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @cfg: notifications configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpbp_set_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg); ++ ++/** ++ * dpbp_get_notifications() - Get the notifications configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPBP object ++ * @cfg: notifications configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
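[Editor's note] A minimal sketch of programming depletion notifications with the structure and call documented above, not part of this patch: the example_* name and the threshold values are arbitrary, and bpscn_iova must point to 16-byte aligned, DMA-able memory owned by the caller.

/* Illustrative sketch only; example_* and the thresholds are hypothetical. */
#include "../include/dpbp.h"

static int example_enable_depletion_notif(struct fsl_mc_io *mc_io,
                                          uint16_t token,
                                          uint64_t bpscn_iova)
{
        struct dpbp_notification_cfg cfg = {
                .depletion_entry = 64,  /* pool is "depleted" below 64 buffers */
                .depletion_exit = 128,  /* leaves the state at 128 buffers */
                .surplus_entry = 0,     /* surplus notifications disabled */
                .surplus_exit = 0,
                .message_iova = bpscn_iova,
                .message_ctx = 0,
                .options = DPBP_NOTIF_OPT_COHERENT_WRITE,
        };

        return dpbp_set_notifications(mc_io, 0, token, &cfg);
}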
++ */ ++int dpbp_get_notifications(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpbp_notification_cfg *cfg); ++ ++#endif /* __FSL_DPBP_H */ +diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h +new file mode 100644 +index 0000000..ecb40d0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h +@@ -0,0 +1,162 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++#ifndef _FSL_DPCON_CMD_H ++#define _FSL_DPCON_CMD_H ++ ++/* DPCON Version */ ++#define DPCON_VER_MAJOR 2 ++#define DPCON_VER_MINOR 2 ++ ++/* Command IDs */ ++#define DPCON_CMDID_CLOSE 0x800 ++#define DPCON_CMDID_OPEN 0x808 ++#define DPCON_CMDID_CREATE 0x908 ++#define DPCON_CMDID_DESTROY 0x900 ++ ++#define DPCON_CMDID_ENABLE 0x002 ++#define DPCON_CMDID_DISABLE 0x003 ++#define DPCON_CMDID_GET_ATTR 0x004 ++#define DPCON_CMDID_RESET 0x005 ++#define DPCON_CMDID_IS_ENABLED 0x006 ++ ++#define DPCON_CMDID_SET_IRQ 0x010 ++#define DPCON_CMDID_GET_IRQ 0x011 ++#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPCON_CMDID_SET_IRQ_MASK 0x014 ++#define DPCON_CMDID_GET_IRQ_MASK 0x015 ++#define DPCON_CMDID_GET_IRQ_STATUS 0x016 ++#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPCON_CMDID_SET_NOTIFICATION 0x100 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_OPEN(cmd, dpcon_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_IS_ENABLED(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 1, int, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ ++ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, 
offset, width, type, arg_name */ ++#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_RSP_GET_ATTR(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ ++ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ ++} while (0) ++ ++#endif /* _FSL_DPCON_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h +new file mode 100644 +index 0000000..2555be5 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpcon.h +@@ -0,0 +1,407 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
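[Editor's note] To show how the DPCON command/response macros above are meant to be combined with mc_send_command(), here is a sketch modeled on the pattern used by the dpXXX.c files in this patch; it is not part of the patch itself (the real implementation lives in dpcon.c) and it assumes the mc_encode_cmd_header() helper from mc-cmd.h.

/* Illustrative sketch only; the real dpcon_get_attributes() is in dpcon.c. */
#include "../include/mc-sys.h"
#include "../include/mc-cmd.h"
#include "../include/dpcon.h"
#include "../include/dpcon-cmd.h"

static int example_dpcon_get_attributes(struct fsl_mc_io *mc_io,
                                         uint32_t cmd_flags,
                                         uint16_t token,
                                         struct dpcon_attr *attr)
{
        struct mc_command cmd = { 0 };
        int err;

        /* GET_ATTR carries no request parameters, only a command header */
        cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
                                          cmd_flags, token);

        err = mc_send_command(mc_io, &cmd);
        if (err)
                return err;

        /* Unmarshal the response with the macro defined above */
        DPCON_RSP_GET_ATTR(cmd, attr);
        return 0;
}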
++ */ ++#ifndef __FSL_DPCON_H ++#define __FSL_DPCON_H ++ ++/* Data Path Concentrator API ++ * Contains initialization APIs and runtime control APIs for DPCON ++ */ ++ ++struct fsl_mc_io; ++ ++/** General DPCON macros */ ++ ++/** ++ * Use it to disable notifications; see dpcon_set_notification() ++ */ ++#define DPCON_INVALID_DPIO_ID (int)(-1) ++ ++/** ++ * dpcon_open() - Open a control session for the specified object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @dpcon_id: DPCON unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpcon_create() function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int dpcon_id, ++ uint16_t *token); ++ ++/** ++ * dpcon_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpcon_cfg - Structure representing DPCON configuration ++ * @num_priorities: Number of priorities for the DPCON channel (1-8) ++ */ ++struct dpcon_cfg { ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpcon_create() - Create the DPCON object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPCON object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpcon_open() function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_create(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ const struct dpcon_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpcon_destroy() - Destroy the DPCON object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; error code otherwise. 
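[Editor's note] A minimal open/close sketch for a DPCON already declared in the DPL, not part of this patch: the example_* name is made up and dpcon_id would normally come from the object description provided by the DPRC/bus code.

/* Illustrative sketch only; example_* is hypothetical. */
#include "../include/dpcon.h"

static int example_probe_dpcon(struct fsl_mc_io *mc_io, int dpcon_id)
{
        uint16_t token;
        int error;

        /* Open a control session and obtain the authentication token */
        error = dpcon_open(mc_io, 0, dpcon_id, &token);
        if (error)
                return error;

        /* ... configure and use the channel through 'token' ... */

        /* Close the session once the object is no longer needed */
        return dpcon_close(mc_io, 0, token);
}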
++ */ ++int dpcon_destroy(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_enable() - Enable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_disable() - Disable the DPCON ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_disable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * dpcon_is_enabled() - Check if the DPCON is enabled. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @en: Returns '1' if object is enabled; '0' otherwise ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_is_enabled(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *en); ++ ++/** ++ * dpcon_reset() - Reset the DPCON, returns the object to initial state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_reset(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * struct dpcon_irq_cfg - IRQ configuration ++ * @addr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dpcon_irq_cfg { ++ uint64_t addr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dpcon_irq_cfg *irq_cfg); ++ ++/** ++ * dpcon_get_irq() - Get IRQ information from the DPCON. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dpcon_irq_cfg *irq_cfg); ++ ++/** ++ * dpcon_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. 
The enable/disable setting controls the ++ * overall interrupt state. If the interrupt is disabled, no cause will trigger ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpcon_get_irq_enable() - Get overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpcon_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpcon_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpcon_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpcon_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise.
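++ *
++ * A minimal read-then-clear (W1C) sketch; it assumes 'mc_io' and 'token'
++ * were obtained via dpcon_open(), uses IRQ index 0 and no special command
++ * flags, and elides error handling:
++ *
++ *    uint32_t status = 0;
++ *
++ *    dpcon_get_irq_status(mc_io, 0, token, 0, &status);
++ *    if (status)
++ *        dpcon_clear_irq_status(mc_io, 0, token, 0, status);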
++ */ ++int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpcon_attr - Structure representing DPCON attributes ++ * @id: DPCON object ID ++ * @version: DPCON version ++ * @qbman_ch_id: Channel ID to be used by dequeue operation ++ * @num_priorities: Number of priorities for the DPCON channel (1-8) ++ */ ++struct dpcon_attr { ++ int id; ++ /** ++ * struct version - DPCON version ++ * @major: DPCON major version ++ * @minor: DPCON minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++ uint16_t qbman_ch_id; ++ uint8_t num_priorities; ++}; ++ ++/** ++ * dpcon_get_attributes() - Retrieve DPCON attributes. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @attr: Object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpcon_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_attr *attr); ++ ++/** ++ * struct dpcon_notification_cfg - Structure representing notification parameters ++ * @dpio_id: DPIO object ID; must be configured with a notification channel; ++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; ++ * @priority: Priority selection within the DPIO channel; valid values ++ * are 0-7, depending on the number of priorities in that channel ++ * @user_ctx: User context value provided with each CDAN message ++ */ ++struct dpcon_notification_cfg { ++ int dpio_id; ++ uint8_t priority; ++ uint64_t user_ctx; ++}; ++ ++/** ++ * dpcon_set_notification() - Set DPCON notification destination ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPCON object ++ * @cfg: Notification parameters ++ * ++ * Return: '0' on Success; Error code otherwise ++ */ ++int dpcon_set_notification(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dpcon_notification_cfg *cfg); ++ ++#endif /* __FSL_DPCON_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmac-cmd.h b/drivers/staging/fsl-mc/include/dpmac-cmd.h +new file mode 100644 +index 0000000..c123aab +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmac-cmd.h +@@ -0,0 +1,192 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPMAC_CMD_H ++#define _FSL_DPMAC_CMD_H ++ ++/* DPMAC Version */ ++#define DPMAC_VER_MAJOR 3 ++#define DPMAC_VER_MINOR 0 ++ ++/* Command IDs */ ++#define DPMAC_CMDID_CLOSE 0x800 ++#define DPMAC_CMDID_OPEN 0x80c ++#define DPMAC_CMDID_CREATE 0x90c ++#define DPMAC_CMDID_DESTROY 0x900 ++ ++#define DPMAC_CMDID_GET_ATTR 0x004 ++#define DPMAC_CMDID_RESET 0x005 ++ ++#define DPMAC_CMDID_SET_IRQ 0x010 ++#define DPMAC_CMDID_GET_IRQ 0x011 ++#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 ++#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 ++#define DPMAC_CMDID_SET_IRQ_MASK 0x014 ++#define DPMAC_CMDID_GET_IRQ_MASK 0x015 ++#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 ++#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 ++ ++#define DPMAC_CMDID_MDIO_READ 0x0c0 ++#define DPMAC_CMDID_MDIO_WRITE 0x0c1 ++#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 ++#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 ++#define DPMAC_CMDID_GET_COUNTER 0x0c4 ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CREATE(cmd, cfg) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ ++ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_addr, irq_val, user_irq_id) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ ++ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_val);\ ++ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ ++ MC_CMD_OP(cmd, 2, 0, 32, int, user_irq_id); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ(cmd, type, irq_addr, irq_val, user_irq_id) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_val); \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ ++ MC_RSP_OP(cmd, 2, 0, 32, int, user_irq_id); \ ++ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ ++ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ ++ MC_CMD_OP(cmd, 0, 32, 
8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index) \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ ++ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ ++ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ ++ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ ++ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ ++ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ ++ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ ++ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ ++ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_MDIO_READ(cmd, data) \ ++ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ ++ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ ++ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ ++do { \ ++ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ ++do { \ ++ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ ++ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ ++ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ ++} while (0) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_CMD_GET_COUNTER(cmd, type) \ ++ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) ++ ++/* cmd, param, offset, width, type, arg_name */ ++#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ ++ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) ++ ++#endif /* _FSL_DPMAC_CMD_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmac.h b/drivers/staging/fsl-mc/include/dpmac.h +new file mode 100644 +index 0000000..88091b5 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmac.h +@@ -0,0 +1,528 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMAC_H ++#define __FSL_DPMAC_H ++ ++/* Data Path MAC API ++ * Contains initialization APIs and runtime control APIs for DPMAC ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * dpmac_open() - Open a control session for the specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @dpmac_id: DPMAC unique ID ++ * @token: Returned token; use in subsequent API calls ++ * ++ * This function can be used to open a control session for an ++ * already created object; an object may have been declared in ++ * the DPL or by calling the dpmac_create function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent commands for ++ * this specific object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_open(struct fsl_mc_io *mc_io, int dpmac_id, uint16_t *token); ++ ++/** ++ * dpmac_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. 
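++ *
++ * A minimal open/query/close sketch (error handling elided; 'mc_io' and
++ * 'dpmac_id' are assumed to be supplied by the caller):
++ *
++ *    uint16_t token;
++ *    struct dpmac_attr attr;
++ *
++ *    dpmac_open(mc_io, dpmac_id, &token);
++ *    dpmac_get_attributes(mc_io, token, &attr);
++ *    dpmac_close(mc_io, token);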
++ */ ++int dpmac_close(struct fsl_mc_io *mc_io, uint16_t token); ++ ++/** ++ * enum dpmac_link_type - DPMAC link type ++ * @DPMAC_LINK_TYPE_NONE: No link ++ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type ++ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID ++ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type ++ */ ++enum dpmac_link_type { ++ DPMAC_LINK_TYPE_NONE, ++ DPMAC_LINK_TYPE_FIXED, ++ DPMAC_LINK_TYPE_PHY, ++ DPMAC_LINK_TYPE_BACKPLANE ++}; ++ ++/** ++ * enum dpmac_eth_if - DPMAC Ethernet interface ++ * @DPMAC_ETH_IF_MII: MII interface ++ * @DPMAC_ETH_IF_RMII: RMII interface ++ * @DPMAC_ETH_IF_SMII: SMII interface ++ * @DPMAC_ETH_IF_GMII: GMII interface ++ * @DPMAC_ETH_IF_RGMII: RGMII interface ++ * @DPMAC_ETH_IF_SGMII: SGMII interface ++ * @DPMAC_ETH_IF_XGMII: XGMII interface ++ * @DPMAC_ETH_IF_QSGMII: QSGMII interface ++ * @DPMAC_ETH_IF_XAUI: XAUI interface ++ * @DPMAC_ETH_IF_XFI: XFI interface ++ */ ++enum dpmac_eth_if { ++ DPMAC_ETH_IF_MII, ++ DPMAC_ETH_IF_RMII, ++ DPMAC_ETH_IF_SMII, ++ DPMAC_ETH_IF_GMII, ++ DPMAC_ETH_IF_RGMII, ++ DPMAC_ETH_IF_SGMII, ++ DPMAC_ETH_IF_XGMII, ++ DPMAC_ETH_IF_QSGMII, ++ DPMAC_ETH_IF_XAUI, ++ DPMAC_ETH_IF_XFI ++}; ++ ++/** ++ * struct dpmac_cfg - Structure representing DPMAC configuration ++ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, ++ * the MAC IDs are continuous. ++ * For example: 2 WRIOPs, 16 MACs in each: ++ * MAC IDs for the 1st WRIOP: 1-16, ++ * MAC IDs for the 2nd WRIOP: 17-32. ++ */ ++struct dpmac_cfg { ++ int mac_id; ++}; ++ ++/** ++ * dpmac_create() - Create the DPMAC object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cfg: Configuration structure ++ * @token: Returned token; use in subsequent API calls ++ * ++ * Create the DPMAC object, allocate required resources and ++ * perform required initialization. ++ * ++ * The object can be created either by declaring it in the ++ * DPL file, or by calling this function. ++ * This function returns a unique authentication token, ++ * associated with the specific object ID and the specific MC ++ * portal; this token must be used in all subsequent calls to ++ * this specific object. For objects that are created using the ++ * DPL file, call dpmac_open function to get an authentication ++ * token first. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_create(struct fsl_mc_io *mc_io, ++ const struct dpmac_cfg *cfg, ++ uint16_t *token); ++ ++/** ++ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpmac_destroy(struct fsl_mc_io *mc_io, uint16_t token); ++ ++/* DPMAC IRQ Index and Events */ ++ ++/* IRQ index */ ++#define DPMAC_IRQ_INDEX 0 ++/* IRQ event - indicates a change in link state */ ++#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 ++/* IRQ event - Indicates that the link state changed */ ++#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 ++ ++/** ++ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_addr: Address that must be written to ++ * signal a message-based interrupt ++ * @irq_val: Value to write into irq_addr address ++ * @user_irq_id: A user defined number associated with this IRQ ++ * ++ * Return: '0' on Success; Error code otherwise.
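++ *
++ * Sketch (the message address/data pair would normally come from the MSI
++ * setup code rather than being hard-coded; 'msg_addr' and 'msg_data' are
++ * placeholders, and a user_irq_id of 0 is arbitrary):
++ *
++ *    dpmac_set_irq(mc_io, token, DPMAC_IRQ_INDEX, msg_addr, msg_data, 0);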
++ */ ++int dpmac_set_irq(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint64_t irq_addr, ++ uint32_t irq_val, ++ int user_irq_id); ++ ++/** ++ * dpmac_get_irq() - Get IRQ information from the DPMAC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_addr: Returned address that must be written to ++ * signal the message-based interrupt ++ * @irq_val: Value to write into irq_addr address ++ * @user_irq_id: A user defined number associated with this IRQ ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ uint64_t *irq_addr, ++ uint32_t *irq_val, ++ int *user_irq_id); ++ ++/** ++ * dpmac_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable setting controls the ++ * overall interrupt state. If the interrupt is disabled, no cause will trigger ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dpmac_get_irq_enable() - Get overall interrupt state ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dpmac_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dpmac_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_irq_status(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dpmac_clear_irq_status() - Clear a pending interrupt's status ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @irq_index: The interrupt index to configure ++ * @status: Bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dpmac_attr - Structure representing DPMAC attributes ++ * @id: DPMAC object ID ++ * @phy_id: PHY ID ++ * @link_type: link type ++ * @eth_if: Ethernet interface ++ * @max_rate: Maximum supported rate - in Mbps ++ * @version: DPMAC version ++ */ ++struct dpmac_attr { ++ int id; ++ int phy_id; ++ enum dpmac_link_type link_type; ++ enum dpmac_eth_if eth_if; ++ uint32_t max_rate; ++ /** ++ * struct version - Structure representing DPMAC version ++ * @major: DPMAC major version ++ * @minor: DPMAC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dpmac_get_attributes - Retrieve DPMAC attributes. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @token: Token of DPMAC object ++ * @attr: Returned object's attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_attributes(struct fsl_mc_io *mc_io, ++ uint16_t token, ++ struct dpmac_attr *attr); ++ ++/** ++ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters ++ * @phy_addr: MDIO device address ++ * @reg: Address of the register within the Clause 45 PHY device from which data ++ * is to be read ++ * @data: Data read/write from/to MDIO ++ */ ++struct dpmac_mdio_cfg { ++ uint8_t phy_addr; ++ uint8_t reg; ++ uint16_t data; ++}; ++ ++/** ++ * dpmac_mdio_read() - Perform MDIO read transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_mdio_read(struct fsl_mc_io *mc_io, uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++ ++/** ++ * dpmac_mdio_write() - Perform MDIO write transaction ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @cfg: Structure with MDIO transaction parameters ++ * ++ * Return: '0' on Success; Error code otherwise. 
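++ *
++ * Read-modify-write sketch (PHY address, register and bit values are
++ * illustrative only; the read result is assumed to be returned in the
++ * 'data' member of the passed configuration):
++ *
++ *    struct dpmac_mdio_cfg mdio = { .phy_addr = 4, .reg = 0x20 };
++ *
++ *    dpmac_mdio_read(mc_io, token, &mdio);
++ *    mdio.data |= 0x1;
++ *    dpmac_mdio_write(mc_io, token, &mdio);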
++ */ ++int dpmac_mdio_write(struct fsl_mc_io *mc_io, uint16_t token, ++ struct dpmac_mdio_cfg *cfg); ++ ++/* DPMAC link configuration/state options */ ++ ++/* Enable auto-negotiation */ ++#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL ++/* Enable half-duplex mode */ ++#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL ++/* Enable pause frames */ ++#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL ++/* Enable asymmetric pause frames */ ++#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++ ++/** ++ * struct dpmac_link_cfg - Structure representing DPMAC link configuration ++ * @rate: Link's rate - in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ */ ++struct dpmac_link_cfg { ++ uint32_t rate; ++ uint64_t options; ++}; ++ ++/** ++ * dpmac_get_link_cfg() - Get Ethernet link configuration ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @cfg: Returned structure with the link configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, uint16_t token, ++ struct dpmac_link_cfg *cfg); ++ ++/** ++ * struct dpmac_link_state - DPMAC link configuration request ++ * @rate: Rate in Mbps ++ * @options: Enable/Disable DPMAC link cfg features (bitmap) ++ * @up: Link state ++ */ ++struct dpmac_link_state { ++ uint32_t rate; ++ uint64_t options; ++ int up; ++}; ++ ++/** ++ * dpmac_set_link_state() - Set the Ethernet link status ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @link_state: Link state configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmac_set_link_state(struct fsl_mc_io *mc_io, uint16_t token, ++ struct dpmac_link_state *link_state); ++ ++/** ++ * enum dpmac_counter - DPMAC counter types ++ * @DPMAC_CNT_ING_FRAME_64: counts 64-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-octet frame, good or bad. ++ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-octet frame and larger ++ * (up to max frame length specified), ++ * good or bad. ++ * @DPMAC_CNT_ING_FRAG: counts packet which is shorter than 64 octets received ++ * with a wrong CRC ++ * @DPMAC_CNT_ING_JABBER: counts packet longer than the maximum frame length ++ * specified, with a bad frame check sequence. ++ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped packet due to internal errors. ++ * Occurs when a receive FIFO overflows. ++ * Includes also packets truncated as a result of ++ * the receive FIFO overflow. ++ * @DPMAC_CNT_ING_ALIGN_ERR: counts frame with an alignment error ++ * (optionally used for wrong SFD) ++ * @DPMAC_CNT_EGR_UNDERSIZED: counts packet transmitted that was less than 64 ++ * octets long with a good CRC. ++ * @DPMAC_CNT_ING_OVERSIZED: counts packet longer than the maximum frame length ++ * specified, with a good frame check sequence. ++ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frame (regular and PFC). ++ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frame transmitted ++ * (regular and PFC). ++ * @DPMAC_CNT_ING_BYTE: counts octet received except preamble for all valid ++ * frames and valid pause frames.
++ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frame ++ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frame ++ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad packet received. ++ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frame ++ * @DPMAC_CNT_ING_ERR_FRAME: counts frame received with an error ++ * (except for undersized/fragment frame) ++ * @DPMAC_CNT_EGR_BYTE: counts octet transmitted except preamble for all valid ++ * frames and valid pause frames transmitted. ++ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frame ++ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frame ++ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frame ++ * @DPMAC_CNT_EGR_ERR_FRAME: counts frame transmitted with an error ++ * @DPMAC_CNT_ING_GOOD_FRAME: counts frame received without error, including ++ * pause frames. ++ */ ++enum dpmac_counter { ++ DPMAC_CNT_ING_FRAME_64, ++ DPMAC_CNT_ING_FRAME_127, ++ DPMAC_CNT_ING_FRAME_255, ++ DPMAC_CNT_ING_FRAME_511, ++ DPMAC_CNT_ING_FRAME_1023, ++ DPMAC_CNT_ING_FRAME_1518, ++ DPMAC_CNT_ING_FRAME_1519_MAX, ++ DPMAC_CNT_ING_FRAG, ++ DPMAC_CNT_ING_JABBER, ++ DPMAC_CNT_ING_FRAME_DISCARD, ++ DPMAC_CNT_ING_ALIGN_ERR, ++ DPMAC_CNT_EGR_UNDERSIZED, ++ DPMAC_CNT_ING_OVERSIZED, ++ DPMAC_CNT_ING_VALID_PAUSE_FRAME, ++ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, ++ DPMAC_CNT_ING_BYTE, ++ DPMAC_CNT_ING_MCAST_FRAME, ++ DPMAC_CNT_ING_BCAST_FRAME, ++ DPMAC_CNT_ING_ALL_FRAME, ++ DPMAC_CNT_ING_UCAST_FRAME, ++ DPMAC_CNT_ING_ERR_FRAME, ++ DPMAC_CNT_EGR_BYTE, ++ DPMAC_CNT_EGR_MCAST_FRAME, ++ DPMAC_CNT_EGR_BCAST_FRAME, ++ DPMAC_CNT_EGR_UCAST_FRAME, ++ DPMAC_CNT_EGR_ERR_FRAME, ++ DPMAC_CNT_ING_GOOD_FRAME ++}; ++ ++/** ++ * dpmac_get_counter() - Read a specific DPMAC counter ++ * @mc_io: Pointer to opaque I/O object ++ * @token: Token of DPMAC object ++ * @type: The requested counter ++ * @counter: Returned counter value ++ * ++ * Return: The requested counter; '0' otherwise. ++ */ ++int dpmac_get_counter(struct fsl_mc_io *mc_io, uint16_t token, ++ enum dpmac_counter type, ++ uint64_t *counter); ++ ++#endif /* __FSL_DPMAC_H */ +diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h +new file mode 100644 +index 0000000..d1c4588 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dpmng.h +@@ -0,0 +1,80 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPMNG_H ++#define __FSL_DPMNG_H ++ ++/* Management Complex General API ++ * Contains general API for the Management Complex firmware ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * struct mc_version ++ * @major: Major version number: incremented on API compatibility changes ++ * @minor: Minor version number: incremented on API additions (that are ++ * backward compatible); reset when major version is incremented ++ * @revision: Internal revision number: incremented on implementation changes ++ * and/or bug fixes that have no impact on API ++ */ ++struct mc_version { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t revision; ++}; ++ ++/** ++ * mc_get_version() - Retrieves the Management Complex firmware ++ * version information ++ * @mc_io: Pointer to opaque I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @mc_ver_info: Returned version information structure ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int mc_get_version(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ struct mc_version *mc_ver_info); ++ ++/** ++ * dpmng_get_container_id() - Get container ID associated with a given portal. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @container_id: Requested container ID ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpmng_get_container_id(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int *container_id); ++ ++#endif /* __FSL_DPMNG_H */ +diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h +new file mode 100644 +index 0000000..810ded0 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/dprc.h +@@ -0,0 +1,990 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. 
++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef _FSL_DPRC_H ++#define _FSL_DPRC_H ++ ++#include "mc-cmd.h" ++ ++/* Data Path Resource Container API ++ * Contains DPRC API for managing and querying DPAA resources ++ */ ++ ++struct fsl_mc_io; ++ ++/** ++ * Set this value as the icid value in dprc_cfg structure when creating a ++ * container, in case the ICID is not selected by the user and should be ++ * allocated by the DPRC from the pool of ICIDs. ++ */ ++#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) ++ ++/** ++ * Set this value as the portal_id value in dprc_cfg structure when creating a ++ * container, in case the portal ID is not specifically selected by the ++ * user and should be allocated by the DPRC from the pool of portal ids. ++ */ ++#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) ++ ++/** ++ * dprc_open() - Open DPRC object for use ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @container_id: Container ID to open ++ * @token: Returned token of DPRC object ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Required before any operation on the object. ++ */ ++int dprc_open(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ int container_id, ++ uint16_t *token); ++ ++/** ++ * dprc_close() - Close the control session of the object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * ++ * After this function is called, no further operations are ++ * allowed on the object without opening a new control session. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_close(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token); ++ ++/** ++ * Container general options ++ * ++ * These options may be selected at container creation by the container creator ++ * and can be retrieved using dprc_get_attributes() ++ */ ++ ++/* Spawn Policy Option allowed - Indicates that the new container is allowed ++ * to spawn and have its own child containers. 
++ */ ++#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 ++ ++/* General Container allocation policy - Indicates that the new container is ++ * allowed to allocate requested resources from its parent container; if not ++ * set, the container is only allowed to use resources in its own pools; Note ++ * that this is a container's global policy, but the parent container may ++ * override it and set specific quota per resource type. ++ */ ++#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 ++ ++/* Object initialization allowed - software context associated with this ++ * container is allowed to invoke object initialization operations. ++ */ ++#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 ++ ++/* Topology change allowed - software context associated with this ++ * container is allowed to invoke topology operations, such as attach/detach ++ * of network objects. ++ */ ++#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 ++ ++/* AIOP - Indicates that the container belongs to AIOP. */ ++#define DPRC_CFG_OPT_AIOP 0x00000020 ++ ++/* IRQ Config - Indicates that the container is allowed to configure its IRQs. */ ++#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 ++ ++/** ++ * struct dprc_cfg - Container configuration options ++ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free ++ * ICID value is allocated by the DPRC ++ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free ++ * portal ID is allocated by the DPRC ++ * @options: Combination of 'DPRC_CFG_OPT_' options ++ * @label: Object's label ++ */ ++struct dprc_cfg { ++ uint16_t icid; ++ int portal_id; ++ uint64_t options; ++ char label[16]; ++}; ++ ++/** ++ * dprc_create_container() - Create child container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @cfg: Child container configuration ++ * @child_container_id: Returned child container ID ++ * @child_portal_offset: Returned child portal offset from MC portal base ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_create_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_cfg *cfg, ++ int *child_container_id, ++ uint64_t *child_portal_offset); ++ ++/** ++ * dprc_destroy_container() - Destroy child container. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the container to destroy ++ * ++ * This function terminates the child container, so following this call the ++ * child container ID becomes invalid. ++ * ++ * Notes: ++ * - All resources and objects of the destroyed container are returned to the ++ * parent container or destroyed if they were created by the destroyed container. ++ * - This function destroys all the child containers of the specified ++ * container prior to destroying the container itself. ++ * ++ * warning: Only the parent container is allowed to destroy a child container; ++ * Container 0 can't be destroyed ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ */ ++int dprc_destroy_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id); ++ ++/** ++ * dprc_reset_container - Reset child container.
++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the container to reset ++ * ++ * In case a software context crashes or becomes non-responsive, the parent ++ * may wish to reset its resources container before the software context is ++ * restarted. ++ * ++ * This routine informs all objects assigned to the child container that the ++ * container is being reset, so they may perform any cleanup operations that are ++ * needed. All objects handles that were owned by the child container shall be ++ * closed. ++ * ++ * Note that such request may be submitted even if the child software context ++ * has not crashed, but the resulting object cleanup operations will not be ++ * aware of that. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_reset_container(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id); ++ ++/* IRQ */ ++ ++/* IRQ index */ ++#define DPRC_IRQ_INDEX 0 ++ ++/* Number of dprc's IRQs */ ++#define DPRC_NUM_OF_IRQS 1 ++ ++/* DPRC IRQ events */ ++ ++/* IRQ event - Indicates that a new object added to the container */ ++#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 ++ ++/* IRQ event - Indicates that an object was removed from the container */ ++#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 ++ ++/* IRQ event - Indicates that resources added to the container */ ++#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 ++ ++/* IRQ event - Indicates that resources removed from the container */ ++#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 ++ ++/* IRQ event - Indicates that one of the descendant containers that opened by ++ * this container is destroyed ++ */ ++#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 ++ ++/* IRQ event - Indicates that on one of the container's opened object is ++ * destroyed ++ */ ++#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 ++ ++/* Irq event - Indicates that object is created at the container */ ++#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 ++ ++/** ++ * struct dprc_irq_cfg - IRQ configuration ++ * @paddr: Address that must be written to signal a message-based interrupt ++ * @val: Value to write into irq_addr address ++ * @irq_num: A user defined number associated with this IRQ ++ */ ++struct dprc_irq_cfg { ++ uint64_t paddr; ++ uint32_t val; ++ int irq_num; ++}; ++ ++/** ++ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: Identifies the interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_irq() - Get IRQ information from the DPRC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dprc_get_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_set_irq_enable() - Set overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @en: Interrupt state - enable = 1, disable = 0 ++ * ++ * Allows GPP software to control when interrupts are generated. ++ * Each interrupt can have up to 32 causes. The enable/disable setting controls the ++ * overall interrupt state. If the interrupt is disabled, no cause will trigger ++ * an interrupt. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t en); ++ ++/** ++ * dprc_get_irq_enable() - Get overall interrupt state. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @en: Returned interrupt state - enable = 1, disable = 0 ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_irq_enable(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint8_t *en); ++ ++/** ++ * dprc_set_irq_mask() - Set interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Event mask to trigger interrupt; ++ * each bit: ++ * 0 = ignore event ++ * 1 = consider event for asserting IRQ ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t mask); ++ ++/** ++ * dprc_get_irq_mask() - Get interrupt mask. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @mask: Returned event mask to trigger interrupt ++ * ++ * Every interrupt can have up to 32 causes and the interrupt model supports ++ * masking/unmasking each cause independently ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_irq_mask(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *mask); ++ ++/** ++ * dprc_get_irq_status() - Get the current status of any pending interrupts. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @status: Returned interrupts status - one bit per cause: ++ * 0 = no interrupt pending ++ * 1 = interrupt pending ++ * ++ * Return: '0' on Success; Error code otherwise.
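++ *
++ * Polling sketch using the container event bits defined above ('rescan_objects'
++ * is a placeholder for whatever the caller does on a change; cmd_flags of 0,
++ * error handling elided):
++ *
++ *    uint32_t status = 0;
++ *
++ *    dprc_get_irq_status(mc_io, 0, token, DPRC_IRQ_INDEX, &status);
++ *    if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | DPRC_IRQ_EVENT_OBJ_REMOVED))
++ *        rescan_objects();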
++ */ ++int dprc_get_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t *status); ++ ++/** ++ * dprc_clear_irq_status() - Clear a pending interrupt's status ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @irq_index: The interrupt index to configure ++ * @status: bits to clear (W1C) - one bit per cause: ++ * 0 = don't change ++ * 1 = clear status bit ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_clear_irq_status(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ uint8_t irq_index, ++ uint32_t status); ++ ++/** ++ * struct dprc_attributes - Container attributes ++ * @container_id: Container's ID ++ * @icid: Container's ICID ++ * @portal_id: Container's portal ID ++ * @options: Container's options as set at container's creation ++ * @version: DPRC version ++ */ ++struct dprc_attributes { ++ int container_id; ++ uint16_t icid; ++ int portal_id; ++ uint64_t options; ++ /** ++ * struct version - DPRC version ++ * @major: DPRC major version ++ * @minor: DPRC minor version ++ */ ++ struct { ++ uint16_t major; ++ uint16_t minor; ++ } version; ++}; ++ ++/** ++ * dprc_get_attributes() - Obtains container attributes ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @attributes: Returned container attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_attributes(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ struct dprc_attributes *attributes); ++ ++/** ++ * dprc_set_res_quota() - Set allocation policy for a specific resource/object ++ * type in a child container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @type: Resource/object type ++ * @quota: Sets the maximum number of resources of the selected type ++ * that the child container is allowed to allocate from its parent; ++ * when quota is set to -1, the policy is the same as container's ++ * general policy. ++ * ++ * Allocation policy determines whether or not a container may allocate ++ * resources from its parent. Each container has a 'global' allocation policy ++ * that is set when the container is created. ++ * ++ * This function sets allocation policy for a specific resource type. ++ * The default policy for all resource types matches the container's 'global' ++ * allocation policy. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ * @warning Only the parent container is allowed to change a child policy. 
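++ *
++ * For example, to cap a child container at 8 objects of a given pool type
++ * allocated from this parent ("dpbp" as the type string and a quota of 8 are
++ * illustrative; cmd_flags of 0, error handling elided):
++ *
++ *    dprc_set_res_quota(mc_io, 0, token, child_id, "dpbp", 8);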
++ */ ++int dprc_set_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t quota); ++ ++/** ++ * dprc_get_res_quota() - Gets the allocation policy of a specific ++ * resource/object type in a child container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @type: resource/object type ++ * @quota: Returns the maximum number of resources of the selected type ++ * that the child container is allowed to allocate from the parent; ++ * when quota is set to -1, the policy is the same as container's ++ * general policy. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_quota(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ char *type, ++ uint16_t *quota); ++ ++/* Resource request options */ ++ ++/* Explicit resource ID request - The requested objects/resources ++ * are explicit and sequential (in case of resources). ++ * The base ID is given in the res_req id_base_align field ++ */ ++#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 ++ ++/* Aligned resources request - Relevant only for resources ++ * request (and not objects). Indicates that resources base ID should be ++ * sequential and aligned to the value given at the dprc_res_req id_base_align field ++ */ ++#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 ++ ++/* Plugged Flag - Relevant only for object assignment request. ++ * Indicates that after all objects are assigned, an interrupt will be invoked at ++ * the relevant GPP. The assigned object will be marked as plugged. ++ * Plugged objects can't be assigned from their container ++ */ ++#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 ++ ++/** ++ * struct dprc_res_req - Resource request descriptor, to be used in assignment ++ * or un-assignment of resources and objects. ++ * @type: Resource/object type: Represented as a NULL terminated string. ++ * This string may be received by using dprc_get_pool() to get resource ++ * type and dprc_get_obj() to get object type; ++ * Note: it is not possible to assign/un-assign DPRC objects ++ * @num: Number of resources ++ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options ++ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT ++ * is set at option), this field represents the required base ID ++ * for resource allocation; In case of aligned assignment ++ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field ++ * indicates the required alignment for the resource ID(s) - ++ * use 0 if there is no alignment or explicit ID requirements ++ */ ++struct dprc_res_req { ++ char type[16]; ++ uint32_t num; ++ uint32_t options; ++ int id_base_align; ++}; ++ ++/** ++ * dprc_assign() - Assigns objects or resources to a child container. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @container_id: ID of the child container ++ * @res_req: Describes the type and amount of resources to ++ * assign to the given container ++ * ++ * Assignment is usually done by a parent (this DPRC) to one of its child ++ * containers. ++ * ++ * According to the DPRC allocation policy, the assigned resources may be taken ++ * (allocated) from the container's ancestors, if not enough resources are ++ * available in the container itself.
++ * ++ * The type of assignment depends on the dprc_res_req options, as follows: ++ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have ++ * the explicit base ID specified at the id_base_align field of res_req. ++ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be ++ * aligned to the value given at id_base_align field of res_req. ++ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, ++ * and indicates that the object must be set to the plugged state. ++ * ++ * A container may use this function with its own ID in order to change an ++ * object's state to plugged or unplugged. ++ * ++ * If IRQ information has been set in the child DPRC, it will signal an ++ * interrupt following every change in its object assignment. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_assign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int container_id, ++ struct dprc_res_req *res_req); ++ ++/** ++ * dprc_unassign() - Un-assigns objects or resources from a child container ++ * and moves them into this (parent) DPRC. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @child_container_id: ID of the child container ++ * @res_req: Describes the type and amount of resources to un-assign from ++ * the child container ++ * ++ * Un-assignment of objects can succeed only if the object is not in the ++ * plugged or opened state. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_unassign(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int child_container_id, ++ struct dprc_res_req *res_req); ++ ++/** ++ * dprc_get_pool_count() - Get the number of dprc's pools ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @pool_count: Returned number of resource pools in the dprc ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_pool_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *pool_count); ++ ++/** ++ * dprc_get_pool() - Get the type (string) of a certain dprc's pool ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @pool_index: Index of the pool to be queried (< pool_count) ++ * @type: The type of the pool ++ * ++ * The pool types are retrieved one by one by incrementing ++ * pool_index up to (not including) the value of pool_count returned ++ * from dprc_get_pool_count(). dprc_get_pool_count() must ++ * be called prior to dprc_get_pool(). ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_pool(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int pool_index, ++ char *type); ++ ++/** ++ * dprc_get_obj_count() - Obtains the number of objects in the DPRC ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_count: Number of objects assigned to the DPRC ++ * ++ * Return: '0' on Success; Error code otherwise.
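++ *
++ * Typical enumeration sketch, pairing this call with dprc_get_obj() below
++ * (cmd_flags of 0, error handling elided):
++ *
++ *    int i, count = 0;
++ *    struct dprc_obj_desc desc;
++ *
++ *    dprc_get_obj_count(mc_io, 0, token, &count);
++ *    for (i = 0; i < count; i++)
++ *        dprc_get_obj(mc_io, 0, token, i, &desc);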
++ */ ++int dprc_get_obj_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int *obj_count); ++ ++/* Objects Attributes Flags */ ++ ++/* Opened state - Indicates that an object is open by at least one owner */ ++#define DPRC_OBJ_STATE_OPEN 0x00000001 ++/* Plugged state - Indicates that the object is plugged */ ++#define DPRC_OBJ_STATE_PLUGGED 0x00000002 ++ ++/** ++ * Shareability flag - Object flag indicating no memory shareability. ++ * the object generates memory accesses that are non coherent with other ++ * masters; ++ * user is responsible for proper memory handling through IOMMU configuration. ++ */ ++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 ++ ++/** ++ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() ++ * @type: Type of object: NULL terminated string ++ * @id: ID of logical object resource ++ * @vendor: Object vendor identifier ++ * @ver_major: Major version number ++ * @ver_minor: Minor version number ++ * @irq_count: Number of interrupts supported by the object ++ * @region_count: Number of mappable regions supported by the object ++ * @state: Object state: combination of DPRC_OBJ_STATE_ states ++ * @label: Object label ++ * @flags: Object's flags ++ */ ++struct dprc_obj_desc { ++ char type[16]; ++ int id; ++ uint16_t vendor; ++ uint16_t ver_major; ++ uint16_t ver_minor; ++ uint8_t irq_count; ++ uint8_t region_count; ++ uint32_t state; ++ char label[16]; ++ uint16_t flags; ++}; ++ ++/** ++ * dprc_get_obj() - Get general information on an object ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_index: Index of the object to be queried (< obj_count) ++ * @obj_desc: Returns the requested object descriptor ++ * ++ * The object descriptors are retrieved one by one by incrementing ++ * obj_index up to (not including) the value of obj_count returned ++ * from dprc_get_obj_count(). dprc_get_obj_count() must ++ * be called prior to dprc_get_obj(). ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ int obj_index, ++ struct dprc_obj_desc *obj_desc); ++ ++/** ++ * dprc_get_obj_desc() - Get object descriptor. ++ * ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: The type of the object to get its descriptor. ++ * @obj_id: The id of the object to get its descriptor ++ * @obj_desc: The returned descriptor to fill and return to the user ++ * ++ * Return: '0' on Success; Error code otherwise. ++ * ++ */ ++int dprc_get_obj_desc(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ struct dprc_obj_desc *obj_desc); ++ ++/** ++ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Type of the object to set its IRQ ++ * @obj_id: ID of the object to set its IRQ ++ * @irq_index: The interrupt index to configure ++ * @irq_cfg: IRQ configuration ++ * ++ * Return: '0' on Success; Error code otherwise. 
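dprc_get_obj_count() and dprc_get_obj() follow the same index-based pattern; the sketch below (illustrative only, with mc_io/token assumed from earlier setup) enumerates the container's objects and reports their plugged state using the DPRC_OBJ_STATE_ flags defined above.

        struct dprc_obj_desc desc;
        int obj_count, i, err;

        err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
        for (i = 0; !err && i < obj_count; i++) {
                err = dprc_get_obj(mc_io, 0, token, i, &desc);
                if (err)
                        break;
                pr_info("%s.%d is %s\n", desc.type, desc.id,
                        (desc.state & DPRC_OBJ_STATE_PLUGGED) ? "plugged" : "unplugged");
        }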
++ */ ++int dprc_set_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_obj_irq() - Get IRQ information from object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Type od the object to get its IRQ ++ * @obj_id: ID of the object to get its IRQ ++ * @irq_index: The interrupt index to configure ++ * @type: Interrupt type: 0 represents message interrupt ++ * type (both irq_addr and irq_val are valid) ++ * @irq_cfg: The returned IRQ attributes ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj_irq(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t irq_index, ++ int *type, ++ struct dprc_irq_cfg *irq_cfg); ++ ++/** ++ * dprc_get_res_count() - Obtains the number of free resources that are assigned ++ * to this container, by pool type ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @type: pool type ++ * @res_count: Returned number of free resources of the given ++ * resource type that are assigned to this DPRC ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_count(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ int *res_count); ++ ++/** ++ * enum dprc_iter_status - Iteration status ++ * @DPRC_ITER_STATUS_FIRST: Perform first iteration ++ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed ++ * @DPRC_ITER_STATUS_LAST: Indicates last iteration ++ */ ++enum dprc_iter_status { ++ DPRC_ITER_STATUS_FIRST = 0, ++ DPRC_ITER_STATUS_MORE = 1, ++ DPRC_ITER_STATUS_LAST = 2 ++}; ++ ++/** ++ * struct dprc_res_ids_range_desc - Resource ID range descriptor ++ * @base_id: Base resource ID of this range ++ * @last_id: Last resource ID of this range ++ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at ++ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, ++ * additional iterations are needed, until the returned marker is ++ * DPRC_ITER_STATUS_LAST ++ */ ++struct dprc_res_ids_range_desc { ++ int base_id; ++ int last_id; ++ enum dprc_iter_status iter_status; ++}; ++ ++/** ++ * dprc_get_res_ids() - Obtains IDs of free resources in the container ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @type: pool type ++ * @range_desc: range descriptor ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_res_ids(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *type, ++ struct dprc_res_ids_range_desc *range_desc); ++ ++/* Region flags */ ++/* Cacheable - Indicates that region should be mapped as cacheable */ ++#define DPRC_REGION_CACHEABLE 0x00000001 ++ ++/** ++ * enum dprc_region_type - Region type ++ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region ++ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region ++ */ ++enum dprc_region_type { ++ DPRC_REGION_TYPE_MC_PORTAL, ++ DPRC_REGION_TYPE_QBMAN_PORTAL ++}; ++ ++/** ++ * struct dprc_region_desc - Mappable region descriptor ++ * @base_offset: Region offset from region's base address. 
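The free-resource queries above are iterated by way of the status field in struct dprc_res_ids_range_desc. A minimal sketch (not part of the patch; "dpmcp" is again a placeholder pool type and mc_io/token are assumed):

        struct dprc_res_ids_range_desc range = {
                .iter_status = DPRC_ITER_STATUS_FIRST,
        };
        int count, err;

        err = dprc_get_res_count(mc_io, 0, token, "dpmcp", &count);
        if (!err && count) {
                do {
                        err = dprc_get_res_ids(mc_io, 0, token, "dpmcp", &range);
                        if (err)
                                break;
                        pr_info("free dpmcp IDs: %d..%d\n",
                                range.base_id, range.last_id);
                } while (range.iter_status != DPRC_ITER_STATUS_LAST);
        }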
++ * For DPMCP and DPRC objects, region base is offset from SoC MC portals ++ * base address; For DPIO, region base is offset from SoC QMan portals ++ * base address ++ * @size: Region size (in bytes) ++ * @flags: Region attributes ++ * @type: Portal region type ++ */ ++struct dprc_region_desc { ++ uint32_t base_offset; ++ uint32_t size; ++ uint32_t flags; ++ enum dprc_region_type type; ++}; ++ ++/** ++ * dprc_get_obj_region() - Get region information for a specified object. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Object type as returned in dprc_get_obj() ++ * @obj_id: Unique object instance as returned in dprc_get_obj() ++ * @region_index: The specific region to query ++ * @region_desc: Returns the requested region descriptor ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_get_obj_region(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ uint8_t region_index, ++ struct dprc_region_desc *region_desc); ++ ++/** ++ * dprc_set_obj_label() - Set object label. ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @obj_type: Object's type ++ * @obj_id: Object's ID ++ * @label: The required label. The maximum length is 16 chars. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_set_obj_label(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ char *obj_type, ++ int obj_id, ++ char *label); ++ ++/** ++ * struct dprc_endpoint - Endpoint description for link connect/disconnect ++ * operations ++ * @type: Endpoint object type: NULL terminated string ++ * @id: Endpoint object ID ++ * @if_id: Interface ID; should be set for endpoints with multiple ++ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 ++ */ ++struct dprc_endpoint { ++ char type[16]; ++ int id; ++ int if_id; ++}; ++ ++/** ++ * struct dprc_connection_cfg - Connection configuration. ++ * Used for virtual connections only ++ * @committed_rate: Committed rate (Mbits/s) ++ * @max_rate: Maximum rate (Mbits/s) ++ */ ++struct dprc_connection_cfg { ++ uint32_t committed_rate; ++ uint32_t max_rate; ++}; ++ ++/** ++ * dprc_connect() - Connect two endpoints to create a network link between them ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @endpoint1: Endpoint 1 configuration parameters ++ * @endpoint2: Endpoint 2 configuration parameters ++ * @cfg: Connection configuration. The connection configuration is ignored for ++ * connections made to DPMAC objects, where rate is retrieved from the ++ * MAC configuration. ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dprc_connect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ const struct dprc_endpoint *endpoint2, ++ const struct dprc_connection_cfg *cfg); ++ ++/** ++ * dprc_disconnect() - Disconnect one endpoint to remove its network connection ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPRC object ++ * @endpoint: Endpoint configuration parameters ++ * ++ * Return: '0' on Success; Error code otherwise. 
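A hedged sketch of the connect API above (illustrative only, not part of the patch): the endpoint types and IDs below ("dpni", "dpmac", 0, 1) are placeholders for objects that actually exist in the caller's container, and mc_io/token are assumed. As noted above, the rate configuration is ignored when one endpoint is a DPMAC.

        struct dprc_endpoint ep1 = { .type = "dpni",  .id = 0, .if_id = 0 };
        struct dprc_endpoint ep2 = { .type = "dpmac", .id = 1, .if_id = 0 };
        struct dprc_connection_cfg cfg = {
                .committed_rate = 1000, /* Mbits/s */
                .max_rate       = 1000, /* Mbits/s */
        };
        int err;

        err = dprc_connect(mc_io, 0, token, &ep1, &ep2, &cfg);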
++ */ ++int dprc_disconnect(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint); ++ ++/** ++* dprc_get_connection() - Get connected endpoint and link status if connection ++* exists. ++* @mc_io: Pointer to MC portal's I/O object ++* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++* @token: Token of DPRC object ++* @endpoint1: Endpoint 1 configuration parameters ++* @endpoint2: Returned endpoint 2 configuration parameters ++* @state: Returned link state: ++* 1 - link is up; ++* 0 - link is down; ++* -1 - no connection (endpoint2 information is irrelevant) ++* ++* Return: '0' on Success; -ENAVAIL if connection does not exist. ++*/ ++int dprc_get_connection(struct fsl_mc_io *mc_io, ++ uint32_t cmd_flags, ++ uint16_t token, ++ const struct dprc_endpoint *endpoint1, ++ struct dprc_endpoint *endpoint2, ++ int *state); ++ ++#endif /* _FSL_DPRC_H */ ++ +diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h +new file mode 100644 +index 0000000..3e9af59 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h +@@ -0,0 +1,774 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_FD_H ++#define __FSL_DPAA2_FD_H ++ ++/** ++ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 ++ * ++ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. ++ * Frames can be enqueued and dequeued to Frame Queues which are consumed ++ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) ++ * ++ * There are three types of frames: Single, Scatter Gather and Frame Lists. ++ * ++ * The set of APIs in this file must be used to create, manipulate and ++ * query Frame Descriptor. 
++ * ++ */ ++ ++/** ++ * struct dpaa2_fd - Place-holder for FDs. ++ * @words: for easier/faster copying the whole FD structure. ++ * @addr_lo: the lower 32 bits of the address in FD. ++ * @addr_hi: the upper 32 bits of the address in FD. ++ * @len: the length field in FD. ++ * @bpid_offset: represent the bpid and offset fields in FD ++ * @frc: frame context ++ * @ctrl: the 32bit control bits including dd, sc,... va, err. ++ * @flc_lo: the lower 32bit of flow context. ++ * @flc_hi: the upper 32bits of flow context. ++ * ++ * This structure represents the basic Frame Descriptor used in the system. ++ * We represent it via the simplest form that we need for now. Different ++ * overlays may be needed to support different options, etc. (It is impractical ++ * to define One True Struct, because the resulting encoding routines (lots of ++ * read-modify-writes) would be worst-case performance whether or not ++ * circumstances required them.) ++ */ ++struct dpaa2_fd { ++ union { ++ u32 words[8]; ++ struct dpaa2_fd_simple { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ /* offset in the MS 16 bits, BPID in the LS 16 bits */ ++ u32 bpid_offset; ++ u32 frc; /* frame context */ ++ /* "err", "va", "cbmt", "asal", [...] */ ++ u32 ctrl; ++ /* flow context */ ++ u32 flc_lo; ++ u32 flc_hi; ++ } simple; ++ }; ++}; ++ ++enum dpaa2_fd_format { ++ dpaa2_fd_single = 0, ++ dpaa2_fd_list, ++ dpaa2_fd_sg ++}; ++ ++/* Accessors for SG entry fields ++ * ++ * These setters and getters assume little endian format. For converting ++ * between LE and cpu endianness, the specific conversion functions must be ++ * called before the SGE contents are accessed by the core (on Rx), ++ * respectively before the SG table is sent to hardware (on Tx) ++ */ ++ ++/** ++ * dpaa2_fd_get_addr() - get the addr field of frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the address in the frame descriptor. ++ */ ++static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32) ++ + fd->simple.addr_lo); ++} ++ ++/** ++ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor ++ * @fd: the given frame descriptor. ++ * @addr: the address needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) ++{ ++ fd->simple.addr_hi = upper_32_bits(addr); ++ fd->simple.addr_lo = lower_32_bits(addr); ++} ++ ++/** ++ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the frame context field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) ++{ ++ return fd->simple.frc; ++} ++ ++/** ++ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * @frc: the frame context needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) ++{ ++ fd->simple.frc = frc; ++} ++ ++/** ++ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the flow context in the frame descriptor. ++ */ ++static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) ++{ ++ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) + ++ fd->simple.flc_lo); ++} ++ ++/** ++ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor ++ * @fd: the given frame descriptor. 
++ * @flc_addr: the flow context needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) ++{ ++ fd->simple.flc_hi = upper_32_bits(flc_addr); ++ fd->simple.flc_lo = lower_32_bits(flc_addr); ++} ++ ++/** ++ * dpaa2_fd_get_len() - Get the length in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the length field in the frame descriptor. ++ */ ++static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) ++{ ++ return fd->simple.len; ++} ++ ++/** ++ * dpaa2_fd_set_len() - Set the length field of frame descriptor ++ * @fd: the given frame descriptor. ++ * @len: the length needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) ++{ ++ fd->simple.len = len; ++} ++ ++/** ++ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the offset. ++ */ ++static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) ++{ ++ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @offset: the offset needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) ++{ ++ fd->simple.bpid_offset &= 0xF000FFFF; ++ fd->simple.bpid_offset |= (u32)offset << 16; ++} ++ ++/** ++ * dpaa2_fd_get_format() - Get the format field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_fd_format dpaa2_fd_get_format( ++ const struct dpaa2_fd *fd) ++{ ++ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_fd_set_format() - Set the format field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @format: the format needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, ++ enum dpaa2_fd_format format) ++{ ++ fd->simple.bpid_offset &= 0xCFFFFFFF; ++ fd->simple.bpid_offset |= (u32)format << 28; ++} ++ ++/** ++ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor ++ * @fd: the given frame descriptor. ++ * ++ * Return the bpid. ++ */ ++static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) ++{ ++ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF); ++} ++ ++/** ++ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor ++ * ++ * @fd: the given frame descriptor. ++ * @bpid: the bpid needs to be set in frame descriptor. ++ */ ++static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) ++{ ++ fd->simple.bpid_offset &= 0xFFFF0000; ++ fd->simple.bpid_offset |= (u32)bpid; ++} ++ ++/** ++ * struct dpaa2_sg_entry - the scatter-gathering structure ++ * @addr_lo: the lower 32bit of address ++ * @addr_hi: the upper 32bit of address ++ * @len: the length in this sg entry. ++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. ++ */ ++struct dpaa2_sg_entry { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ u32 bpid_offset; ++}; ++ ++enum dpaa2_sg_format { ++ dpaa2_sg_single = 0, ++ dpaa2_sg_frame_data, ++ dpaa2_sg_sgt_ext ++}; ++ ++/** ++ * dpaa2_sg_get_addr() - Get the address from SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the address. 
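Putting the accessors above together, a single-buffer frame descriptor for transmission might be filled in as sketched below (illustrative only, not part of the patch). buf_iova, data_offset, data_len and bpid are assumed to come from the caller's DMA mapping and buffer-pool setup.

        struct dpaa2_fd fd;

        memset(&fd, 0, sizeof(fd));
        dpaa2_fd_set_addr(&fd, buf_iova);        /* DMA address of the buffer */
        dpaa2_fd_set_offset(&fd, data_offset);   /* start of frame data within the buffer */
        dpaa2_fd_set_len(&fd, data_len);
        dpaa2_fd_set_format(&fd, dpaa2_fd_single);
        dpaa2_fd_set_bpid(&fd, bpid);            /* pool the buffer will be released to */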
++ */ ++static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) ++{ ++ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo); ++} ++ ++/** ++ * dpaa2_sg_set_addr() - Set the address in SG entry ++ * @sg: the given scatter-gathering object. ++ * @addr: the address to be set. ++ */ ++static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) ++{ ++ sg->addr_hi = upper_32_bits(addr); ++ sg->addr_lo = lower_32_bits(addr); ++} ++ ++ ++static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) ++{ ++ return (sg->bpid_offset >> 30) & 0x1; ++} ++ ++/** ++ * dpaa2_sg_get_len() - Get the length in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the length. ++ */ ++static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) ++{ ++ if (dpaa2_sg_short_len(sg)) ++ return sg->len & 0x1FFFF; ++ return sg->len; ++} ++ ++/** ++ * dpaa2_sg_set_len() - Set the length in SG entry ++ * @sg: the given scatter-gathering object. ++ * @len: the length to be set. ++ */ ++static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) ++{ ++ sg->len = len; ++} ++ ++/** ++ * dpaa2_sg_get_offset() - Get the offset in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the offset. ++ */ ++static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) ++{ ++ return (u16)(sg->bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_sg_set_offset() - Set the offset in SG entry ++ * @sg: the given scatter-gathering object. ++ * @offset: the offset to be set. ++ */ ++static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, ++ u16 offset) ++{ ++ sg->bpid_offset &= 0xF000FFFF; ++ sg->bpid_offset |= (u32)offset << 16; ++} ++ ++/** ++ * dpaa2_sg_get_format() - Get the SG format in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the format. ++ */ ++static inline enum dpaa2_sg_format ++ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) ++{ ++ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_sg_set_format() - Set the SG format in SG entry ++ * @sg: the given scatter-gathering object. ++ * @format: the format to be set. ++ */ ++static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, ++ enum dpaa2_sg_format format) ++{ ++ sg->bpid_offset &= 0xCFFFFFFF; ++ sg->bpid_offset |= (u32)format << 28; ++} ++ ++/** ++ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return the bpid. ++ */ ++static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) ++{ ++ return (u16)(sg->bpid_offset & 0x3FFF); ++} ++ ++/** ++ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry ++ * @sg: the given scatter-gathering object. ++ * @bpid: the bpid to be set. ++ */ ++static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) ++{ ++ sg->bpid_offset &= 0xFFFFC000; ++ sg->bpid_offset |= (u32)bpid; ++} ++ ++/** ++ * dpaa2_sg_is_final() - Check final bit in SG entry ++ * @sg: the given scatter-gathering object. ++ * ++ * Return bool. ++ */ ++static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) ++{ ++ return !!(sg->bpid_offset >> 31); ++} ++ ++/** ++ * dpaa2_sg_set_final() - Set the final bit in SG entry ++ * @sg: the given scatter-gathering object. ++ * @final: the final boolean to be set. 
++ */ ++static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) ++{ ++ sg->bpid_offset &= 0x7FFFFFFF; ++ sg->bpid_offset |= (u32)final << 31; ++} ++ ++/* Endianness conversion helper functions ++ * The accelerator drivers which construct / read scatter gather entries ++ * need to call these in order to account for endianness mismatches between ++ * hardware and cpu ++ */ ++#ifdef __BIG_ENDIAN ++/** ++ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu ++ * format little endian format. ++ * @sg: the given scatter gather entry. ++ */ ++static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg) ++{ ++ uint32_t *p = (uint32_t *)sg; ++ int i; ++ ++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) ++ cpu_to_le32s(p++); ++} ++ ++/** ++ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian ++ * format to native cpu format. ++ * @sg: the given scatter gather entry. ++ */ ++static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg) ++{ ++ uint32_t *p = (uint32_t *)sg; ++ int i; ++ ++ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) ++ le32_to_cpus(p++); ++} ++#else ++#define dpaa2_sg_cpu_to_le(sg) ++#define dpaa2_sg_le_to_cpu(sg) ++#endif /* __BIG_ENDIAN */ ++ ++ ++/** ++ * struct dpaa2_fl_entry - structure for frame list entry. ++ * @addr_lo: the lower 32bit of address ++ * @addr_hi: the upper 32bit of address ++ * @len: the length in this sg entry. ++ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. ++ * @frc: frame context ++ * @ctrl: the 32bit control bits including dd, sc,... va, err. ++ * @flc_lo: the lower 32bit of flow context. ++ * @flc_hi: the upper 32bits of flow context. ++ * ++ * Frame List Entry (FLE) ++ * Identical to dpaa2_fd.simple layout, but some bits are different ++ */ ++struct dpaa2_fl_entry { ++ u32 addr_lo; ++ u32 addr_hi; ++ u32 len; ++ u32 bpid_offset; ++ u32 frc; ++ u32 ctrl; ++ u32 flc_lo; ++ u32 flc_hi; ++}; ++ ++enum dpaa2_fl_format { ++ dpaa2_fl_single = 0, ++ dpaa2_fl_res, ++ dpaa2_fl_sg ++}; ++ ++/** ++ * dpaa2_fl_get_addr() - Get address in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return address for the get function. ++ */ ++static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) ++{ ++ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo); ++} ++ ++/** ++ * dpaa2_fl_set_addr() - Set the address in the frame list entry ++ * @fle: the given frame list entry. ++ * @addr: the address needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, ++ dma_addr_t addr) ++{ ++ fle->addr_hi = upper_32_bits(addr); ++ fle->addr_lo = lower_32_bits(addr); ++} ++ ++/** ++ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return flow context for the get function. ++ */ ++static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) ++{ ++ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo); ++} ++ ++/** ++ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry ++ * @fle: the given frame list entry. ++ * @flc_addr: the flow context address needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, ++ dma_addr_t flc_addr) ++{ ++ fle->flc_hi = upper_32_bits(flc_addr); ++ fle->flc_lo = lower_32_bits(flc_addr); ++} ++ ++/** ++ * dpaa2_fl_get_len() - Get the length in the frame list entry ++ * @fle: the given frame list entry. 
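As the scatter-gather comments above note, SG entries are built in CPU endianness and must be converted before the table is handed to hardware. A minimal sketch (illustrative only; frag_iova, frag_len and bpid are assumed from the caller's mapping and pool setup):

        struct dpaa2_sg_entry sge;

        memset(&sge, 0, sizeof(sge));
        dpaa2_sg_set_addr(&sge, frag_iova);
        dpaa2_sg_set_len(&sge, frag_len);
        dpaa2_sg_set_format(&sge, dpaa2_sg_single);
        dpaa2_sg_set_bpid(&sge, bpid);
        dpaa2_sg_set_final(&sge, true);   /* last entry in the SG table */
        dpaa2_sg_cpu_to_le(&sge);         /* no-op on little-endian builds */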
++ * ++ * Return length for the get function. ++ */ ++static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) ++{ ++ return fle->len; ++} ++ ++/** ++ * dpaa2_fl_set_len() - Set the length in the frame list entry ++ * @fle: the given frame list entry. ++ * @len: the length needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) ++{ ++ fle->len = len; ++} ++ ++/** ++ * dpaa2_fl_get_offset() - Get/Set the offset in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return offset for the get function. ++ */ ++static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) ++{ ++ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF; ++} ++ ++/** ++ * dpaa2_fl_set_offset() - Set the offset in the frame list entry ++ * @fle: the given frame list entry. ++ * @offset: the offset needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, ++ uint16_t offset) ++{ ++ fle->bpid_offset &= 0xF000FFFF; ++ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16; ++} ++ ++/** ++ * dpaa2_fl_get_format() - Get the format in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return frame list format for the get function. ++ */ ++static inline enum dpaa2_fl_format dpaa2_fl_get_format( ++ const struct dpaa2_fl_entry *fle) ++{ ++ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3); ++} ++ ++/** ++ * dpaa2_fl_set_format() - Set the format in the frame list entry ++ * @fle: the given frame list entry. ++ * @format: the frame list format needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, ++ enum dpaa2_fl_format format) ++{ ++ fle->bpid_offset &= 0xCFFFFFFF; ++ fle->bpid_offset |= (u32)(format & 0x3) << 28; ++} ++ ++/** ++ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry ++ * @fle: the given frame list entry. ++ * ++ * Return bpid for the get function. ++ */ ++static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) ++{ ++ return (uint16_t)(fle->bpid_offset & 0x3FFF); ++} ++ ++/** ++ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry ++ * @fle: the given frame list entry. ++ * @bpid: the buffer pool id needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid) ++{ ++ fle->bpid_offset &= 0xFFFFC000; ++ fle->bpid_offset |= (u32)bpid; ++} ++ ++/** dpaa2_fl_is_final() - check the final bit is set or not in the frame list. ++ * @fle: the given frame list entry. ++ * ++ * Return final bit settting. ++ */ ++static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) ++{ ++ return !!(fle->bpid_offset >> 31); ++} ++ ++/** ++ * dpaa2_fl_set_final() - Set the final bit in the frame list entry ++ * @fle: the given frame list entry. ++ * @final: the final bit needs to be set. ++ * ++ */ ++static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) ++{ ++ fle->bpid_offset &= 0x7FFFFFFF; ++ fle->bpid_offset |= (u32)final << 31; ++} ++ ++/** ++ * struct dpaa2_dq - the qman result structure ++ * @dont_manipulate_directly: the 16 32bit data to represent the whole ++ * possible qman dequeue result. ++ * ++ * When frames are dequeued, the FDs show up inside "dequeue" result structures ++ * (if at all, not all dequeue results contain valid FDs). 
This structure type ++ * is intentionally defined without internal detail, and the only reason it ++ * isn't declared opaquely (without size) is to allow the user to provide ++ * suitably-sized (and aligned) memory for these entries. ++ */ ++struct dpaa2_dq { ++ uint32_t dont_manipulate_directly[16]; ++}; ++ ++/* Parsing frame dequeue results */ ++/* FQ empty */ ++#define DPAA2_DQ_STAT_FQEMPTY 0x80 ++/* FQ held active */ ++#define DPAA2_DQ_STAT_HELDACTIVE 0x40 ++/* FQ force eligible */ ++#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 ++/* Valid frame */ ++#define DPAA2_DQ_STAT_VALIDFRAME 0x10 ++/* FQ ODP enable */ ++#define DPAA2_DQ_STAT_ODPVALID 0x04 ++/* Volatile dequeue */ ++#define DPAA2_DQ_STAT_VOLATILE 0x02 ++/* volatile dequeue command is expired */ ++#define DPAA2_DQ_STAT_EXPIRED 0x01 ++ ++/** ++ * dpaa2_dq_flags() - Get the stat field of dequeue response ++ * @dq: the dequeue result. ++ */ ++uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull ++ * command. ++ * @dq: the dequeue result. ++ * ++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. ++ */ ++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) ++{ ++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); ++} ++ ++/** ++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. ++ * @dq: the dequeue result. ++ * ++ * Return boolean. ++ */ ++static inline int dpaa2_dq_is_pull_complete( ++ const struct dpaa2_dq *dq) ++{ ++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED); ++} ++ ++/** ++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response ++ * seqnum is valid only if VALIDFRAME flag is TRUE ++ * @dq: the dequeue result. ++ * ++ * Return seqnum. ++ */ ++uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_odpid() - Get the seqnum field in dequeue response ++ * odpid is valid only if ODPVAILD flag is TRUE. ++ * @dq: the dequeue result. ++ * ++ * Return odpid. ++ */ ++uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fqid() - Get the fqid in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return fqid. ++ */ ++uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the byte count remaining in the FQ. ++ */ ++uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame count remaining in the FQ. ++ */ ++uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame queue context. ++ */ ++uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq); ++ ++/** ++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response ++ * @dq: the dequeue result. ++ * ++ * Return the frame descriptor. ++ */ ++const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq); ++ ++#endif /* __FSL_DPAA2_FD_H */ +diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h +new file mode 100644 +index 0000000..6ea2ff9 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h +@@ -0,0 +1,619 @@ ++/* Copyright 2014 Freescale Semiconductor Inc. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Freescale Semiconductor nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY ++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY ++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_DPAA2_IO_H ++#define __FSL_DPAA2_IO_H ++ ++#include "fsl_dpaa2_fd.h" ++ ++struct dpaa2_io; ++struct dpaa2_io_store; ++ ++/** ++ * DOC: DPIO Service Management ++ * ++ * The DPIO service provides APIs for users to interact with the datapath ++ * by enqueueing and dequeing frame descriptors. ++ * ++ * The following set of APIs can be used to enqueue and dequeue frames ++ * as well as producing notification callbacks when data is available ++ * for dequeue. ++ */ ++ ++/** ++ * struct dpaa2_io_desc - The DPIO descriptor. ++ * @receives_notifications: Use notificaton mode. ++ * @has_irq: use irq-based proessing. ++ * @will_poll: use poll processing. ++ * @has_8prio: set for channel with 8 priority WQs. ++ * @cpu: the cpu index that at least interrupt handlers will execute on. ++ * @stash_affinity: the stash affinity for this portal favour 'cpu' ++ * @regs_cena: the cache enabled regs. ++ * @regs_cinh: the cache inhibited regs. ++ * @dpio_id: The dpio index. ++ * @qman_version: the qman version ++ * ++ * Describe the attributes and features of the DPIO object. ++ */ ++struct dpaa2_io_desc { ++ /* non-zero iff the DPIO has a channel */ ++ int receives_notifications; ++ /* non-zero if the DPIO portal interrupt is handled. If so, the ++ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). */ ++ int has_irq; ++ /* non-zero if the caller/OS is prepared to called the ++ * dpaa2_io_service_poll() routine as part of its run-to-completion (or ++ * scheduling) loop. If so, the DPIO service may dynamically switch some ++ * of its processing between polling-based and irq-based. It is illegal ++ * combination to have (!has_irq && !will_poll). */ ++ int will_poll; ++ /* ignored unless 'receives_notifications'. 
Non-zero iff the channel has ++ * 8 priority WQs, otherwise the channel has 2. */ ++ int has_8prio; ++ /* the cpu index that at least interrupt handlers will execute on. And ++ * if 'stash_affinity' is non-zero, the cache targeted by stash ++ * transactions is affine to this cpu. */ ++ int cpu; ++ /* non-zero if stash transactions for this portal favour 'cpu' over ++ * other CPUs. (Eg. zero if there's no stashing, or stashing is to ++ * shared cache.) */ ++ int stash_affinity; ++ /* Caller-provided flags, determined by bus-scanning and/or creation of ++ * DPIO objects via MC commands. */ ++ void *regs_cena; ++ void *regs_cinh; ++ int dpio_id; ++ uint32_t qman_version; ++}; ++ ++/** ++ * dpaa2_io_create() - create a dpaa2_io object. ++ * @desc: the dpaa2_io descriptor ++ * ++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual ++ * DPIO object. This handle can be used on it's own (like a one-portal "DPIO ++ * service") or later be added to a service-type "struct dpaa2_io" object. Note, ++ * the information required on 'cfg' is copied so the caller is free to do as ++ * they wish with the input parameter upon return. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc); ++ ++/** ++ * dpaa2_io_create_service() - Create an (initially empty) DPIO service. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_create_service(void); ++ ++/** ++ * dpaa2_io_default_service() - Use the driver's own global (and initially ++ * empty) DPIO service. ++ * ++ * This increments the reference count, so don't forget to use dpaa2_io_down() ++ * for each time this function is called. ++ * ++ * Return a valid dpaa2_io object for success, or NULL for failure. ++ */ ++struct dpaa2_io *dpaa2_io_default_service(void); ++ ++/** ++ * dpaa2_io_down() - release the dpaa2_io object. ++ * @d: the dpaa2_io object to be released. ++ * ++ * The "struct dpaa2_io" type can represent an individual DPIO object (as ++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", ++ * which can be used to group/encapsulate multiple DPIO objects. In all cases, ++ * each handle obtained should be released using this function. ++ */ ++void dpaa2_io_down(struct dpaa2_io *d); ++ ++/** ++ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service. ++ * @service: the given DPIO service. ++ * @obj: the given DPIO object. ++ * ++ * 'service' must have been created by dpaa2_io_create_service() and 'obj' ++ * must have been created by dpaa2_io_create(). This increments the reference ++ * count on the object that 'obj' refers to, so the user could call ++ * dpaa2_io_down(obj) after this and the object will persist within the service ++ * (and will be destroyed when the service is destroyed). ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object. ++ * @obj: the given DPIO object. ++ * @desc: the returned DPIO descriptor. ++ * ++ * This function will return failure if the given dpaa2_io struct represents a ++ * service rather than an individual DPIO object, otherwise it returns zero and ++ * the given 'cfg' structure is filled in. ++ * ++ * Return 0 for success, or -EINVAL for failure. 
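A minimal lifetime sketch for the service handles above (illustrative only, not part of the patch): dpaa2_io_default_service() takes a reference that must be balanced with dpaa2_io_down().

        struct dpaa2_io *io;

        io = dpaa2_io_default_service();  /* takes a reference */
        if (io) {
                /* ... enqueue/dequeue through the service ... */
                dpaa2_io_down(io);        /* release the reference */
        }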
++ */ ++int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc); ++ ++/** ++ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that ++ * are polling-driven. ++ * @obj: the given DPIO object. ++ * ++ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero. ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are ++ * irq-driven. ++ * @obj: the given DPIO object. ++ * ++ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero. ++ * ++ * Return IRQ_HANDLED for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_irq(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_pause_poll() - Used to stop polling. ++ * @obj: the given DPIO object. ++ * ++ * If a polling application is going to stop polling for a period of time and ++ * supports interrupt processing, it can call this function to convert all ++ * processing to IRQ. (Eg. when sleeping.) ++ * ++ * Return -EINVAL. ++ */ ++int dpaa2_io_pause_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_resume_poll() - Resume polling ++ * @obj: the given DPIO object. ++ * ++ * Return -EINVAL. ++ */ ++int dpaa2_io_resume_poll(struct dpaa2_io *obj); ++ ++/** ++ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service ++ * can receive notifications on. ++ * @s: the given DPIO object. ++ * @mask: the mask of cpus. ++ * ++ * Note that this is a run-time snapshot. If things like cpu-hotplug are ++ * supported in the target system, then an attempt to register notifications ++ * for a cpu that appears present in the given mask might fail if that cpu has ++ * gone offline in the mean time. ++ */ ++void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask); ++ ++/** ++ * dpaa2_io_service_stashing - Get a mask of cpus that the DPIO service has stash ++ * affinity to. ++ * @s: the given DPIO object. ++ * @mask: the mask of cpus. ++ */ ++void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask); ++ ++/** ++ * dpaa2_io_service_nonaffine() - Check the DPIO service's cpu affinity ++ * for stashing. ++ * @s: the given DPIO object. ++ * ++ * Return a boolean, whether or not the DPIO service has resources that have no ++ * particular cpu affinity for stashing. (Useful to know if you wish to operate ++ * on CPUs that the service has no affinity to, you would choose to use ++ * resources that are neutral, rather than affine to a different CPU.) Unlike ++ * other service-specific APIs, this one doesn't return an error if it is passed ++ * a non-service object. So don't do it. ++ */ ++int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s); ++ ++/*************************/ ++/* Notification handling */ ++/*************************/ ++ ++/** ++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure. ++ * @cb: the callback to be invoked when the notification arrives. ++ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN. ++ * @id: FQID or channel ID, needed for rearm. ++ * @desired_cpu: the cpu on which the notifications will show up. ++ * @actual_cpu: the cpu the notification actually shows up. ++ * @migration_cb: callback function used for migration. ++ * @dpio_id: the dpio index. ++ * @qman64: the 64-bit context value shows up in the FQDAN/CDAN. ++ * @node: the list node. ++ * @dpio_private: the dpio object internal to dpio_service. ++ * ++ * When a FQDAN/CDAN registration is made (eg. 
by DPNI/DPCON/DPAI code), a ++ * context of the following type is used. The caller can embed it within a ++ * larger structure in order to add state that is tracked along with the ++ * notification (this may be useful when callbacks are invoked that pass this ++ * notification context as a parameter). ++ */ ++struct dpaa2_io_notification_ctx { ++ void (*cb)(struct dpaa2_io_notification_ctx *); ++ int is_cdan; ++ uint32_t id; ++ /* This specifies which cpu the user wants notifications to show up on ++ * (ie. to execute 'cb'). If notification-handling on that cpu is not ++ * available at the time of notification registration, the registration ++ * will fail. */ ++ int desired_cpu; ++ /* If the target platform supports cpu-hotplug or other features ++ * (related to power-management, one would expect) that can migrate IRQ ++ * handling of a given DPIO object, then this value will potentially be ++ * different to 'desired_cpu' at run-time. */ ++ int actual_cpu; ++ /* And if migration does occur and this callback is non-NULL, it will ++ * be invoked prior to any futher notification callbacks executing on ++ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the ++ * migration, and 'newcpu' is what it is now. Both could conceivably be ++ * different to 'desired_cpu'. */ ++ void (*migration_cb)(struct dpaa2_io_notification_ctx *, ++ int oldcpu, int newcpu); ++ /* These are returned from dpaa2_io_service_register(). ++ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that ++ * has been selected by the service for receiving the notifications. The ++ * caller can use this value in the MC command that attaches the FQ (or ++ * channel) of their DPNI (or DPCON, respectively) to this DPIO for ++ * notification-generation. ++ * 'qman64' is the 64-bit context value that needs to be sent in the ++ * same MC command in order to be programmed into the FQ or channel - ++ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to ++ * the DPIO object, and the DPIO service specifies this value back to ++ * the caller so that the notifications that show up will be ++ * comprensible/demux-able to the DPIO service. */ ++ int dpio_id; ++ uint64_t qman64; ++ /* These fields are internal to the DPIO service once the context is ++ * registered. TBD: may require more internal state fields. */ ++ struct list_head node; ++ void *dpio_private; ++}; ++ ++/** ++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN ++ * notifications on the given DPIO service. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a ++ * DPIO object is performed after this function is called. In that way, (a) the ++ * DPIO service is "ready" to handle a notification arrival (which might happen ++ * before the "attach" command to MC has returned control of execution back to ++ * the caller), and (b) the DPIO service can provide back to the caller the ++ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command ++ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the ++ * right notification fields to the DPIO service. ++ * ++ * Return 0 for success, or -ENODEV for failure. ++ */ ++int dpaa2_io_service_register(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_service_deregister - The opposite of 'register'. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. 
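A hedged registration sketch for the notification context above (illustrative only): my_fqdan_cb, rx_fqid and service are placeholders for the caller's callback, Rx frame queue ID and DPIO service handle.

        static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
        {
                /* e.g. schedule NAPI/polling for the FQ identified by ctx->id */
        }

        struct dpaa2_io_notification_ctx nctx = {
                .cb          = my_fqdan_cb,
                .is_cdan     = 0,          /* FQDAN, i.e. a frame queue notification */
                .id          = rx_fqid,
                .desired_cpu = 0,
        };
        int err;

        err = dpaa2_io_service_register(service, &nctx);
        /* On success, nctx.dpio_id and nctx.qman64 are the values to pass in the
         * MC command that attaches the FQ to this DPIO for notifications. */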
++ * ++ * Note that 'register' should be called *before* ++ * making the MC call to attach the notification-producing device to the ++ * notification-handling DPIO service, the 'unregister' function should be ++ * called *after* making the MC call to detach the notification-producing ++ * device. ++ * ++ * Return 0 for success. ++ */ ++int dpaa2_io_service_deregister(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. ++ * @service: the given DPIO service. ++ * @ctx: the notification context. ++ * ++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is ++ * considered "disarmed". Ie. the user can issue pull dequeue operations on that ++ * traffic source for as long as it likes. Eventually it may wish to "rearm" ++ * that source to allow it to produce another FQDAN/CDAN, that's what this ++ * function achieves. ++ * ++ * Return 0 for success, or -ENODEV if no service available, -EBUSY/-EIO for not ++ * being able to implement the rearm the notifiaton due to setting CDAN or ++ * scheduling fq. ++ */ ++int dpaa2_io_service_rearm(struct dpaa2_io *service, ++ struct dpaa2_io_notification_ctx *ctx); ++ ++/** ++ * dpaa2_io_from_registration() - Get the DPIO object from the given notification ++ * context. ++ * @ctx: the given notifiation context. ++ * @ret: the returned DPIO object. ++ * ++ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the ++ * returned handle is not selected based on a 'cpu' argument, but is the same ++ * DPIO object that the given notification context is registered against. The ++ * returned handle carries a reference count, so a corresponding dpaa2_io_down() ++ * would be required when the reference is no longer needed. ++ * ++ * Return 0 for success, or -EINVAL for failure. ++ */ ++int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, ++ struct dpaa2_io **ret); ++ ++/**********************************/ ++/* General usage of DPIO services */ ++/**********************************/ ++ ++/** ++ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given ++ * notification context and cpu. ++ * @service: the DPIO service. ++ * @cpu: the cpu that the DPIO resource has stashing affinity to. ++ * @ret: the returned DPIO resource. ++ * ++ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers ++ * to an individual DPIO object or to a whole service. In the latter case, an ++ * internal choice is made for each operation. This function supports the former ++ * case, by selecting an individual DPIO object *from* the service in order for ++ * it to be used multiple times to provide "persistence". The returned handle ++ * also carries a reference count, so a corresponding dpaa2_io_down() would be ++ * required when the reference is no longer needed. Note, a parameter of -1 for ++ * 'cpu' will select a DPIO resource that has no particular stashing affinity to ++ * any cpu (eg. one that stashes to platform cache). ++ * ++ * Return 0 for success, or -ENODEV for failure. ++ */ ++int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, ++ struct dpaa2_io **ret); ++ ++/*****************/ ++/* Pull dequeues */ ++/*****************/ ++ ++/** ++ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @s: the dpaa2_io_store object for the result. 
++ * ++ * To support DCA/order-preservation, it will be necessary to support an ++ * alternative form, because they must ultimately dequeue to DQRR rather than a ++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will ++ * "complete" using a caller-provided callback (from DQRR processing) rather ++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. ++ * the alternative form will likely take a callback parameter rather than a ++ * store parameter. Ignoring it for now to keep the picture clearer. ++ * ++ * Return 0 for success, or error code for failure. ++ */ ++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, ++ struct dpaa2_io_store *s); ++ ++/** ++ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. ++ * @d: the given DPIO service. ++ * @channelid: the given channel id. ++ * @s: the dpaa2_io_store object for the result. ++ * ++ * To support DCA/order-preservation, it will be necessary to support an ++ * alternative form, because they must ultimately dequeue to DQRR rather than a ++ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will ++ * "complete" using a caller-provided callback (from DQRR processing) rather ++ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. ++ * the alternative form will likely take a callback parameter rather than a ++ * store parameter. Ignoring it for now to keep the picture clearer. ++ * ++ * Return 0 for success, or error code for failure. ++ */ ++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, ++ struct dpaa2_io_store *s); ++ ++/************/ ++/* Enqueues */ ++/************/ ++ ++/** ++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. ++ * @d: the given DPIO service. ++ * @fqid: the given frame queue id. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * This definition bypasses some features that are not expected to be priority-1 ++ * features, and may not be needed at all via current assumptions (QBMan's ++ * feature set is wider than the MC object model is intendeding to support, ++ * initially at least). Plus, keeping them out (for now) keeps the API view ++ * simpler. Missing features are; ++ * - enqueue confirmation (results DMA'd back to the user) ++ * - ORP ++ * - DCA/order-preservation (see note in "pull dequeues") ++ * - enqueue consumption interrupts ++ * ++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, ++ uint32_t fqid, ++ const struct dpaa2_fd *fd); ++ ++/** ++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. ++ * @d: the given DPIO service. ++ * @qdid: the given queuing destination id. ++ * @prio: the given queuing priority. ++ * @qdbin: the given queuing destination bin. ++ * @fd: the frame descriptor which is enqueued. ++ * ++ * This definition bypasses some features that are not expected to be priority-1 ++ * features, and may not be needed at all via current assumptions (QBMan's ++ * feature set is wider than the MC object model is intendeding to support, ++ * initially at least). Plus, keeping them out (for now) keeps the API view ++ * simpler. 
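Since the enqueue above reports -EBUSY when the enqueue ring is temporarily full, callers typically retry; a minimal sketch follows (illustrative only, with io, tx_fqid and fd assumed from the caller's setup):

        int err;

        do {
                err = dpaa2_io_service_enqueue_fq(io, tx_fqid, &fd);
        } while (err == -EBUSY);  /* a real driver would bound or defer the retry */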
Missing features are; ++ * - enqueue confirmation (results DMA'd back to the user) ++ * - ORP ++ * - DCA/order-preservation (see note in "pull dequeues") ++ * - enqueue consumption interrupts ++ * ++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, ++ * or -ENODEV if there is no dpio service. ++ */ ++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, ++ uint32_t qdid, uint8_t prio, uint16_t qdbin, ++ const struct dpaa2_fd *fd); ++ ++/*******************/ ++/* Buffer handling */ ++/*******************/ ++ ++/** ++ * dpaa2_io_service_release() - Release buffers to a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffers to be released. ++ * @num_buffers: the number of the buffers to be released. ++ * ++ * Return 0 for success, and negative error code for failure. ++ */ ++int dpaa2_io_service_release(struct dpaa2_io *d, ++ uint32_t bpid, ++ const uint64_t *buffers, ++ unsigned int num_buffers); ++ ++/** ++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. ++ * @d: the given DPIO object. ++ * @bpid: the buffer pool id. ++ * @buffers: the buffer addresses for acquired buffers. ++ * @num_buffers: the expected number of the buffers to acquire. ++ * ++ * Return a negative error code if the command failed, otherwise it returns ++ * the number of buffers acquired, which may be less than the number requested. ++ * Eg. if the buffer pool is empty, this will return zero. ++ */ ++int dpaa2_io_service_acquire(struct dpaa2_io *d, ++ uint32_t bpid, ++ uint64_t *buffers, ++ unsigned int num_buffers); ++ ++/***************/ ++/* DPIO stores */ ++/***************/ ++ ++/* These are reusable memory blocks for retrieving dequeue results into, and to ++ * assist with parsing those results once they show up. They also hide the ++ * details of how to use "tokens" to make detection of DMA results possible (ie. ++ * comparing memory before the DMA and after it) while minimising the needless ++ * clearing/rewriting of those memory locations between uses. ++ */ ++ ++/** ++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue ++ * result. ++ * @max_frames: the maximum number of dequeued result for frames, must be <= 16. ++ * @dev: the device to allow mapping/unmapping the DMAable region. ++ * ++ * Constructor - max_frames must be <= 16. The user provides the ++ * device struct to allow mapping/unmapping of the DMAable region. Area for ++ * storage will be allocated during create. The size of this storage is ++ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a ++ * wrapper structure allocated within the DPIO code, which owns and manages ++ * allocated store. ++ * ++ * Return dpaa2_io_store struct for successfuly created storage memory, or NULL ++ * if not getting the stroage for dequeue result in create API. ++ */ ++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, ++ struct device *dev); ++ ++/** ++ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue ++ * result. ++ * @s: the storage memory to be destroyed. ++ * ++ * Frees to specified storage memory. ++ */ ++void dpaa2_io_store_destroy(struct dpaa2_io_store *s); ++ ++/** ++ * dpaa2_io_store_next() - Determine when the next dequeue result is available. ++ * @s: the dpaa2_io_store object. ++ * @is_last: indicate whether this is the last frame in the pull command. 
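Tying the pieces together, a pull dequeue typically pairs dpaa2_io_service_pull_fq() with a store and dpaa2_io_store_next(), as sketched below (illustrative only, not part of the patch). dev, io and rx_fqid are assumed from the caller's setup, and process_fd() is a hypothetical consumer of the returned frame descriptor.

        struct dpaa2_io_store *store;
        struct dpaa2_dq *dq;
        int is_last = 0, err;

        store = dpaa2_io_store_create(16, dev);  /* room for up to 16 dequeued frames */
        if (store) {
                err = dpaa2_io_service_pull_fq(io, rx_fqid, store);
                while (!err && !is_last) {
                        dq = dpaa2_io_store_next(store, &is_last);
                        if (!dq)
                                continue;  /* result not written back by hardware yet */
                        if (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME)
                                process_fd(dpaa2_dq_fd(dq));
                }
                dpaa2_io_store_destroy(store);
        }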
++ *
++ * Once dpaa2_io_store has been passed to a function that performs dequeues to
++ * it, like dpaa2_ni_rx(), this function can be used to determine when the next
++ * frame result is available. Once this function returns non-NULL, a subsequent
++ * call to it will try to find the *next* dequeue result.
++ *
++ * Note that if a pull-dequeue has a null result because the target FQ/channel
++ * was empty, then this function will return NULL rather than expecting the
++ * caller to always check for this case. As such, "is_last" can be used to
++ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ *
++ * Return the next dequeue result if one is available, or NULL for an empty
++ * dequeue.
++ */
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
++
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++/**
++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
++ * @d: the given DPIO object.
++ * @fqid: the id of the frame queue to be queried.
++ * @fcnt: the queried frame count.
++ * @bcnt: the queried byte count.
++ *
++ * Knowing the FQ count at run-time can be useful in debugging situations.
++ * The instantaneous frame and byte counts are returned.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
++			uint32_t *fcnt, uint32_t *bcnt);
++
++/**
++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
++ * buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the index of the buffer pool to be queried.
++ * @num: the queried number of buffers in the buffer pool.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
++			uint32_t *num);
++#endif
++#endif /* __FSL_DPAA2_IO_H */
+diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
+new file mode 100644
+index 0000000..00f0b74
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/mc-cmd.h
+@@ -0,0 +1,133 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_MC_CMD_H ++#define __FSL_MC_CMD_H ++ ++#define MC_CMD_NUM_OF_PARAMS 7 ++ ++#define MAKE_UMASK64(_width) \ ++ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ ++ (uint64_t)-1)) ++ ++static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) ++{ ++ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); ++} ++ ++static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) ++{ ++ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); ++} ++ ++struct mc_command { ++ uint64_t header; ++ uint64_t params[MC_CMD_NUM_OF_PARAMS]; ++}; ++ ++enum mc_cmd_status { ++ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ ++ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ ++ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ ++ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ ++ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ ++ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ ++ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ ++ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ ++ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ ++ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */ ++ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ ++ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ ++}; ++ ++/* ++ * MC command flags ++ */ ++ ++/* High priority flag */ ++#define MC_CMD_FLAG_PRI 0x00008000 ++/* Command completion flag */ ++#define MC_CMD_FLAG_INTR_DIS 0x01000000 ++ ++/* TODO Remove following two defines after completion of flib 8.0.0 ++integration */ ++#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */ ++#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */ ++ ++#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */ ++#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */ ++#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */ ++#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */ ++#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */ ++#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/ ++#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */ ++#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/ ++#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */ ++ ++#define MC_CMD_HDR_READ_STATUS(_hdr) \ ++ ((enum mc_cmd_status)mc_dec((_hdr), \ ++ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) ++ ++#define MC_CMD_HDR_READ_TOKEN(_hdr) \ ++ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) ++ ++#define MC_CMD_HDR_READ_FLAGS(_hdr) \ ++ ((uint32_t)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S)) ++ ++#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ ++ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) ++ ++#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ ++ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) ++ ++#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ ++ ((_cmd).params[_param] |= 
mc_enc((_offset), (_width), _arg))
++
++#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
++	(_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
++
++static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
++					uint32_t cmd_flags,
++					uint16_t token)
++{
++	uint64_t hdr;
++
++	hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
++	hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
++		(cmd_flags & MC_CMD_HDR_FLAGS_MASK));
++	hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
++	hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
++		MC_CMD_STATUS_READY);
++
++	return hdr;
++}
++
++#endif /* __FSL_MC_CMD_H */
+diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
+new file mode 100644
+index 0000000..1246ca8
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/mc-private.h
+@@ -0,0 +1,168 @@
++/*
++ * Freescale Management Complex (MC) bus private declarations
++ *
++ * Copyright (C) 2014 Freescale Semiconductor, Inc.
++ * Author: German Rivera
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++#ifndef _FSL_MC_PRIVATE_H_
++#define _FSL_MC_PRIVATE_H_
++
++#include "../include/mc.h"
++#include
++#include
++
++#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
++
++#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
++	(strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
++	(_mc_dev)->obj_desc.id == (_obj_desc)->id)
++
++#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
++	(strcmp(_obj_type, "dpbp") == 0 || \
++	strcmp(_obj_type, "dpmcp") == 0 || \
++	strcmp(_obj_type, "dpcon") == 0)
++
++/**
++ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
++ * IRQ pool
++ */
++#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
++
++/**
++ * Maximum number of extra IRQs pre-allocated for an MC bus' IRQ pool,
++ * to be used by dynamically created MC objects
++ */
++#define FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS 64
++
++/**
++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
++ * @root_mc_bus_dev: MC object device representing the root DPRC
++ * @irq_domain: IRQ domain for the fsl-mc bus type
++ * @gic_supported: boolean flag that indicates if the GIC interrupt controller
++ * is supported.
++ * @num_translation_ranges: number of entries in translation_ranges
++ * @translation_ranges: array of bus to system address translation ranges
++ */
++struct fsl_mc {
++	struct fsl_mc_device *root_mc_bus_dev;
++	struct irq_domain *irq_domain;
++	bool gic_supported;
++	uint8_t num_translation_ranges;
++	struct fsl_mc_addr_translation_range *translation_ranges;
++};
++
++/**
++ * enum fsl_mc_region_types - Types of MC MMIO regions
++ */
++enum fsl_mc_region_types {
++	FSL_MC_PORTAL = 0x0,
++	FSL_QBMAN_PORTAL,
++
++	/*
++	 * New offset types must be added above this entry
++	 */
++	FSL_NUM_MC_OFFSET_TYPES
++};
++
++/**
++ * struct fsl_mc_addr_translation_range - bus to system address translation
++ * range
++ * @mc_region_type: Type of MC region for the range being translated
++ * @start_mc_offset: Start MC offset of the range being translated
++ * @end_mc_offset: MC offset of the first byte after the range (last MC
++ * offset of the range is end_mc_offset - 1)
++ * @start_phys_addr: system physical address corresponding to start_mc_offset
++ */
++struct fsl_mc_addr_translation_range {
++	enum fsl_mc_region_types mc_region_type;
++	uint64_t start_mc_offset;
++	uint64_t end_mc_offset;
++	phys_addr_t start_phys_addr;
++};
++
++/**
++ * struct fsl_mc_resource_pool - Pool of MC resources of a given
++ * type
++ * @type: type of resources in the pool
++ * @max_count: maximum number of resources in the pool
++ * @free_count: number of free resources in the pool
++ * @mutex: mutex to serialize access to the pool's free list
++ * @free_list: anchor node of list of free resources in the pool
++ * @mc_bus: pointer to the MC bus that owns this resource pool
++ */
++struct fsl_mc_resource_pool {
++	enum fsl_mc_pool_type type;
++	int16_t max_count;
++	int16_t free_count;
++	struct mutex mutex;	/* serializes access to free_list */
++	struct list_head free_list;
++	struct fsl_mc_bus *mc_bus;
++};
++
++/**
++ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
++ * @mc_dev: fsl-mc device for the bus device itself.
++ * @resource_pools: array of resource pools (one pool per resource type)
++ * for this MC bus. These resources represent allocatable entities
++ * from the physical DPRC.
++ * @atomic_mc_io: mc_io object to be used to send DPRC commands to the MC
++ * in atomic context (e.g., when programming MSIs in program_msi_at_mc()).
++ * @atomic_dprc_handle: DPRC handle opened using the atomic_mc_io's portal.
++ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool.
++ * @scan_mutex: Serializes bus scanning ++ * @dprc_attr: DPRC attributes ++ */ ++struct fsl_mc_bus { ++ struct fsl_mc_device mc_dev; ++ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; ++ struct fsl_mc_device_irq *irq_resources; ++ struct fsl_mc_io *atomic_mc_io; ++ uint16_t atomic_dprc_handle; ++ struct mutex scan_mutex; /* serializes bus scanning */ ++ struct dprc_attributes dprc_attr; ++}; ++ ++#define to_fsl_mc_bus(_mc_dev) \ ++ container_of(_mc_dev, struct fsl_mc_bus, mc_dev) ++ ++int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, ++ struct fsl_mc_io *mc_io, ++ struct device *parent_dev, ++ const char *driver_override, ++ struct fsl_mc_device **new_mc_dev); ++ ++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); ++ ++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, ++ const char *driver_override, ++ unsigned int *total_irq_count); ++ ++int __init dprc_driver_init(void); ++ ++void __exit dprc_driver_exit(void); ++ ++int __init fsl_mc_allocator_driver_init(void); ++ ++void __exit fsl_mc_allocator_driver_exit(void); ++ ++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_resource ++ **new_resource); ++ ++void fsl_mc_resource_free(struct fsl_mc_resource *resource); ++ ++int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, ++ unsigned int irq_count); ++ ++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); ++ ++void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); ++ ++void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); ++ ++#endif /* _FSL_MC_PRIVATE_H_ */ +diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h +new file mode 100644 +index 0000000..b08df85 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc-sys.h +@@ -0,0 +1,128 @@ ++/* Copyright 2013-2014 Freescale Semiconductor Inc. ++ * ++ * Interface of the I/O services to send MC commands to the MC hardware ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _FSL_MC_SYS_H
++#define _FSL_MC_SYS_H
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++/**
++ * Bit masks for a MC I/O object (struct fsl_mc_io) flags
++ */
++#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
++
++struct fsl_mc_resource;
++struct mc_command;
++
++/**
++ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
++ * @dev: device associated with this MC I/O object
++ * @flags: flags for mc_send_command()
++ * @portal_size: MC command portal size in bytes
++ * @portal_phys_addr: MC command portal physical address
++ * @portal_virt_addr: MC command portal virtual address
++ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
++ *
++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
++ * set:
++ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
++ * portal, if the fsl_mc_io object was created with the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
++ * fsl_mc_io object must be made only from non-atomic context.
++ * @mc_command_done_completion: Linux completion variable to be signaled
++ * when a DPMCP command completion interrupt is received.
++ * @mc_command_done_irq_armed: Boolean flag that indicates if interrupts have
++ * been successfully configured for the corresponding DPMCP object.
++ *
++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
++ * set:
++ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
++ * portal, if the fsl_mc_io object was created with the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
++ * fsl_mc_io object can be made from atomic or non-atomic context.
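++ *
++ * Assumed call sequence (an illustrative sketch only; cmd_id, token and
++ * mc_io are placeholders supplied by the caller, and the helpers used are
++ * those defined in mc-cmd.h):
++ *
++ *	struct mc_command cmd = { 0 };
++ *	enum mc_cmd_status status;
++ *	int error;
++ *
++ *	cmd.header = mc_encode_cmd_header(cmd_id, 0, token);
++ *	error = mc_send_command(mc_io, &cmd);
++ *	if (!error)
++ *		status = MC_CMD_HDR_READ_STATUS(cmd.header);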
++ */ ++struct fsl_mc_io { ++ struct device *dev; ++ uint16_t flags; ++ uint16_t portal_size; ++ phys_addr_t portal_phys_addr; ++ void __iomem *portal_virt_addr; ++ struct fsl_mc_device *dpmcp_dev; ++ union { ++ /* ++ * These fields are only meaningful if the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set ++ */ ++ struct { ++ struct mutex mutex; /* serializes mc_send_command() */ ++ struct completion mc_command_done_completion; ++ bool mc_command_done_irq_armed; ++ }; ++ ++ /* ++ * This field is only meaningful if the ++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set ++ */ ++ spinlock_t spinlock; /* serializes mc_send_command() */ ++ }; ++}; ++ ++int __must_check fsl_create_mc_io(struct device *dev, ++ phys_addr_t mc_portal_phys_addr, ++ uint32_t mc_portal_size, ++ struct fsl_mc_device *dpmcp_dev, ++ uint32_t flags, struct fsl_mc_io **new_mc_io); ++ ++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, ++ struct fsl_mc_device *dpmcp_dev); ++ ++void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io); ++ ++int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); ++ ++#endif /* _FSL_MC_SYS_H */ +diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h +new file mode 100644 +index 0000000..bbeb121 +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/mc.h +@@ -0,0 +1,244 @@ ++/* ++ * Freescale Management Complex (MC) bus public interface ++ * ++ * Copyright (C) 2014 Freescale Semiconductor, Inc. ++ * Author: German Rivera ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++#ifndef _FSL_MC_H_ ++#define _FSL_MC_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "../include/dprc.h" ++ ++#define FSL_MC_VENDOR_FREESCALE 0x1957 ++ ++struct fsl_mc_device; ++struct fsl_mc_io; ++ ++/** ++ * struct fsl_mc_driver - MC object device driver object ++ * @driver: Generic device driver ++ * @match_id_table: table of supported device matching Ids ++ * @probe: Function called when a device is added ++ * @remove: Function called when a device is removed ++ * @shutdown: Function called at shutdown time to quiesce the device ++ * @suspend: Function called when a device is stopped ++ * @resume: Function called when a device is resumed ++ * ++ * Generic DPAA device driver object for device drivers that are registered ++ * with a DPRC bus. This structure is to be embedded in each device-specific ++ * driver structure. ++ */ ++struct fsl_mc_driver { ++ struct device_driver driver; ++ const struct fsl_mc_device_match_id *match_id_table; ++ int (*probe)(struct fsl_mc_device *dev); ++ int (*remove)(struct fsl_mc_device *dev); ++ void (*shutdown)(struct fsl_mc_device *dev); ++ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); ++ int (*resume)(struct fsl_mc_device *dev); ++}; ++ ++#define to_fsl_mc_driver(_drv) \ ++ container_of(_drv, struct fsl_mc_driver, driver) ++ ++/** ++ * struct fsl_mc_device_match_id - MC object device Id entry for driver matching ++ * @vendor: vendor ID ++ * @obj_type: MC object type ++ * @ver_major: MC object version major number ++ * @ver_minor: MC object version minor number ++ * ++ * Type of entries in the "device Id" table for MC object devices supported by ++ * a MC object device driver. 
The last entry of the table has vendor set to 0x0
++ */
++struct fsl_mc_device_match_id {
++	uint16_t vendor;
++	const char obj_type[16];
++	uint32_t ver_major;
++	uint32_t ver_minor;
++};
++
++/**
++ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
++ *
++ * Entries in this enum are used as indices in the array of resource
++ * pools of an fsl_mc_bus object.
++ */
++enum fsl_mc_pool_type {
++	FSL_MC_POOL_DPMCP = 0x0,	/* corresponds to "dpmcp" in the MC */
++	FSL_MC_POOL_DPBP,		/* corresponds to "dpbp" in the MC */
++	FSL_MC_POOL_DPCON,		/* corresponds to "dpcon" in the MC */
++	FSL_MC_POOL_IRQ,
++
++	/*
++	 * NOTE: New resource pool types must be added before this entry
++	 */
++	FSL_MC_NUM_POOL_TYPES
++};
++
++/**
++ * struct fsl_mc_resource - MC generic resource
++ * @type: type of resource
++ * @id: unique MC resource Id within the resources of the same type
++ * @data: pointer to resource-specific data if the resource is currently
++ * allocated, or NULL if the resource is not currently allocated.
++ * @parent_pool: pointer to the parent resource pool from which this
++ * resource is allocated.
++ * @node: Node in the free list of the corresponding resource pool
++ *
++ * NOTE: This structure is to be embedded as a field of specific
++ * MC resource structures.
++ */
++struct fsl_mc_resource {
++	enum fsl_mc_pool_type type;
++	int32_t id;
++	void *data;
++	struct fsl_mc_resource_pool *parent_pool;
++	struct list_head node;
++};
++
++/**
++ * struct fsl_mc_device_irq - MC object device message-based interrupt
++ * @msi_paddr: message-based interrupt physical address
++ * @msi_value: message-based interrupt data value
++ * @irq_number: Linux IRQ number assigned to the interrupt
++ * @mc_dev: MC object device that owns this interrupt
++ * @dev_irq_index: device-relative IRQ index
++ * @resource: MC generic resource associated with the interrupt
++ */
++struct fsl_mc_device_irq {
++	phys_addr_t msi_paddr;
++	uint32_t msi_value;
++	uint32_t irq_number;
++	struct fsl_mc_device *mc_dev;
++	uint8_t dev_irq_index;
++	struct fsl_mc_resource resource;
++};
++
++#define to_fsl_mc_irq(_mc_resource) \
++	container_of(_mc_resource, struct fsl_mc_device_irq, resource)
++
++/**
++ * Bit masks for a MC object device (struct fsl_mc_device) flags
++ */
++#define FSL_MC_IS_DPRC 0x0001
++
++/**
++ * The root DPRC's parent is a platform device, and that platform
++ * device's bus type is platform_bus_type.
++ */
++#define is_root_dprc(dev) \
++	((to_fsl_mc_device(dev)->flags & FSL_MC_IS_DPRC) && \
++	((dev)->bus == &fsl_mc_bus_type) && \
++	((dev)->parent->bus == &platform_bus_type))
++
++/**
++ * Default DMA mask for devices on a fsl-mc bus
++ */
++#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
++
++/**
++ * struct fsl_mc_device - MC object device object
++ * @dev: Linux driver model device object
++ * @dma_mask: Default DMA mask
++ * @flags: MC object device flags
++ * @icid: Isolation context ID for the device
++ * @mc_handle: MC handle for the corresponding MC object opened
++ * @mc_io: Pointer to MC IO object assigned to this device or
++ * NULL if none.
++ * @obj_desc: MC description of the DPAA device
++ * @regions: pointer to array of MMIO region entries
++ * @irqs: pointer to array of pointers to interrupts allocated to this device
++ * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
++ *
++ * Generic device object for MC object devices that are "attached" to a
++ * MC bus.
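++ *
++ * An assumed binding sketch (illustrative only; "dpdemo", the version
++ * numbers and the callbacks are placeholders): a driver binds to these
++ * devices through a struct fsl_mc_driver whose match table is terminated
++ * by a zero-vendor entry, registered with module_fsl_mc_driver() below:
++ *
++ *	static const struct fsl_mc_device_match_id demo_match_id_table[] = {
++ *		{
++ *			.vendor = FSL_MC_VENDOR_FREESCALE,
++ *			.obj_type = "dpdemo",
++ *			.ver_major = 1,
++ *			.ver_minor = 0,
++ *		},
++ *		{ .vendor = 0x0 },
++ *	};
++ *
++ *	static struct fsl_mc_driver demo_driver = {
++ *		.driver = {
++ *			.name = "demo_driver",
++ *		},
++ *		.match_id_table = demo_match_id_table,
++ *		.probe = demo_probe,
++ *		.remove = demo_remove,
++ *	};
++ *	module_fsl_mc_driver(demo_driver);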
++ * ++ * NOTES: ++ * - For a non-DPRC object its icid is the same as its parent DPRC's icid. ++ * - The SMMU notifier callback gets invoked after device_add() has been ++ * called for an MC object device, but before the device-specific probe ++ * callback gets called. ++ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC ++ * portals. For all other MC objects, their device drivers are responsible for ++ * allocating MC portals for them by calling fsl_mc_portal_allocate(). ++ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are ++ * treated as resources that can be allocated/deallocated from the ++ * corresponding resource pool in the object's parent DPRC, using the ++ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects ++ * are known as "allocatable" objects. For them, the corresponding ++ * fsl_mc_device's 'resource' points to the associated resource object. ++ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), ++ * 'resource' is NULL. ++ */ ++struct fsl_mc_device { ++ struct device dev; ++ uint64_t dma_mask; ++ uint16_t flags; ++ uint16_t icid; ++ uint16_t mc_handle; ++ struct fsl_mc_io *mc_io; ++ struct dprc_obj_desc obj_desc; ++ struct resource *regions; ++ struct fsl_mc_device_irq **irqs; ++ struct fsl_mc_resource *resource; ++ const char *driver_override; ++}; ++ ++#define to_fsl_mc_device(_dev) \ ++ container_of(_dev, struct fsl_mc_device, dev) ++ ++/* ++ * module_fsl_mc_driver() - Helper macro for drivers that don't do ++ * anything special in module init/exit. This eliminates a lot of ++ * boilerplate. Each module may only use this macro once, and ++ * calling it replaces module_init() and module_exit() ++ */ ++#define module_fsl_mc_driver(__fsl_mc_driver) \ ++ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ ++ fsl_mc_driver_unregister) ++ ++/* ++ * Macro to avoid include chaining to get THIS_MODULE ++ */ ++#define fsl_mc_driver_register(drv) \ ++ __fsl_mc_driver_register(drv, THIS_MODULE) ++ ++int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, ++ struct module *owner); ++ ++void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); ++ ++bool fsl_mc_interrupts_supported(void); ++ ++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, ++ uint16_t mc_io_flags, ++ struct fsl_mc_io **new_mc_io); ++ ++void fsl_mc_portal_free(struct fsl_mc_io *mc_io); ++ ++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); ++ ++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, ++ enum fsl_mc_pool_type pool_type, ++ struct fsl_mc_device **new_mc_adev); ++ ++void fsl_mc_object_free(struct fsl_mc_device *mc_adev); ++ ++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); ++ ++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); ++ ++extern struct bus_type fsl_mc_bus_type; ++ ++#endif /* _FSL_MC_H_ */ +diff --git a/drivers/staging/fsl-mc/include/net.h b/drivers/staging/fsl-mc/include/net.h +new file mode 100644 +index 0000000..7480f6a +--- /dev/null ++++ b/drivers/staging/fsl-mc/include/net.h +@@ -0,0 +1,481 @@ ++/* Copyright 2013-2015 Freescale Semiconductor Inc. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++#ifndef __FSL_NET_H ++#define __FSL_NET_H ++ ++#define LAST_HDR_INDEX 0xFFFFFFFF ++ ++/*****************************************************************************/ ++/* Protocol fields */ ++/*****************************************************************************/ ++ ++/************************* Ethernet fields *********************************/ ++#define NH_FLD_ETH_DA (1) ++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) ++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) ++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) ++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) ++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) ++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) ++ ++#define NH_FLD_ETH_ADDR_SIZE 6 ++ ++/*************************** VLAN fields ***********************************/ ++#define NH_FLD_VLAN_VPRI (1) ++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) ++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) ++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) ++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) ++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) ++ ++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ ++ NH_FLD_VLAN_CFI | \ ++ NH_FLD_VLAN_VID) ++ ++/************************ IP (generic) fields ******************************/ ++#define NH_FLD_IP_VER (1) ++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) ++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) ++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) ++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) ++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) ++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) ++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) ++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) ++ ++#define NH_FLD_IP_PROTO_SIZE 1 ++ ++/***************************** IPV4 fields *********************************/ ++#define NH_FLD_IPV4_VER (1) ++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) ++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) ++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) ++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER 
<< 4) ++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) ++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) ++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) ++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) ++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) ++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) ++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) ++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) ++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) ++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) ++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) ++ ++#define NH_FLD_IPV4_ADDR_SIZE 4 ++#define NH_FLD_IPV4_PROTO_SIZE 1 ++ ++/***************************** IPV6 fields *********************************/ ++#define NH_FLD_IPV6_VER (1) ++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) ++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) ++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) ++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) ++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) ++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) ++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) ++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) ++ ++#define NH_FLD_IPV6_ADDR_SIZE 16 ++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 ++ ++/***************************** ICMP fields *********************************/ ++#define NH_FLD_ICMP_TYPE (1) ++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) ++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) ++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) ++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) ++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) ++ ++#define NH_FLD_ICMP_CODE_SIZE 1 ++#define NH_FLD_ICMP_TYPE_SIZE 1 ++ ++/***************************** IGMP fields *********************************/ ++#define NH_FLD_IGMP_VERSION (1) ++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) ++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) ++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) ++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) ++ ++/***************************** TCP fields **********************************/ ++#define NH_FLD_TCP_PORT_SRC (1) ++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) ++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) ++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) ++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) ++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) ++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) ++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) ++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) ++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) ++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) ++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) ++ ++#define NH_FLD_TCP_PORT_SIZE 2 ++ ++/***************************** UDP fields **********************************/ ++#define NH_FLD_UDP_PORT_SRC (1) ++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) ++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) ++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) ++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_UDP_PORT_SIZE 2 ++ ++/*************************** UDP-lite fields *******************************/ ++#define NH_FLD_UDP_LITE_PORT_SRC (1) ++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) ++#define NH_FLD_UDP_LITE_ALL_FIELDS \ ++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_UDP_LITE_PORT_SIZE 2 ++ 
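++/*
++ * Usage sketch (illustrative only, not part of the original header): the
++ * NH_FLD_* values are one-hot flags, so a set of fields within a header is
++ * described by OR-ing them together, e.g. selecting both UDP ports for a
++ * distribution key:
++ *
++ *	uint32_t udp_port_fields = NH_FLD_UDP_PORT_SRC | NH_FLD_UDP_PORT_DST;
++ *
++ * Each *_ALL_FIELDS mask covers every flag of its header, which is why the
++ * masks are defined as ((first_flag << n) - 1).
++ */
++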
++/*************************** UDP-encap-ESP fields **************************/ ++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) ++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) ++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) ++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) ++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) ++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) ++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ ++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) ++ ++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 ++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_PORT_SRC (1) ++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) ++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) ++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) ++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) ++ ++#define NH_FLD_SCTP_PORT_SIZE 2 ++ ++/***************************** DCCP fields *********************************/ ++#define NH_FLD_DCCP_PORT_SRC (1) ++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) ++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) ++ ++#define NH_FLD_DCCP_PORT_SIZE 2 ++ ++/***************************** IPHC fields *********************************/ ++#define NH_FLD_IPHC_CID (1) ++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) ++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) ++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) ++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) ++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) ++ ++/***************************** SCTP fields *********************************/ ++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) ++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) ++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) ++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) ++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) ++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) ++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) ++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) ++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) ++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ ++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) ++ ++/*************************** L2TPV2 fields *********************************/ ++#define NH_FLD_L2TPV2_TYPE_BIT (1) ++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) ++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) ++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) ++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) ++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) ++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) ++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) ++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) ++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) ++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) ++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) ++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) ++#define NH_FLD_L2TPV2_ALL_FIELDS \ ++ ((NH_FLD_L2TPV2_TYPE_BIT 
<< 13) - 1) ++ ++/*************************** L2TPV3 fields *********************************/ ++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) ++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) ++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) ++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) ++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) ++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) ++ ++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) ++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) ++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) ++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) ++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ ++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) ++ ++/**************************** PPP fields ***********************************/ ++#define NH_FLD_PPP_PID (1) ++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) ++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) ++ ++/************************** PPPoE fields ***********************************/ ++#define NH_FLD_PPPOE_VER (1) ++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) ++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) ++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) ++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) ++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) ++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) ++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) ++ ++/************************* PPP-Mux fields **********************************/ ++#define NH_FLD_PPPMUX_PID (1) ++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) ++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) ++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) ++ ++/*********************** PPP-Mux sub-frame fields **************************/ ++#define NH_FLD_PPPMUX_SUBFRM_PFF (1) ++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) ++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) ++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) ++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) ++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ ++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) ++ ++/*************************** LLC fields ************************************/ ++#define NH_FLD_LLC_DSAP (1) ++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) ++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) ++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) ++ ++/*************************** NLPID fields **********************************/ ++#define NH_FLD_NLPID_NLPID (1) ++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) ++ ++/*************************** SNAP fields ***********************************/ ++#define NH_FLD_SNAP_OUI (1) ++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) ++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) ++ ++/*************************** LLC SNAP fields *******************************/ ++#define NH_FLD_LLC_SNAP_TYPE (1) ++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) ++ ++#define NH_FLD_ARP_HTYPE (1) ++#define 
NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) ++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) ++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) ++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) ++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) ++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) ++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) ++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) ++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) ++ ++/*************************** RFC2684 fields ********************************/ ++#define NH_FLD_RFC2684_LLC (1) ++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) ++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) ++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) ++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) ++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) ++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) ++ ++/*************************** User defined fields ***************************/ ++#define NH_FLD_USER_DEFINED_SRCPORT (1) ++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) ++#define NH_FLD_USER_DEFINED_ALL_FIELDS \ ++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) ++ ++/*************************** Payload fields ********************************/ ++#define NH_FLD_PAYLOAD_BUFFER (1) ++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) ++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) ++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) ++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) ++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) ++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) ++ ++/*************************** GRE fields ************************************/ ++#define NH_FLD_GRE_TYPE (1) ++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) ++ ++/*************************** MINENCAP fields *******************************/ ++#define NH_FLD_MINENCAP_SRC_IP (1) ++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) ++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) ++#define NH_FLD_MINENCAP_ALL_FIELDS \ ++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) ++ ++/*************************** IPSEC AH fields *******************************/ ++#define NH_FLD_IPSEC_AH_SPI (1) ++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) ++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) ++ ++/*************************** IPSEC ESP fields ******************************/ ++#define NH_FLD_IPSEC_ESP_SPI (1) ++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) ++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) ++ ++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 ++ ++/*************************** MPLS fields ***********************************/ ++#define NH_FLD_MPLS_LABEL_STACK (1) ++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ ++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) ++ ++/*************************** MACSEC fields *********************************/ ++#define NH_FLD_MACSEC_SECTAG (1) ++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) ++ ++/*************************** GTP fields ************************************/ ++#define NH_FLD_GTP_TEID (1) ++ ++ ++/* Protocol options */ ++ ++/* Ethernet options */ ++#define NH_OPT_ETH_BROADCAST 1 ++#define NH_OPT_ETH_MULTICAST 2 ++#define NH_OPT_ETH_UNICAST 3 ++#define NH_OPT_ETH_BPDU 4 ++ ++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) ++/* also applicable for broadcast */ ++ ++/* VLAN options */ ++#define 
NH_OPT_VLAN_CFI 1 ++ ++/* IPV4 options */ ++#define NH_OPT_IPV4_UNICAST 1 ++#define NH_OPT_IPV4_MULTICAST 2 ++#define NH_OPT_IPV4_BROADCAST 3 ++#define NH_OPT_IPV4_OPTION 4 ++#define NH_OPT_IPV4_FRAG 5 ++#define NH_OPT_IPV4_INITIAL_FRAG 6 ++ ++/* IPV6 options */ ++#define NH_OPT_IPV6_UNICAST 1 ++#define NH_OPT_IPV6_MULTICAST 2 ++#define NH_OPT_IPV6_OPTION 3 ++#define NH_OPT_IPV6_FRAG 4 ++#define NH_OPT_IPV6_INITIAL_FRAG 5 ++ ++/* General IP options (may be used for any version) */ ++#define NH_OPT_IP_FRAG 1 ++#define NH_OPT_IP_INITIAL_FRAG 2 ++#define NH_OPT_IP_OPTION 3 ++ ++/* Minenc. options */ ++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 ++ ++/* GRE. options */ ++#define NH_OPT_GRE_ROUTING_PRESENT 1 ++ ++/* TCP options */ ++#define NH_OPT_TCP_OPTIONS 1 ++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 ++#define NH_OPT_TCP_CONTROL_LOW_BITS 3 ++ ++/* CAPWAP options */ ++#define NH_OPT_CAPWAP_DTLS 1 ++ ++enum net_prot { ++ NET_PROT_NONE = 0, ++ NET_PROT_PAYLOAD, ++ NET_PROT_ETH, ++ NET_PROT_VLAN, ++ NET_PROT_IPV4, ++ NET_PROT_IPV6, ++ NET_PROT_IP, ++ NET_PROT_TCP, ++ NET_PROT_UDP, ++ NET_PROT_UDP_LITE, ++ NET_PROT_IPHC, ++ NET_PROT_SCTP, ++ NET_PROT_SCTP_CHUNK_DATA, ++ NET_PROT_PPPOE, ++ NET_PROT_PPP, ++ NET_PROT_PPPMUX, ++ NET_PROT_PPPMUX_SUBFRM, ++ NET_PROT_L2TPV2, ++ NET_PROT_L2TPV3_CTRL, ++ NET_PROT_L2TPV3_SESS, ++ NET_PROT_LLC, ++ NET_PROT_LLC_SNAP, ++ NET_PROT_NLPID, ++ NET_PROT_SNAP, ++ NET_PROT_MPLS, ++ NET_PROT_IPSEC_AH, ++ NET_PROT_IPSEC_ESP, ++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ ++ NET_PROT_MACSEC, ++ NET_PROT_GRE, ++ NET_PROT_MINENCAP, ++ NET_PROT_DCCP, ++ NET_PROT_ICMP, ++ NET_PROT_IGMP, ++ NET_PROT_ARP, ++ NET_PROT_CAPWAP_DATA, ++ NET_PROT_CAPWAP_CTRL, ++ NET_PROT_RFC2684, ++ NET_PROT_ICMPV6, ++ NET_PROT_FCOE, ++ NET_PROT_FIP, ++ NET_PROT_ISCSI, ++ NET_PROT_GTP, ++ NET_PROT_USER_DEFINED_L2, ++ NET_PROT_USER_DEFINED_L3, ++ NET_PROT_USER_DEFINED_L4, ++ NET_PROT_USER_DEFINED_L5, ++ NET_PROT_USER_DEFINED_SHIM1, ++ NET_PROT_USER_DEFINED_SHIM2, ++ ++ NET_PROT_DUMMY_LAST ++}; ++ ++/*! IEEE8021.Q */ ++#define NH_IEEE8021Q_ETYPE 0x8100 ++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ ++ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ ++ (((uint32_t)(pcp & 0x07)) << 13) | \ ++ (((uint32_t)(dei & 0x01)) << 12) | \ ++ (((uint32_t)(vlan_id & 0xFFF)))) ++ ++#endif /* __FSL_NET_H */ +diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst +new file mode 100644 +index 0000000..909ed7a +--- /dev/null ++++ b/scripts/Makefile.dtbinst +@@ -0,0 +1,51 @@ ++# ========================================================================== ++# Installing dtb files ++# ++# Installs all dtb files listed in $(dtb-y) either in the ++# INSTALL_DTBS_PATH directory or the default location: ++# ++# $INSTALL_PATH/dtbs/$KERNELRELEASE ++# ++# Traverse through subdirectories listed in $(dts-dirs). 
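++#
++# Typical invocation (an assumed example; the dtbs_install target and the
++# variables below come from the top-level kernel Makefile, not this file):
++#
++#   make ARCH=arm64 dtbs_install INSTALL_DTBS_PATH=/boot/dtbs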
++# ========================================================================== ++ ++src := $(obj) ++ ++PHONY := __dtbs_install ++__dtbs_install: ++ ++export dtbinst-root ?= $(obj) ++ ++include include/config/auto.conf ++include scripts/Kbuild.include ++include $(srctree)/$(obj)/Makefile ++ ++PHONY += __dtbs_install_prep ++__dtbs_install_prep: ++ifeq ("$(dtbinst-root)", "$(obj)") ++ $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi ++ $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi ++ $(Q)mkdir -p $(INSTALL_DTBS_PATH) ++endif ++ ++dtbinst-files := $(dtb-y) ++dtbinst-dirs := $(dts-dirs) ++ ++# Helper targets for Installing DTBs into the boot directory ++quiet_cmd_dtb_install = INSTALL $< ++ cmd_dtb_install = mkdir -p $(2); cp $< $(2) ++ ++install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj)) ++ ++$(dtbinst-files) $(dtbinst-dirs): | __dtbs_install_prep ++ ++$(dtbinst-files): %.dtb: $(obj)/%.dtb ++ $(call cmd,dtb_install,$(install-dir)) ++ ++$(dtbinst-dirs): ++ $(Q)$(MAKE) $(dtbinst)=$(obj)/$@ ++ ++PHONY += $(dtbinst-files) $(dtbinst-dirs) ++__dtbs_install: $(dtbinst-files) $(dtbinst-dirs) ++ ++.PHONY: $(PHONY) +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch new file mode 100644 index 00000000..6a6e36f5 --- /dev/null +++ b/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch @@ -0,0 +1,11095 @@ +From fdf22b15468bed6aac4e52e83903d8e010fbe60b Mon Sep 17 00:00:00 2001 +From: Shengzhou Liu +Date: Fri, 23 Sep 2016 14:58:06 +0800 +Subject: [PATCH 2/2] Backport some kernel patches based on 3.18.25 + +Fixup dpaa2-eth, phy, pcie, gicv3, sdhc, i2c. +Verified on LS2080A/LS2088A RDB. 
+--- + Documentation/devicetree/bindings/arm/gic.txt | 8 +- + .../devicetree/bindings/clock/qoriq-clock.txt | 64 +- + Documentation/devicetree/bindings/i2c/i2c-imx.txt | 11 + + .../devicetree/bindings/i2c/i2c-mux-pca954x.txt | 3 + + .../bindings/memory-controllers/fsl/ifc.txt | 3 + + Documentation/devicetree/of_selftest.txt | 20 +- + Documentation/devicetree/todo.txt | 1 - + arch/arm64/Kconfig | 1 + + arch/arm64/include/asm/device.h | 1 + + arch/arm64/include/asm/dma-mapping.h | 16 +- + arch/powerpc/include/asm/mpc85xx.h | 94 -- + arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2 +- + arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 2 +- + arch/powerpc/platforms/85xx/p1022_ds.c | 2 +- + arch/powerpc/platforms/85xx/p1022_rdk.c | 2 +- + arch/powerpc/platforms/85xx/smp.c | 2 +- + arch/powerpc/platforms/85xx/twr_p102x.c | 2 +- + arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 2 +- + arch/x86/pci/xen.c | 4 + + drivers/clk/Kconfig | 10 +- + drivers/clk/Makefile | 2 +- + drivers/clk/clk-qoriq.c | 1256 ++++++++++++++++++++ + drivers/cpufreq/Kconfig.powerpc | 2 +- + drivers/i2c/busses/Kconfig | 4 +- + drivers/i2c/busses/i2c-imx.c | 373 +++++- + drivers/i2c/muxes/i2c-mux-pca9541.c | 4 +- + drivers/i2c/muxes/i2c-mux-pca954x.c | 57 +- + drivers/iommu/fsl_pamu.c | 2 +- + drivers/iommu/io-pgtable-arm.c | 15 +- + drivers/irqchip/Kconfig | 8 + + drivers/irqchip/Makefile | 1 + + drivers/irqchip/irq-gic-common.c | 18 +- + drivers/irqchip/irq-gic-common.h | 2 +- + drivers/irqchip/irq-gic-v2m.c | 333 ++++++ + drivers/irqchip/irq-gic-v3-its.c | 6 +- + drivers/irqchip/irq-gic-v3.c | 66 +- + drivers/irqchip/irq-gic.c | 90 +- + drivers/irqchip/irq-hip04.c | 9 +- + drivers/memory/Kconfig | 2 +- + drivers/memory/fsl_ifc.c | 77 +- + drivers/mfd/vexpress-sysreg.c | 2 +- + drivers/mmc/card/block.c | 4 + + drivers/mmc/host/Kconfig | 10 +- + drivers/mmc/host/sdhci-esdhc.h | 9 +- + drivers/mmc/host/sdhci-of-esdhc.c | 680 +++++++++-- + drivers/mmc/host/sdhci.c | 250 ++-- + drivers/mmc/host/sdhci.h | 42 + + drivers/mtd/nand/Kconfig | 2 +- + drivers/mtd/nand/fsl_ifc_nand.c | 301 ++--- + drivers/net/ethernet/freescale/gianfar.c | 6 +- + drivers/net/phy/Kconfig | 14 +- + drivers/net/phy/Makefile | 4 +- + drivers/net/phy/at803x.c | 4 + + drivers/net/phy/fixed.c | 336 ------ + drivers/net/phy/fixed_phy.c | 370 ++++++ + drivers/net/phy/marvell.c | 11 + + drivers/net/phy/mdio_bus.c | 34 +- + drivers/net/phy/phy.c | 19 +- + drivers/net/phy/phy_device.c | 90 +- + drivers/net/phy/realtek.c | 82 +- + drivers/of/base.c | 53 +- + drivers/of/dynamic.c | 13 - + drivers/of/fdt.c | 30 +- + drivers/of/pdt.c | 27 +- + drivers/of/selftest.c | 71 +- + drivers/pci/Makefile | 1 + + drivers/pci/access.c | 87 ++ + drivers/pci/host/Kconfig | 2 +- + drivers/pci/host/pci-layerscape.c | 86 +- + drivers/pci/host/pcie-designware.c | 14 + + drivers/pci/host/pcie-designware.h | 1 + + drivers/pci/msi.c | 5 + + drivers/pci/pci.c | 1 + + drivers/pci/pcie/portdrv_core.c | 31 +- + drivers/pci/probe.c | 1 + + drivers/pci/remove.c | 2 + + drivers/pci/setup-bus.c | 1 + + drivers/pci/setup-irq.c | 1 + + drivers/soc/Kconfig | 13 + + drivers/soc/Makefile | 1 + + drivers/soc/fsl/Kconfig | 6 + + drivers/soc/fsl/Kconfig.arm | 25 + + drivers/soc/fsl/Makefile | 6 + + drivers/soc/fsl/guts.c | 123 ++ + drivers/soc/fsl/ls1/Kconfig | 11 + + drivers/soc/fsl/ls1/Makefile | 1 + + drivers/soc/fsl/ls1/ftm_alarm.c | 274 +++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 273 +++-- + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 48 +- + 
drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 553 ++++----- + drivers/staging/fsl-dpaa2/mac/mac.c | 4 +- + drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- + drivers/staging/fsl-mc/include/mc-private.h | 2 +- + drivers/usb/host/xhci.c | 6 +- + include/linux/fsl/guts.h | 99 +- + include/linux/fsl/svr.h | 95 ++ + include/linux/fsl_ifc.h | 116 +- + include/linux/interrupt.h | 14 + + include/linux/iommu.h | 1 + + include/linux/irq.h | 8 + + include/linux/irqchip/arm-gic-v3.h | 12 + + include/linux/irqchip/arm-gic.h | 2 + + include/linux/irqdomain.h | 1 + + include/linux/mmc/sdhci.h | 16 +- + include/linux/of.h | 11 +- + include/linux/of_pdt.h | 3 +- + include/linux/pci.h | 11 + + include/linux/phy.h | 1 + + include/linux/phy_fixed.h | 11 +- + kernel/irq/chip.c | 58 +- + kernel/irq/manage.c | 91 ++ + kernel/irq/msi.c | 13 +- + sound/soc/fsl/mpc8610_hpcd.c | 2 +- + sound/soc/fsl/p1022_ds.c | 2 +- + sound/soc/fsl/p1022_rdk.c | 2 +- + 115 files changed, 5570 insertions(+), 1621 deletions(-) + delete mode 100644 arch/powerpc/include/asm/mpc85xx.h + create mode 100644 drivers/clk/clk-qoriq.c + create mode 100644 drivers/irqchip/irq-gic-v2m.c + delete mode 100644 drivers/net/phy/fixed.c + create mode 100644 drivers/net/phy/fixed_phy.c + create mode 100644 drivers/soc/fsl/Kconfig + create mode 100644 drivers/soc/fsl/Kconfig.arm + create mode 100644 drivers/soc/fsl/Makefile + create mode 100644 drivers/soc/fsl/guts.c + create mode 100644 drivers/soc/fsl/ls1/Kconfig + create mode 100644 drivers/soc/fsl/ls1/Makefile + create mode 100644 drivers/soc/fsl/ls1/ftm_alarm.c + create mode 100644 include/linux/fsl/svr.h + +diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt +index c7d2fa1..e87d3d7 100644 +--- a/Documentation/devicetree/bindings/arm/gic.txt ++++ b/Documentation/devicetree/bindings/arm/gic.txt +@@ -31,12 +31,16 @@ Main node required properties: + The 3rd cell is the flags, encoded as follows: + bits[3:0] trigger type and level flags. + 1 = low-to-high edge triggered +- 2 = high-to-low edge triggered ++ 2 = high-to-low edge triggered (invalid for SPIs) + 4 = active high level-sensitive +- 8 = active low level-sensitive ++ 8 = active low level-sensitive (invalid for SPIs). + bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of + the 8 possible cpus attached to the GIC. A bit set to '1' indicated + the interrupt is wired to that CPU. Only valid for PPI interrupts. ++ Also note that the configurability of PPI interrupts is IMPLEMENTATION ++ DEFINED and as such not guaranteed to be present (most SoC available ++ in 2014 seem to ignore the setting of this flag and use the hardware ++ default value). + + - reg : Specifies base physical address(s) and size of the GIC registers. The + first region is the GIC distributor register base and size. The 2nd region is +diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt +index 5666812..128fc72 100644 +--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt ++++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt +@@ -1,6 +1,6 @@ +-* Clock Block on Freescale CoreNet Platforms ++* Clock Block on Freescale QorIQ Platforms + +-Freescale CoreNet chips take primary clocking input from the external ++Freescale QorIQ chips take primary clocking input from the external + SYSCLK signal. 
The SYSCLK input (frequency) is multiplied using + multiple phase locked loops (PLL) to create a variety of frequencies + which can then be passed to a variety of internal logic, including +@@ -13,14 +13,16 @@ which the chip complies. + Chassis Version Example Chips + --------------- ------------- + 1.0 p4080, p5020, p5040 +-2.0 t4240, b4860, t1040 ++2.0 t4240, b4860 + + 1. Clock Block Binding + + Required properties: +-- compatible: Should contain a specific clock block compatible string +- and a single chassis clock compatible string. +- Clock block strings include, but not limited to, one of the: ++- compatible: Should contain a chip-specific clock block compatible ++ string and (if applicable) may contain a chassis-version clock ++ compatible string. ++ ++ Chip-specific strings are of the form "fsl,-clockgen", such as: + * "fsl,p2041-clockgen" + * "fsl,p3041-clockgen" + * "fsl,p4080-clockgen" +@@ -29,15 +31,15 @@ Required properties: + * "fsl,t4240-clockgen" + * "fsl,b4420-clockgen" + * "fsl,b4860-clockgen" +- Chassis clock strings include: ++ * "fsl,ls1021a-clockgen" ++ Chassis-version clock strings include: + * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks + * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks + - reg: Describes the address of the device's resources within the + address space defined by its parent bus, and resource zero + represents the clock register set +-- clock-frequency: Input system clock frequency + +-Recommended properties: ++Optional properties: + - ranges: Allows valid translation between child's address space and + parent's. Must be present if the device has sub-nodes. + - #address-cells: Specifies the number of cells used to represent +@@ -46,8 +48,46 @@ Recommended properties: + - #size-cells: Specifies the number of cells used to represent + the size of an address. Must be present if the device has + sub-nodes and set to 1 if present ++- clock-frequency: Input system clock frequency (SYSCLK) ++- clocks: If clock-frequency is not specified, sysclk may be provided ++ as an input clock. Either clock-frequency or clocks must be ++ provided. ++ ++2. Clock Provider ++ ++The clockgen node should act as a clock provider, though in older device ++trees the children of the clockgen node are the clock providers. ++ ++When the clockgen node is a clock provider, #clock-cells = <2>. ++The first cell of the clock specifier is the clock type, and the ++second cell is the clock index for the specified type. ++ ++ Type# Name Index Cell ++ 0 sysclk must be 0 ++ 1 cmux index (n in CLKCnCSR) ++ 2 hwaccel index (n in CLKCGnHWACSR) ++ 3 fman 0 for fm1, 1 for fm2 ++ 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4 ++ ++3. Example ++ ++ clockgen: global-utilities@e1000 { ++ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; ++ clock-frequency = <133333333>; ++ reg = <0xe1000 0x1000>; ++ #clock-cells = <2>; ++ }; ++ ++ fman@400000 { ++ ... ++ clocks = <&clockgen 3 0>; ++ ... ++ }; ++} ++4. Legacy Child Nodes + +-2. Clock Provider/Consumer Binding ++NOTE: These nodes are deprecated. Kernels should continue to support ++device trees with these nodes, but new device trees should not use them. + + Most of the bindings are from the common clock binding[1]. + [1] Documentation/devicetree/bindings/clock/clock-bindings.txt +@@ -79,7 +119,7 @@ Recommended properties: + - reg: Should be the offset and length of clock block base address. + The length should be 4. 
+ +-Example for clock block and clock provider: ++Legacy Example: + / { + clockgen: global-utilities@e1000 { + compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; +@@ -131,7 +171,7 @@ Example for clock block and clock provider: + }; + } + +-Example for clock consumer: ++Example for legacy clock consumer: + + / { + cpu0: PowerPC,e5500@0 { +diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt +index 4a8513e..52d37fd 100644 +--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt ++++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt +@@ -11,6 +11,8 @@ Required properties: + Optional properties: + - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz. + The absence of the propoerty indicates the default frequency 100 kHz. ++- dmas: A list of two dma specifiers, one for each entry in dma-names. ++- dma-names: should contain "tx" and "rx". + + Examples: + +@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */ + interrupts = <64>; + clock-frequency = <400000>; + }; ++ ++i2c0: i2c@40066000 { /* i2c0 on vf610 */ ++ compatible = "fsl,vf610-i2c"; ++ reg = <0x40066000 0x1000>; ++ interrupts =<0 71 0x04>; ++ dmas = <&edma0 0 50>, ++ <&edma0 0 51>; ++ dma-names = "rx","tx"; ++}; +diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +index 34a3fb6..cf53d5f 100644 +--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt ++++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +@@ -16,6 +16,9 @@ Required Properties: + Optional Properties: + + - reset-gpios: Reference to the GPIO connected to the reset input. ++ - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all ++ children in idle state. This is necessary for example, if there are several ++ multiplexers on the bus and the devices behind them use same I2C addresses. + + + Example: +diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt +index d5e3704..89427b0 100644 +--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt ++++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt +@@ -18,6 +18,8 @@ Properties: + interrupt (NAND_EVTER_STAT). If there is only one, + that interrupt reports both types of event. + ++- little-endian : If this property is absent, the big-endian mode will ++ be in use as default for registers. + + - ranges : Each range corresponds to a single chipselect, and covers + the entire access window as configured. +@@ -34,6 +36,7 @@ Example: + #size-cells = <1>; + reg = <0x0 0xffe1e000 0 0x2000>; + interrupts = <16 2 19 2>; ++ little-endian; + + /* NOR, NAND Flashes and CPLD on board */ + ranges = <0x0 0x0 0x0 0xee000000 0x02000000 +diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt +index 1e3d5c9..57a808b 100644 +--- a/Documentation/devicetree/of_selftest.txt ++++ b/Documentation/devicetree/of_selftest.txt +@@ -63,7 +63,6 @@ struct device_node { + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; +- struct device_node *allnext; /* next in list of all nodes */ + ... + }; + +@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null + Figure 1: Generic structure of un-flattened device tree + + +-*allnext: it is used to link all the nodes of DT into a list. 
So, for the +- above tree the list would be as follows: +- +-root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2-> +-child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null +- + Before executing OF selftest, it is required to attach the test data to + machine's device tree (if present). So, when selftest_data_add() is called, + at first it reads the flattened device tree data linked into the kernel image +@@ -131,11 +124,6 @@ root ('/') + test-child01 null null null + + +-allnext list: +- +-root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2 +-->test-sibling3->null +- + Figure 2: Example test data tree to be attached to live tree. + + According to the scenario above, the live tree is already present so it isn't +@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the + whole tree). selftest_data_remove() calls detach_node_and_children() that uses + of_detach_node() to detach the nodes from the live device tree. + +-To detach a node, of_detach_node() first updates all_next linked list, by +-attaching the previous node's allnext to current node's allnext pointer. And +-then, it either updates the child pointer of given node's parent to its +-sibling or attaches the previous sibling to the given node's sibling, as +-appropriate. That is it :) ++To detach a node, of_detach_node() either updates the child pointer of given ++node's parent to its sibling or attaches the previous sibling to the given ++node's sibling, as appropriate. That is it :) +diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt +index c3cf065..b5139d1 100644 +--- a/Documentation/devicetree/todo.txt ++++ b/Documentation/devicetree/todo.txt +@@ -2,7 +2,6 @@ Todo list for devicetree: + + === General structure === + - Switch from custom lists to (h)list_head for nodes and properties structure +-- Remove of_allnodes list and iterate using list of child nodes alone + + === CONFIG_OF_DYNAMIC === + - Switch to RCU for tree updates and get rid of global spinlock +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 08e1287..329f5f4 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -13,6 +13,7 @@ config ARM64 + select ARM_ARCH_TIMER + select ARM_GIC + select AUDIT_ARCH_COMPAT_GENERIC ++ select ARM_GIC_V2M if PCI_MSI + select ARM_GIC_V3 + select ARM_GIC_V3_ITS if PCI_MSI + select BUILDTIME_EXTABLE_SORT +diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h +index cf98b36..243ef25 100644 +--- a/arch/arm64/include/asm/device.h ++++ b/arch/arm64/include/asm/device.h +@@ -21,6 +21,7 @@ struct dev_archdata { + #ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ + #endif ++ bool dma_coherent; + }; + + struct pdev_archdata { +diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h +index adeae3f..9ce3e68 100644 +--- a/arch/arm64/include/asm/dma-mapping.h ++++ b/arch/arm64/include/asm/dma-mapping.h +@@ -52,12 +52,20 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) + dev->archdata.dma_ops = ops; + } + +-static inline int set_arch_dma_coherent_ops(struct device *dev) ++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, ++ struct iommu_ops *iommu, bool coherent) + { +- set_dma_ops(dev, &coherent_swiotlb_dma_ops); +- return 0; ++ dev->archdata.dma_coherent = coherent; ++ if (coherent) ++ set_dma_ops(dev, &coherent_swiotlb_dma_ops); ++} ++#define 
arch_setup_dma_ops arch_setup_dma_ops ++ ++/* do not use this function in a driver */ ++static inline bool is_device_dma_coherent(struct device *dev) ++{ ++ return dev->archdata.dma_coherent; + } +-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops + + #include + +diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h +deleted file mode 100644 +index 3bef74a..0000000 +--- a/arch/powerpc/include/asm/mpc85xx.h ++++ /dev/null +@@ -1,94 +0,0 @@ +-/* +- * MPC85xx cpu type detection +- * +- * Copyright 2011-2012 Freescale Semiconductor, Inc. +- * +- * This is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- */ +- +-#ifndef __ASM_PPC_MPC85XX_H +-#define __ASM_PPC_MPC85XX_H +- +-#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ +-#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ +-#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ +- +-/* Some parts define SVR[0:23] as the SOC version */ +-#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ +- +-#define SVR_8533 0x803400 +-#define SVR_8535 0x803701 +-#define SVR_8536 0x803700 +-#define SVR_8540 0x803000 +-#define SVR_8541 0x807200 +-#define SVR_8543 0x803200 +-#define SVR_8544 0x803401 +-#define SVR_8545 0x803102 +-#define SVR_8547 0x803101 +-#define SVR_8548 0x803100 +-#define SVR_8555 0x807100 +-#define SVR_8560 0x807000 +-#define SVR_8567 0x807501 +-#define SVR_8568 0x807500 +-#define SVR_8569 0x808000 +-#define SVR_8572 0x80E000 +-#define SVR_P1010 0x80F100 +-#define SVR_P1011 0x80E500 +-#define SVR_P1012 0x80E501 +-#define SVR_P1013 0x80E700 +-#define SVR_P1014 0x80F101 +-#define SVR_P1017 0x80F700 +-#define SVR_P1020 0x80E400 +-#define SVR_P1021 0x80E401 +-#define SVR_P1022 0x80E600 +-#define SVR_P1023 0x80F600 +-#define SVR_P1024 0x80E402 +-#define SVR_P1025 0x80E403 +-#define SVR_P2010 0x80E300 +-#define SVR_P2020 0x80E200 +-#define SVR_P2040 0x821000 +-#define SVR_P2041 0x821001 +-#define SVR_P3041 0x821103 +-#define SVR_P4040 0x820100 +-#define SVR_P4080 0x820000 +-#define SVR_P5010 0x822100 +-#define SVR_P5020 0x822000 +-#define SVR_P5021 0X820500 +-#define SVR_P5040 0x820400 +-#define SVR_T4240 0x824000 +-#define SVR_T4120 0x824001 +-#define SVR_T4160 0x824100 +-#define SVR_C291 0x850000 +-#define SVR_C292 0x850020 +-#define SVR_C293 0x850030 +-#define SVR_B4860 0X868000 +-#define SVR_G4860 0x868001 +-#define SVR_G4060 0x868003 +-#define SVR_B4440 0x868100 +-#define SVR_G4440 0x868101 +-#define SVR_B4420 0x868102 +-#define SVR_B4220 0x868103 +-#define SVR_T1040 0x852000 +-#define SVR_T1041 0x852001 +-#define SVR_T1042 0x852002 +-#define SVR_T1020 0x852100 +-#define SVR_T1021 0x852101 +-#define SVR_T1022 0x852102 +-#define SVR_T2080 0x853000 +-#define SVR_T2081 0x853100 +- +-#define SVR_8610 0x80A000 +-#define SVR_8641 0x809000 +-#define SVR_8641D 0x809001 +- +-#define SVR_9130 0x860001 +-#define SVR_9131 0x860000 +-#define SVR_9132 0x861000 +-#define SVR_9232 0x861400 +- +-#define SVR_Unknown 0xFFFFFF +- +-#endif +diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +index a392e94..f0be439 100644 +--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c ++++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -51,7 +52,6 
@@ + #include + #include + #include +-#include + #include "smp.h" + + #include "mpc85xx.h" +diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +index e358bed..50dcc00 100644 +--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c ++++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -27,7 +28,6 @@ + #include + #include + #include +-#include + + #include + #include +diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c +index 6ac986d..371df82 100644 +--- a/arch/powerpc/platforms/85xx/p1022_ds.c ++++ b/arch/powerpc/platforms/85xx/p1022_ds.c +@@ -16,6 +16,7 @@ + * kind, whether express or implied. + */ + ++#include + #include + #include + #include +@@ -25,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include "smp.h" + +diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c +index 7a180f0..4f8fc5f 100644 +--- a/arch/powerpc/platforms/85xx/p1022_rdk.c ++++ b/arch/powerpc/platforms/85xx/p1022_rdk.c +@@ -12,6 +12,7 @@ + * kind, whether express or implied. + */ + ++#include + #include + #include + #include +@@ -21,7 +22,6 @@ + #include + #include + #include +-#include + #include "smp.h" + + #include "mpc85xx.h" +diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c +index d7c1e69..3956455 100644 +--- a/arch/powerpc/platforms/85xx/smp.c ++++ b/arch/powerpc/platforms/85xx/smp.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -26,7 +27,6 @@ + #include + #include + #include +-#include + #include + #include + +diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c +index 1eadb6d..2799120 100644 +--- a/arch/powerpc/platforms/85xx/twr_p102x.c ++++ b/arch/powerpc/platforms/85xx/twr_p102x.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -23,7 +24,6 @@ + #include + #include + #include +-#include + + #include + #include +diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +index 55413a5..437a9c3 100644 +--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c ++++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -38,7 +39,6 @@ + #include + #include + #include +-#include + + #include "mpc86xx.h" + +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c +index 4f6844b..878fb8e 100644 +--- a/arch/x86/pci/xen.c ++++ b/arch/x86/pci/xen.c +@@ -296,12 +296,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + map_irq.entry_nr = nvec; + } else if (type == PCI_CAP_ID_MSIX) { + int pos; ++ unsigned long flags; + u32 table_offset, bir; + + pos = dev->msix_cap; + pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, + &table_offset); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); ++ flags = pci_resource_flags(dev, bir); ++ if (!flags || (flags & IORESOURCE_UNSET)) ++ return -EINVAL; + + map_irq.table_base = pci_resource_start(dev, bir); + map_irq.entry_nr = msidesc->msi_attrib.entry_nr; +diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig +index 455fd17..38c8814 100644 +--- a/drivers/clk/Kconfig ++++ b/drivers/clk/Kconfig +@@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN + Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx + FPGAs. 
It is commonly used in Analog Devices' reference designs. + +-config CLK_PPC_CORENET +- bool "Clock driver for PowerPC corenet platforms" +- depends on PPC_E500MC && OF ++config CLK_QORIQ ++ bool "Clock driver for Freescale QorIQ platforms" ++ depends on (PPC_E500MC || ARM || ARM64) && OF + ---help--- +- This adds the clock driver support for Freescale PowerPC corenet +- platforms using common clock framework. ++ This adds the clock driver support for Freescale QorIQ platforms ++ using common clock framework. + + config COMMON_CLK_XGENE + bool "Clock driver for APM XGene SoC" +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +index d5fba5b..4ff94cd 100644 +--- a/drivers/clk/Makefile ++++ b/drivers/clk/Makefile +@@ -30,7 +30,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o + obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o + obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o + obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o +-obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o ++obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o + obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o + obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o + obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o +diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c +new file mode 100644 +index 0000000..74051c9 +--- /dev/null ++++ b/drivers/clk/clk-qoriq.c +@@ -0,0 +1,1256 @@ ++/* ++ * Copyright 2013 Freescale Semiconductor, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * clock driver for Freescale QorIQ SoCs. ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PLL_DIV1 0 ++#define PLL_DIV2 1 ++#define PLL_DIV3 2 ++#define PLL_DIV4 3 ++ ++#define PLATFORM_PLL 0 ++#define CGA_PLL1 1 ++#define CGA_PLL2 2 ++#define CGA_PLL3 3 ++#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ ++#define CGB_PLL1 4 ++#define CGB_PLL2 5 ++ ++struct clockgen_pll_div { ++ struct clk *clk; ++ char name[32]; ++}; ++ ++struct clockgen_pll { ++ struct clockgen_pll_div div[4]; ++}; ++ ++#define CLKSEL_VALID 1 ++#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */ ++ ++struct clockgen_sourceinfo { ++ u32 flags; /* CLKSEL_xxx */ ++ int pll; /* CGx_PLLn */ ++ int div; /* PLL_DIVn */ ++}; ++ ++#define NUM_MUX_PARENTS 16 ++ ++struct clockgen_muxinfo { ++ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS]; ++}; ++ ++#define NUM_HWACCEL 5 ++#define NUM_CMUX 8 ++ ++struct clockgen; ++ ++/* ++ * cmux freq must be >= platform pll. 
++ * If not set, cmux freq must be >= platform pll/2 ++ */ ++#define CG_CMUX_GE_PLAT 1 ++ ++#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */ ++#define CG_VER3 4 /* version 3 cg: reg layout different */ ++#define CG_LITTLE_ENDIAN 8 ++ ++struct clockgen_chipinfo { ++ const char *compat, *guts_compat; ++ const struct clockgen_muxinfo *cmux_groups[2]; ++ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; ++ void (*init_periph)(struct clockgen *cg); ++ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */ ++ u32 pll_mask; /* 1 << n bit set if PLL n is valid */ ++ u32 flags; /* CG_xxx */ ++}; ++ ++struct clockgen { ++ struct device_node *node; ++ void __iomem *regs; ++ struct clockgen_chipinfo info; /* mutable copy */ ++ struct clk *sysclk; ++ struct clockgen_pll pll[6]; ++ struct clk *cmux[NUM_CMUX]; ++ struct clk *hwaccel[NUM_HWACCEL]; ++ struct clk *fman[2]; ++ struct ccsr_guts __iomem *guts; ++}; ++ ++static struct clockgen clockgen; ++ ++static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg) ++{ ++ if (cg->info.flags & CG_LITTLE_ENDIAN) ++ iowrite32(val, reg); ++ else ++ iowrite32be(val, reg); ++} ++ ++static u32 cg_in(struct clockgen *cg, u32 __iomem *reg) ++{ ++ u32 val; ++ ++ if (cg->info.flags & CG_LITTLE_ENDIAN) ++ val = ioread32(reg); ++ else ++ val = ioread32be(reg); ++ ++ return val; ++} ++ ++static const struct clockgen_muxinfo p2041_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p2041_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5020_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5020_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5040_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p5040_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p4080_cmux_grp1 = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo p4080_cmux_grp2 = { ++ { ++ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, ++ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, ++ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, ++ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 }, ++ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo 
t1023_cmux = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ } ++}; ++ ++static const struct clockgen_muxinfo t1040_cmux = { ++ { ++ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ } ++}; ++ ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cga = { ++ { ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cga12 = { ++ { ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo clockgen2_cmux_cgb = { ++ { ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t1023_hwa1 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t1023_hwa2 = { ++ { ++ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t2080_hwa1 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t2080_hwa2 = { ++ { ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa1 = { ++ { ++ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, ++ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, ++ {}, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, ++ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa4 = { ++ { ++ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, ++ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, ++ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, ++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ }, ++}; ++ ++static const struct clockgen_muxinfo t4240_hwa5 = { ++ { ++ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, ++ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 }, ++ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, ++ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, ++ [6] = { CLKSEL_VALID, 
CGB_PLL1, PLL_DIV2 }, ++ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, ++ }, ++}; ++ ++#define RCWSR7_FM1_CLK_SEL 0x40000000 ++#define RCWSR7_FM2_CLK_SEL 0x20000000 ++#define RCWSR7_HWA_ASYNC_DIV 0x04000000 ++ ++static void __init p2041_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p4080_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++ ++ if (reg & RCWSR7_FM2_CLK_SEL) ++ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; ++ else ++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p5020_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ int div = PLL_DIV2; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ if (reg & RCWSR7_HWA_ASYNC_DIV) ++ div = PLL_DIV4; ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init p5040_init_periph(struct clockgen *cg) ++{ ++ u32 reg; ++ int div = PLL_DIV2; ++ ++ reg = ioread32be(&cg->guts->rcwsr[7]); ++ if (reg & RCWSR7_HWA_ASYNC_DIV) ++ div = PLL_DIV4; ++ ++ if (reg & RCWSR7_FM1_CLK_SEL) ++ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk; ++ else ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++ ++ if (reg & RCWSR7_FM2_CLK_SEL) ++ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk; ++ else ++ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; ++} ++ ++static void __init t1023_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[1]; ++} ++ ++static void __init t1040_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk; ++} ++ ++static void __init t2080_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[0]; ++} ++ ++static void __init t4240_init_periph(struct clockgen *cg) ++{ ++ cg->fman[0] = cg->hwaccel[3]; ++ cg->fman[1] = cg->hwaccel[4]; ++} ++ ++static const struct clockgen_chipinfo chipinfo[] = { ++ { ++ .compat = "fsl,b4420-clockgen", ++ .guts_compat = "fsl,b4860-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t2080_hwa1 ++ }, ++ .cmux_to_group = { ++ 0, 1, 1, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,b4860-clockgen", ++ .guts_compat = "fsl,b4860-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t2080_hwa1 ++ }, ++ .cmux_to_group = { ++ 0, 1, 1, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,ls1021a-clockgen", ++ .cmux_groups = { ++ &t1023_cmux ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x03, ++ }, ++ { ++ .compat = "fsl,ls2080a-clockgen", ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x37, ++ .flags = CG_VER3 | CG_LITTLE_ENDIAN, ++ }, ++ { ++ .compat = "fsl,ls2088a-clockgen", ++ .cmux_groups = { ++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x37, ++ .flags = 
CG_VER3 | CG_LITTLE_ENDIAN, ++ }, ++ { ++ .compat = "fsl,p2041-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p2041_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p3041-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p2041_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p4080-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p4080_init_periph, ++ .cmux_groups = { ++ &p4080_cmux_grp1, &p4080_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 0, 0, 1, 1, 1, 1 ++ }, ++ .pll_mask = 0x1f, ++ }, ++ { ++ .compat = "fsl,p5020-clockgen", ++ .guts_compat = "fsl,qoriq-device-config-1.0", ++ .init_periph = p5020_init_periph, ++ .cmux_groups = { ++ &p2041_cmux_grp1, &p2041_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 1, -1 ++ }, ++ .pll_mask = 0x07, ++ }, ++ { ++ .compat = "fsl,p5040-clockgen", ++ .guts_compat = "fsl,p5040-device-config", ++ .init_periph = p5040_init_periph, ++ .cmux_groups = { ++ &p5040_cmux_grp1, &p5040_cmux_grp2 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, 1, -1 ++ }, ++ .pll_mask = 0x0f, ++ }, ++ { ++ .compat = "fsl,t1023-clockgen", ++ .guts_compat = "fsl,t1023-device-config", ++ .init_periph = t1023_init_periph, ++ .cmux_groups = { ++ &t1023_cmux ++ }, ++ .hwaccel = { ++ &t1023_hwa1, &t1023_hwa2 ++ }, ++ .cmux_to_group = { ++ 0, 0, -1 ++ }, ++ .pll_mask = 0x03, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t1040-clockgen", ++ .guts_compat = "fsl,t1040-device-config", ++ .init_periph = t1040_init_periph, ++ .cmux_groups = { ++ &t1040_cmux ++ }, ++ .cmux_to_group = { ++ 0, 0, 0, 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t2080-clockgen", ++ .guts_compat = "fsl,t2080-device-config", ++ .init_periph = t2080_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga12 ++ }, ++ .hwaccel = { ++ &t2080_hwa1, &t2080_hwa2 ++ }, ++ .cmux_to_group = { ++ 0, -1 ++ }, ++ .pll_mask = 0x07, ++ .flags = CG_PLL_8BIT, ++ }, ++ { ++ .compat = "fsl,t4240-clockgen", ++ .guts_compat = "fsl,t4240-device-config", ++ .init_periph = t4240_init_periph, ++ .cmux_groups = { ++ &clockgen2_cmux_cga, &clockgen2_cmux_cgb ++ }, ++ .hwaccel = { ++ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5 ++ }, ++ .cmux_to_group = { ++ 0, 0, 1, -1 ++ }, ++ .pll_mask = 0x3f, ++ .flags = CG_PLL_8BIT, ++ }, ++ {}, ++}; ++ ++struct mux_hwclock { ++ struct clk_hw hw; ++ struct clockgen *cg; ++ const struct clockgen_muxinfo *info; ++ u32 __iomem *reg; ++ u8 parent_to_clksel[NUM_MUX_PARENTS]; ++ s8 clksel_to_parent[NUM_MUX_PARENTS]; ++ int num_parents; ++}; ++ ++#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw) ++#define CLKSEL_MASK 0x78000000 ++#define CLKSEL_SHIFT 27 ++ ++static int mux_set_parent(struct clk_hw *hw, u8 idx) ++{ ++ struct mux_hwclock *hwc = to_mux_hwclock(hw); ++ u32 clksel; ++ ++ if (idx >= hwc->num_parents) ++ return -EINVAL; ++ ++ clksel = hwc->parent_to_clksel[idx]; ++ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg); ++ ++ return 0; ++} ++ ++static u8 mux_get_parent(struct clk_hw *hw) ++{ ++ struct mux_hwclock *hwc = to_mux_hwclock(hw); ++ u32 clksel; ++ s8 ret; ++ ++ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; ++ ++ ret = hwc->clksel_to_parent[clksel]; ++ if (ret < 0) { ++ 
pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg); ++ return 0; ++ } ++ ++ return ret; ++} ++ ++static const struct clk_ops cmux_ops = { ++ .get_parent = mux_get_parent, ++ .set_parent = mux_set_parent, ++}; ++ ++/* ++ * Don't allow setting for now, as the clock options haven't been ++ * sanitized for additional restrictions. ++ */ ++static const struct clk_ops hwaccel_ops = { ++ .get_parent = mux_get_parent, ++}; ++ ++static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg, ++ struct mux_hwclock *hwc, ++ int idx) ++{ ++ int pll, div; ++ ++ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID)) ++ return NULL; ++ ++ pll = hwc->info->clksel[idx].pll; ++ div = hwc->info->clksel[idx].div; ++ ++ return &cg->pll[pll].div[div]; ++} ++ ++static struct clk * __init create_mux_common(struct clockgen *cg, ++ struct mux_hwclock *hwc, ++ const struct clk_ops *ops, ++ unsigned long min_rate, ++ unsigned long pct80_rate, ++ const char *fmt, int idx) ++{ ++ struct clk_init_data init = {}; ++ struct clk *clk; ++ const struct clockgen_pll_div *div; ++ const char *parent_names[NUM_MUX_PARENTS]; ++ char name[32]; ++ int i, j; ++ ++ snprintf(name, sizeof(name), fmt, idx); ++ ++ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) { ++ unsigned long rate; ++ ++ hwc->clksel_to_parent[i] = -1; ++ ++ div = get_pll_div(cg, hwc, i); ++ if (!div) ++ continue; ++ ++ rate = clk_get_rate(div->clk); ++ ++ if (hwc->info->clksel[i].flags & CLKSEL_80PCT && ++ rate > pct80_rate) ++ continue; ++ if (rate < min_rate) ++ continue; ++ ++ parent_names[j] = div->name; ++ hwc->parent_to_clksel[j] = i; ++ hwc->clksel_to_parent[i] = j; ++ j++; ++ } ++ ++ init.name = name; ++ init.ops = ops; ++ init.parent_names = parent_names; ++ init.num_parents = hwc->num_parents = j; ++ init.flags = 0; ++ hwc->hw.init = &init; ++ hwc->cg = cg; ++ ++ clk = clk_register(NULL, &hwc->hw); ++ if (IS_ERR(clk)) { ++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, ++ PTR_ERR(clk)); ++ kfree(hwc); ++ return NULL; ++ } ++ ++ return clk; ++} ++ ++static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) ++{ ++ struct mux_hwclock *hwc; ++ const struct clockgen_pll_div *div; ++ unsigned long plat_rate, min_rate; ++ u64 pct80_rate; ++ u32 clksel; ++ ++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); ++ if (!hwc) ++ return NULL; ++ ++ if (cg->info.flags & CG_VER3) ++ hwc->reg = cg->regs + 0x70000 + 0x20 * idx; ++ else ++ hwc->reg = cg->regs + 0x20 * idx; ++ ++ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; ++ ++ /* ++ * Find the rate for the default clksel, and treat it as the ++ * maximum rated core frequency. If this is an incorrect ++ * assumption, certain clock options (possibly including the ++ * default clksel) may be inappropriately excluded on certain ++ * chips. 
++ */ ++ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; ++ div = get_pll_div(cg, hwc, clksel); ++ if (!div) ++ return NULL; ++ ++ pct80_rate = clk_get_rate(div->clk); ++ pct80_rate *= 8; ++ do_div(pct80_rate, 10); ++ ++ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); ++ ++ if (cg->info.flags & CG_CMUX_GE_PLAT) ++ min_rate = plat_rate; ++ else ++ min_rate = plat_rate / 2; ++ ++ return create_mux_common(cg, hwc, &cmux_ops, min_rate, ++ pct80_rate, "cg-cmux%d", idx); ++} ++ ++static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) ++{ ++ struct mux_hwclock *hwc; ++ ++ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); ++ if (!hwc) ++ return NULL; ++ ++ hwc->reg = cg->regs + 0x20 * idx + 0x10; ++ hwc->info = cg->info.hwaccel[idx]; ++ ++ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, ++ "cg-hwaccel%d", idx); ++} ++ ++static void __init create_muxes(struct clockgen *cg) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) { ++ if (cg->info.cmux_to_group[i] < 0) ++ break; ++ if (cg->info.cmux_to_group[i] >= ++ ARRAY_SIZE(cg->info.cmux_groups)) { ++ WARN_ON_ONCE(1); ++ continue; ++ } ++ ++ cg->cmux[i] = create_one_cmux(cg, i); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) { ++ if (!cg->info.hwaccel[i]) ++ continue; ++ ++ cg->hwaccel[i] = create_one_hwaccel(cg, i); ++ } ++} ++ ++static void __init clockgen_init(struct device_node *np); ++ ++/* Legacy nodes may get probed before the parent clockgen node */ ++static void __init legacy_init_clockgen(struct device_node *np) ++{ ++ if (!clockgen.node) ++ clockgen_init(of_get_parent(np)); ++} ++ ++/* Legacy node */ ++static void __init core_mux_init(struct device_node *np) ++{ ++ struct clk *clk; ++ struct resource res; ++ int idx, rc; ++ ++ legacy_init_clockgen(np); ++ ++ if (of_address_to_resource(np, 0, &res)) ++ return; ++ ++ idx = (res.start & 0xf0) >> 5; ++ clk = clockgen.cmux[idx]; ++ ++ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); ++ if (rc) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, rc); ++ return; ++ } ++} ++ ++static struct clk *sysclk_from_fixed(struct device_node *node, const char *name) ++{ ++ u32 rate; ++ ++ if (of_property_read_u32(node, "clock-frequency", &rate)) ++ return ERR_PTR(-ENODEV); ++ ++ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); ++} ++ ++static struct clk *sysclk_from_parent(const char *name) ++{ ++ struct clk *clk; ++ const char *parent_name; ++ ++ clk = of_clk_get(clockgen.node, 0); ++ if (IS_ERR(clk)) ++ return clk; ++ ++ /* Register the input clock under the desired name. 
*/ ++ parent_name = __clk_get_name(clk); ++ clk = clk_register_fixed_factor(NULL, name, parent_name, ++ 0, 1, 1); ++ if (IS_ERR(clk)) ++ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, ++ PTR_ERR(clk)); ++ ++ return clk; ++} ++ ++static struct clk * __init create_sysclk(const char *name) ++{ ++ struct device_node *sysclk; ++ struct clk *clk; ++ ++ clk = sysclk_from_fixed(clockgen.node, name); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ clk = sysclk_from_parent(name); ++ if (!IS_ERR(clk)) ++ return clk; ++ ++ sysclk = of_get_child_by_name(clockgen.node, "sysclk"); ++ if (sysclk) { ++ clk = sysclk_from_fixed(sysclk, name); ++ if (!IS_ERR(clk)) ++ return clk; ++ } ++ ++ pr_err("%s: No input clock\n", __func__); ++ return NULL; ++} ++ ++/* Legacy node */ ++static void __init sysclk_init(struct device_node *node) ++{ ++ struct clk *clk; ++ ++ legacy_init_clockgen(node); ++ ++ clk = clockgen.sysclk; ++ if (clk) ++ of_clk_add_provider(node, of_clk_src_simple_get, clk); ++} ++ ++#define PLL_KILL BIT(31) ++ ++static void __init create_one_pll(struct clockgen *cg, int idx) ++{ ++ u32 __iomem *reg; ++ u32 mult; ++ struct clockgen_pll *pll = &cg->pll[idx]; ++ int i; ++ ++ if (!(cg->info.pll_mask & (1 << idx))) ++ return; ++ ++ if (cg->info.flags & CG_VER3) { ++ switch (idx) { ++ case PLATFORM_PLL: ++ reg = cg->regs + 0x60080; ++ break; ++ case CGA_PLL1: ++ reg = cg->regs + 0x80; ++ break; ++ case CGA_PLL2: ++ reg = cg->regs + 0xa0; ++ break; ++ case CGB_PLL1: ++ reg = cg->regs + 0x10080; ++ break; ++ case CGB_PLL2: ++ reg = cg->regs + 0x100a0; ++ break; ++ default: ++ WARN_ONCE(1, "index %d\n", idx); ++ return; ++ } ++ } else { ++ if (idx == PLATFORM_PLL) ++ reg = cg->regs + 0xc00; ++ else ++ reg = cg->regs + 0x800 + 0x20 * (idx - 1); ++ } ++ ++ /* Get the multiple of PLL */ ++ mult = cg_in(cg, reg); ++ ++ /* Check if this PLL is disabled */ ++ if (mult & PLL_KILL) { ++ pr_debug("%s(): pll %p disabled\n", __func__, reg); ++ return; ++ } ++ ++ if ((cg->info.flags & CG_VER3) || ++ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)) ++ mult = (mult & GENMASK(8, 1)) >> 1; ++ else ++ mult = (mult & GENMASK(6, 1)) >> 1; ++ ++ for (i = 0; i < ARRAY_SIZE(pll->div); i++) { ++ struct clk *clk; ++ ++ snprintf(pll->div[i].name, sizeof(pll->div[i].name), ++ "cg-pll%d-div%d", idx, i + 1); ++ ++ clk = clk_register_fixed_factor(NULL, ++ pll->div[i].name, "cg-sysclk", 0, mult, i + 1); ++ if (IS_ERR(clk)) { ++ pr_err("%s: %s: register failed %ld\n", ++ __func__, pll->div[i].name, PTR_ERR(clk)); ++ continue; ++ } ++ ++ pll->div[i].clk = clk; ++ } ++} ++ ++static void __init create_plls(struct clockgen *cg) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cg->pll); i++) ++ create_one_pll(cg, i); ++} ++ ++static void __init legacy_pll_init(struct device_node *np, int idx) ++{ ++ struct clockgen_pll *pll; ++ struct clk_onecell_data *onecell_data; ++ struct clk **subclks; ++ int count, rc; ++ ++ legacy_init_clockgen(np); ++ ++ pll = &clockgen.pll[idx]; ++ count = of_property_count_strings(np, "clock-output-names"); ++ ++ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4); ++ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL); ++ if (!subclks) ++ return; ++ ++ onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); ++ if (!onecell_data) ++ goto err_clks; ++ ++ if (count <= 3) { ++ subclks[0] = pll->div[0].clk; ++ subclks[1] = pll->div[1].clk; ++ subclks[2] = pll->div[3].clk; ++ } else { ++ subclks[0] = pll->div[0].clk; ++ subclks[1] = pll->div[1].clk; ++ subclks[2] = pll->div[2].clk; ++ subclks[3] = pll->div[3].clk; ++ 
} ++ ++ onecell_data->clks = subclks; ++ onecell_data->clk_num = count; ++ ++ rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); ++ if (rc) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, rc); ++ goto err_cell; ++ } ++ ++ return; ++err_cell: ++ kfree(onecell_data); ++err_clks: ++ kfree(subclks); ++} ++ ++/* Legacy node */ ++static void __init pltfrm_pll_init(struct device_node *np) ++{ ++ legacy_pll_init(np, PLATFORM_PLL); ++} ++ ++/* Legacy node */ ++static void __init core_pll_init(struct device_node *np) ++{ ++ struct resource res; ++ int idx; ++ ++ if (of_address_to_resource(np, 0, &res)) ++ return; ++ ++ if ((res.start & 0xfff) == 0xc00) { ++ /* ++ * ls1021a devtree labels the platform PLL ++ * with the core PLL compatible ++ */ ++ pltfrm_pll_init(np); ++ } else { ++ idx = (res.start & 0xf0) >> 5; ++ legacy_pll_init(np, CGA_PLL1 + idx); ++ } ++} ++ ++static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data) ++{ ++ struct clockgen *cg = data; ++ struct clk *clk; ++ struct clockgen_pll *pll; ++ u32 type, idx; ++ ++ if (clkspec->args_count < 2) { ++ pr_err("%s: insufficient phandle args\n", __func__); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ type = clkspec->args[0]; ++ idx = clkspec->args[1]; ++ ++ switch (type) { ++ case 0: ++ if (idx != 0) ++ goto bad_args; ++ clk = cg->sysclk; ++ break; ++ case 1: ++ if (idx >= ARRAY_SIZE(cg->cmux)) ++ goto bad_args; ++ clk = cg->cmux[idx]; ++ break; ++ case 2: ++ if (idx >= ARRAY_SIZE(cg->hwaccel)) ++ goto bad_args; ++ clk = cg->hwaccel[idx]; ++ break; ++ case 3: ++ if (idx >= ARRAY_SIZE(cg->fman)) ++ goto bad_args; ++ clk = cg->fman[idx]; ++ break; ++ case 4: ++ pll = &cg->pll[PLATFORM_PLL]; ++ if (idx >= ARRAY_SIZE(pll->div)) ++ goto bad_args; ++ clk = pll->div[idx].clk; ++ break; ++ default: ++ goto bad_args; ++ } ++ ++ if (!clk) ++ return ERR_PTR(-ENOENT); ++ return clk; ++ ++bad_args: ++ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx); ++ return ERR_PTR(-EINVAL); ++} ++ ++#ifdef CONFIG_PPC ++ ++static const u32 a4510_svrs[] __initconst = { ++ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ ++ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */ ++ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */ ++ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */ ++ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */ ++ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */ ++ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */ ++ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */ ++ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */ ++ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */ ++ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */ ++ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */ ++ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */ ++}; ++ ++#define SVR_SECURITY 0x80000 /* The Security (E) bit */ ++ ++static bool __init has_erratum_a4510(void) ++{ ++ u32 svr = mfspr(SPRN_SVR); ++ int i; ++ ++ svr &= ~SVR_SECURITY; ++ ++ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) { ++ if (svr == a4510_svrs[i]) ++ return true; ++ } ++ ++ return false; ++} ++#else ++static bool __init has_erratum_a4510(void) ++{ ++ return false; ++} ++#endif ++ ++static void __init clockgen_init(struct device_node *np) ++{ ++ int i, ret; ++ bool is_old_ls1021a = false; ++ ++ /* May have already been called by a legacy probe */ ++ if (clockgen.node) ++ return; ++ ++ clockgen.node = np; ++ clockgen.regs = of_iomap(np, 0); ++ if (!clockgen.regs && ++ of_device_is_compatible(of_root, "fsl,ls1021a")) { ++ /* Compatibility hack for old, broken device trees */ ++ clockgen.regs = ioremap(0x1ee1000, 0x1000); ++ is_old_ls1021a = 
true; ++ } ++ if (!clockgen.regs) { ++ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); ++ return; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) { ++ if (of_device_is_compatible(np, chipinfo[i].compat)) ++ break; ++ if (is_old_ls1021a && ++ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen")) ++ break; ++ } ++ ++ if (i == ARRAY_SIZE(chipinfo)) { ++ pr_err("%s: unknown clockgen node %s\n", __func__, ++ np->full_name); ++ goto err; ++ } ++ clockgen.info = chipinfo[i]; ++ ++ if (clockgen.info.guts_compat) { ++ struct device_node *guts; ++ ++ guts = of_find_compatible_node(NULL, NULL, ++ clockgen.info.guts_compat); ++ if (guts) { ++ clockgen.guts = of_iomap(guts, 0); ++ if (!clockgen.guts) { ++ pr_err("%s: Couldn't map %s regs\n", __func__, ++ guts->full_name); ++ } ++ } ++ ++ } ++ ++ if (has_erratum_a4510()) ++ clockgen.info.flags |= CG_CMUX_GE_PLAT; ++ ++ clockgen.sysclk = create_sysclk("cg-sysclk"); ++ create_plls(&clockgen); ++ create_muxes(&clockgen); ++ ++ if (clockgen.info.init_periph) ++ clockgen.info.init_periph(&clockgen); ++ ++ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); ++ if (ret) { ++ pr_err("%s: Couldn't register clk provider for node %s: %d\n", ++ __func__, np->name, ret); ++ } ++ ++ return; ++err: ++ iounmap(clockgen.regs); ++ clockgen.regs = NULL; ++} ++ ++CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); ++CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init); ++ ++/* Legacy nodes */ ++CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); ++CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); ++CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); ++CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); ++CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); ++CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); ++CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); ++CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); +diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc +index 72564b7..7ea2441 100644 +--- a/drivers/cpufreq/Kconfig.powerpc ++++ b/drivers/cpufreq/Kconfig.powerpc +@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE + config PPC_CORENET_CPUFREQ + tristate "CPU frequency scaling driver for Freescale E500MC SoCs" + depends on PPC_E500MC && OF && COMMON_CLK +- select CLK_PPC_CORENET ++ select CLK_QORIQ + help + This adds the CPUFreq driver support for Freescale e500mc, + e5500 and e6500 series SoCs which are capable of changing +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index 06e99eb..bbf8ae4 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -526,10 +526,10 @@ config I2C_IBM_IIC + + config I2C_IMX + tristate "IMX I2C interface" +- depends on ARCH_MXC ++ depends on ARCH_MXC || ARCH_LAYERSCAPE + help + Say Y here if you want to use the IIC bus controller on +- the Freescale i.MX/MXC processors. ++ the Freescale i.MX/MXC and layerscape processors. + + This driver can also be built as a module. If so, the module + will be called i2c-imx. 
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c +index e9fb7cf..13f88f9 100644 +--- a/drivers/i2c/busses/i2c-imx.c ++++ b/drivers/i2c/busses/i2c-imx.c +@@ -33,6 +33,10 @@ + *******************************************************************************/ + + #include ++#include ++#include ++#include ++#include + #include + #include + #include +@@ -47,6 +51,7 @@ + #include + #include + #include ++#include + #include + + /** Defines ******************************************************************** +@@ -58,6 +63,15 @@ + /* Default value */ + #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ + ++/* ++ * Enable DMA if transfer byte size is bigger than this threshold. ++ * As the hardware request, it must bigger than 4 bytes.\ ++ * I have set '16' here, maybe it's not the best but I think it's ++ * the appropriate. ++ */ ++#define DMA_THRESHOLD 16 ++#define DMA_TIMEOUT 1000 ++ + /* IMX I2C registers: + * the I2C register offset is different between SoCs, + * to provid support for all these chips, split the +@@ -83,6 +97,7 @@ + #define I2SR_IBB 0x20 + #define I2SR_IAAS 0x40 + #define I2SR_ICF 0x80 ++#define I2CR_DMAEN 0x02 + #define I2CR_RSTA 0x04 + #define I2CR_TXAK 0x08 + #define I2CR_MTX 0x10 +@@ -169,6 +184,17 @@ struct imx_i2c_hwdata { + unsigned i2cr_ien_opcode; + }; + ++struct imx_i2c_dma { ++ struct dma_chan *chan_tx; ++ struct dma_chan *chan_rx; ++ struct dma_chan *chan_using; ++ struct completion cmd_complete; ++ dma_addr_t dma_buf; ++ unsigned int dma_len; ++ enum dma_transfer_direction dma_transfer_dir; ++ enum dma_data_direction dma_data_dir; ++}; ++ + struct imx_i2c_struct { + struct i2c_adapter adapter; + struct clk *clk; +@@ -181,6 +207,8 @@ struct imx_i2c_struct { + unsigned int cur_clk; + unsigned int bitrate; + const struct imx_i2c_hwdata *hwdata; ++ ++ struct imx_i2c_dma *dma; + }; + + static const struct imx_i2c_hwdata imx1_i2c_hwdata = { +@@ -251,6 +279,162 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, + return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); + } + ++/* Functions for DMA support */ ++static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, ++ dma_addr_t phy_addr) ++{ ++ struct imx_i2c_dma *dma; ++ struct dma_slave_config dma_sconfig; ++ struct device *dev = &i2c_imx->adapter.dev; ++ int ret; ++ ++ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); ++ if (!dma) ++ return; ++ ++ dma->chan_tx = dma_request_slave_channel(dev, "tx"); ++ if (!dma->chan_tx) { ++ dev_dbg(dev, "can't request DMA tx channel\n"); ++ goto fail_al; ++ } ++ ++ dma_sconfig.dst_addr = phy_addr + ++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); ++ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ dma_sconfig.dst_maxburst = 1; ++ dma_sconfig.direction = DMA_MEM_TO_DEV; ++ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig); ++ if (ret < 0) { ++ dev_dbg(dev, "can't configure tx channel\n"); ++ goto fail_tx; ++ } ++ ++ dma->chan_rx = dma_request_slave_channel(dev, "rx"); ++ if (!dma->chan_rx) { ++ dev_dbg(dev, "can't request DMA rx channel\n"); ++ goto fail_tx; ++ } ++ ++ dma_sconfig.src_addr = phy_addr + ++ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); ++ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ dma_sconfig.src_maxburst = 1; ++ dma_sconfig.direction = DMA_DEV_TO_MEM; ++ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); ++ if (ret < 0) { ++ dev_dbg(dev, "can't configure rx channel\n"); ++ goto fail_rx; ++ } ++ ++ i2c_imx->dma = dma; ++ init_completion(&dma->cmd_complete); ++ 
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", ++ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); ++ ++ return; ++ ++fail_rx: ++ dma_release_channel(dma->chan_rx); ++fail_tx: ++ dma_release_channel(dma->chan_tx); ++fail_al: ++ devm_kfree(dev, dma); ++ dev_info(dev, "can't use DMA\n"); ++} ++ ++static void i2c_imx_dma_callback(void *arg) ++{ ++ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ ++ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf, ++ dma->dma_len, dma->dma_data_dir); ++ complete(&dma->cmd_complete); ++} ++ ++static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs) ++{ ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct dma_async_tx_descriptor *txdesc; ++ struct device *dev = &i2c_imx->adapter.dev; ++ struct device *chan_dev = dma->chan_using->device->dev; ++ ++ dma->dma_buf = dma_map_single(chan_dev, msgs->buf, ++ dma->dma_len, dma->dma_data_dir); ++ if (dma_mapping_error(chan_dev, dma->dma_buf)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ goto err_map; ++ } ++ ++ txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, ++ dma->dma_len, dma->dma_transfer_dir, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!txdesc) { ++ dev_err(dev, "Not able to get desc for DMA xfer\n"); ++ goto err_desc; ++ } ++ ++ txdesc->callback = i2c_imx_dma_callback; ++ txdesc->callback_param = i2c_imx; ++ if (dma_submit_error(dmaengine_submit(txdesc))) { ++ dev_err(dev, "DMA submit failed\n"); ++ goto err_submit; ++ } ++ ++ dma_async_issue_pending(dma->chan_using); ++ return 0; ++ ++err_submit: ++err_desc: ++ dma_unmap_single(chan_dev, dma->dma_buf, ++ dma->dma_len, dma->dma_data_dir); ++err_map: ++ return -EINVAL; ++} ++ ++static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) ++{ ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ ++ dma->dma_buf = 0; ++ dma->dma_len = 0; ++ ++ dma_release_channel(dma->chan_tx); ++ dma->chan_tx = NULL; ++ ++ dma_release_channel(dma->chan_rx); ++ dma->chan_rx = NULL; ++ ++ dma->chan_using = NULL; ++} ++ ++/* ++ * When a system reset does not cause all I2C devices to be reset, it is ++ * sometimes necessary to force the I2C module to become the I2C bus master ++ * out of reset and drive SCL A slave can hold bus low to cause bus hang. ++ * Thus, SDA can be driven low by another I2C device while this I2C module ++ * is coming out of reset and will stay low indefinitely. ++ * The I2C master has to generate 9 clock pulses to get the bus free or idle. 
++ */ ++static void imx_i2c_fixup(struct imx_i2c_struct *i2c_imx) ++{ ++ int k; ++ u32 delay_val = 1000000 / i2c_imx->cur_clk + 1; ++ ++ if (delay_val < 2) ++ delay_val = 2; ++ ++ for (k = 9; k; k--) { ++ imx_i2c_write_reg(I2CR_IEN, i2c_imx, IMX_I2C_I2CR); ++ imx_i2c_write_reg((I2CR_MSTA | I2CR_MTX) & (~I2CR_IEN), ++ i2c_imx, IMX_I2C_I2CR); ++ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); ++ udelay(delay_val << 1); ++ } ++} ++ + /** Functions for IMX I2C adapter driver *************************************** + *******************************************************************************/ + +@@ -276,8 +460,15 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) + if (!for_busy && !(temp & I2SR_IBB)) + break; + if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { ++ u8 status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ + dev_dbg(&i2c_imx->adapter.dev, + "<%s> I2C bus is busy\n", __func__); ++ if ((status & (I2SR_ICF | I2SR_IBB | I2CR_TXAK)) != 0) { ++ imx_i2c_write_reg(status & ~I2SR_IAL, i2c_imx, ++ IMX_I2C_I2CR); ++ imx_i2c_fixup(i2c_imx); ++ } + return -ETIMEDOUT; + } + schedule(); +@@ -382,6 +573,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) + i2c_imx->stopped = 0; + + temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; ++ temp &= ~I2CR_DMAEN; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + return result; + } +@@ -395,6 +587,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + temp &= ~(I2CR_MSTA | I2CR_MTX); ++ if (i2c_imx->dma) ++ temp &= ~I2CR_DMAEN; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); + } + if (is_imx1_i2c(i2c_imx)) { +@@ -435,6 +629,157 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) + return IRQ_NONE; + } + ++static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs) ++{ ++ int result; ++ unsigned long time_left; ++ unsigned int temp = 0; ++ unsigned long orig_jiffies = jiffies; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct device *dev = &i2c_imx->adapter.dev; ++ ++ dma->chan_using = dma->chan_tx; ++ dma->dma_transfer_dir = DMA_MEM_TO_DEV; ++ dma->dma_data_dir = DMA_TO_DEVICE; ++ dma->dma_len = msgs->len - 1; ++ result = i2c_imx_dma_xfer(i2c_imx, msgs); ++ if (result) ++ return result; ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* ++ * Write slave address. ++ * The first byte must be transmitted by the CPU. ++ */ ++ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); ++ reinit_completion(&i2c_imx->dma->cmd_complete); ++ time_left = wait_for_completion_timeout( ++ &i2c_imx->dma->cmd_complete, ++ msecs_to_jiffies(DMA_TIMEOUT)); ++ if (time_left == 0) { ++ dmaengine_terminate_all(dma->chan_using); ++ return -ETIMEDOUT; ++ } ++ ++ /* Waiting for transfer complete. */ ++ while (1) { ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ if (temp & I2SR_ICF) ++ break; ++ if (time_after(jiffies, orig_jiffies + ++ msecs_to_jiffies(DMA_TIMEOUT))) { ++ dev_dbg(dev, "<%s> Timeout\n", __func__); ++ return -ETIMEDOUT; ++ } ++ schedule(); ++ } ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* The last data byte must be transferred by the CPU. 
*/ ++ imx_i2c_write_reg(msgs->buf[msgs->len-1], ++ i2c_imx, IMX_I2C_I2DR); ++ result = i2c_imx_trx_complete(i2c_imx); ++ if (result) ++ return result; ++ ++ return i2c_imx_acked(i2c_imx); ++} ++ ++static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, ++ struct i2c_msg *msgs, bool is_lastmsg) ++{ ++ int result; ++ unsigned long time_left; ++ unsigned int temp; ++ unsigned long orig_jiffies = jiffies; ++ struct imx_i2c_dma *dma = i2c_imx->dma; ++ struct device *dev = &i2c_imx->adapter.dev; ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ dma->chan_using = dma->chan_rx; ++ dma->dma_transfer_dir = DMA_DEV_TO_MEM; ++ dma->dma_data_dir = DMA_FROM_DEVICE; ++ /* The last two data bytes must be transferred by the CPU. */ ++ dma->dma_len = msgs->len - 2; ++ result = i2c_imx_dma_xfer(i2c_imx, msgs); ++ if (result) ++ return result; ++ ++ reinit_completion(&i2c_imx->dma->cmd_complete); ++ time_left = wait_for_completion_timeout( ++ &i2c_imx->dma->cmd_complete, ++ msecs_to_jiffies(DMA_TIMEOUT)); ++ if (time_left == 0) { ++ dmaengine_terminate_all(dma->chan_using); ++ return -ETIMEDOUT; ++ } ++ ++ /* waiting for transfer complete. */ ++ while (1) { ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ if (temp & I2SR_ICF) ++ break; ++ if (time_after(jiffies, orig_jiffies + ++ msecs_to_jiffies(DMA_TIMEOUT))) { ++ dev_dbg(dev, "<%s> Timeout\n", __func__); ++ return -ETIMEDOUT; ++ } ++ schedule(); ++ } ++ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~I2CR_DMAEN; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* read n-1 byte data */ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp |= I2CR_TXAK; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ /* read n byte data */ ++ result = i2c_imx_trx_complete(i2c_imx); ++ if (result) ++ return result; ++ ++ if (is_lastmsg) { ++ /* ++ * It must generate STOP before read I2DR to prevent ++ * controller from generating another clock cycle ++ */ ++ dev_dbg(dev, "<%s> clear MSTA\n", __func__); ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~(I2CR_MSTA | I2CR_MTX); ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ i2c_imx_bus_busy(i2c_imx, 0); ++ i2c_imx->stopped = 1; ++ } else { ++ /* ++ * For i2c master receiver repeat restart operation like: ++ * read -> repeat MSTA -> read/write ++ * The controller must set MTX before read the last byte in ++ * the first read operation, otherwise the first read cost ++ * one extra clock cycle. 
++ */ ++ temp = readb(i2c_imx->base + IMX_I2C_I2CR); ++ temp |= I2CR_MTX; ++ writeb(temp, i2c_imx->base + IMX_I2C_I2CR); ++ } ++ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); ++ ++ return 0; ++} ++ + static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) + { + int i, result; +@@ -504,6 +849,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo + + dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); + ++ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data) ++ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); ++ + /* read data */ + for (i = 0; i < msgs->len; i++) { + u8 len = 0; +@@ -577,6 +925,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, + + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + ++ /* workaround for ERR010027: ensure that the I2C bus is idle ++ before switching to master mode and attempting a Start cycle ++ */ ++ result = i2c_imx_bus_busy(i2c_imx, 0); ++ if (result) ++ goto fail0; ++ + /* Start I2C transfer */ + result = i2c_imx_start(i2c_imx); + if (result) +@@ -618,8 +973,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, + #endif + if (msgs[i].flags & I2C_M_RD) + result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); +- else +- result = i2c_imx_write(i2c_imx, &msgs[i]); ++ else { ++ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) ++ result = i2c_imx_dma_write(i2c_imx, &msgs[i]); ++ else ++ result = i2c_imx_write(i2c_imx, &msgs[i]); ++ } + if (result) + goto fail0; + } +@@ -654,6 +1013,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev); + void __iomem *base; + int irq, ret; ++ dma_addr_t phy_addr; + + dev_dbg(&pdev->dev, "<%s>\n", __func__); + +@@ -668,6 +1028,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + if (IS_ERR(base)) + return PTR_ERR(base); + ++ phy_addr = (dma_addr_t)res->start; + i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct), + GFP_KERNEL); + if (!i2c_imx) +@@ -701,7 +1062,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + return ret; + } + /* Request IRQ */ +- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, ++ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, + pdev->name, i2c_imx); + if (ret) { + dev_err(&pdev->dev, "can't claim irq %d\n", irq); +@@ -743,6 +1104,9 @@ static int i2c_imx_probe(struct platform_device *pdev) + i2c_imx->adapter.name); + dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); + ++ /* Init DMA config if supported */ ++ i2c_imx_dma_request(i2c_imx, phy_addr); ++ + return 0; /* Return OK */ + + clk_disable: +@@ -758,6 +1122,9 @@ static int i2c_imx_remove(struct platform_device *pdev) + dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); + i2c_del_adapter(&i2c_imx->adapter); + ++ if (i2c_imx->dma) ++ i2c_imx_dma_free(i2c_imx); ++ + /* setup chip registers to defaults */ + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); +diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c +index cb77277..0c8d4d2 100644 +--- a/drivers/i2c/muxes/i2c-mux-pca9541.c ++++ b/drivers/i2c/muxes/i2c-mux-pca9541.c +@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val) + buf[0] = command; + buf[1] = val; + msg.buf = buf; +- ret = adap->algo->master_xfer(adap, &msg, 1); ++ ret = __i2c_transfer(adap, &msg, 1); + } else { + union i2c_smbus_data data; + +@@ -144,7 +144,7 @@ static int 
pca9541_reg_read(struct i2c_client *client, u8 command) + .buf = &val + } + }; +- ret = adap->algo->master_xfer(adap, msg, 2); ++ ret = __i2c_transfer(adap, msg, 2); + if (ret == 2) + ret = val; + else if (ret >= 0) +diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c +index ec11b40..28540a4 100644 +--- a/drivers/i2c/muxes/i2c-mux-pca954x.c ++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -62,6 +63,7 @@ struct pca954x { + struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS]; + + u8 last_chan; /* last register value */ ++ u8 disable_mux; /* do not disable mux if val not 0 */ + }; + + struct chip_desc { +@@ -133,7 +135,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap, + msg.len = 1; + buf[0] = val; + msg.buf = buf; +- ret = adap->algo->master_xfer(adap, &msg, 1); ++ ret = __i2c_transfer(adap, &msg, 1); + } else { + union i2c_smbus_data data; + ret = adap->algo->smbus_xfer(adap, client->addr, +@@ -173,6 +175,13 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap, + { + struct pca954x *data = i2c_get_clientdata(client); + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++ return pca954x_reg_write(adap, client, data->disable_mux); ++#endif + /* Deselect active channel */ + data->last_chan = 0; + return pca954x_reg_write(adap, client, data->last_chan); +@@ -186,6 +195,8 @@ static int pca954x_probe(struct i2c_client *client, + { + struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); + struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); ++ struct device_node *of_node = client->dev.of_node; ++ bool idle_disconnect_dt; + struct gpio_desc *gpio; + int num, force, class; + struct pca954x *data; +@@ -198,27 +209,55 @@ static int pca954x_probe(struct i2c_client *client, + if (!data) + return -ENOMEM; + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ /* The point here is that you must not disable a mux if there ++ * are no pullups on the input or you mess up the I2C. This ++ * needs to be put into the DTS really as the kernel cannot ++ * know this otherwise. ++ */ ++ data->type = id->driver_data; ++ data->disable_mux = of_node && ++ of_property_read_bool(of_node, "i2c-mux-never-disable") && ++ chips[data->type].muxtype == pca954x_ismux ? ++ chips[data->type].enable : 0; ++ /* force the first selection */ ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++#endif + i2c_set_clientdata(client, data); + + /* Get the mux out of reset if a reset GPIO is specified. */ +- gpio = devm_gpiod_get(&client->dev, "reset"); +- if (!IS_ERR(gpio)) +- gpiod_direction_output(gpio, 0); ++ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW); ++ if (IS_ERR(gpio)) ++ return PTR_ERR(gpio); + + /* Write the mux register at addr to verify + * that the mux is in fact present. This also + * initializes the mux to disconnected state. 
+ */ ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { ++#else + if (i2c_smbus_write_byte(client, 0) < 0) { ++#endif + dev_warn(&client->dev, "probe failed\n"); + return -ENODEV; + } + ++#ifndef CONFIG_ARCH_LAYERSCAPE + data->type = id->driver_data; + data->last_chan = 0; /* force the first selection */ ++#endif ++ ++ idle_disconnect_dt = of_node && ++ of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); + + /* Now create an adapter for each channel */ + for (num = 0; num < chips[data->type].nchans; num++) { ++ bool idle_disconnect_pd = false; ++ + force = 0; /* dynamic adap number */ + class = 0; /* no class by default */ + if (pdata) { +@@ -229,12 +268,13 @@ static int pca954x_probe(struct i2c_client *client, + } else + /* discard unconfigured channels */ + break; ++ idle_disconnect_pd = pdata->modes[num].deselect_on_exit; + } + + data->virt_adaps[num] = + i2c_add_mux_adapter(adap, &client->dev, client, + force, num, class, pca954x_select_chan, +- (pdata && pdata->modes[num].deselect_on_exit) ++ (idle_disconnect_pd || idle_disconnect_dt) + ? pca954x_deselect_mux : NULL); + + if (data->virt_adaps[num] == NULL) { +@@ -280,6 +320,13 @@ static int pca954x_resume(struct device *dev) + struct i2c_client *client = to_i2c_client(dev); + struct pca954x *data = i2c_get_clientdata(client); + ++#ifdef CONFIG_ARCH_LAYERSCAPE ++ if (data->disable_mux != 0) ++ data->last_chan = chips[data->type].nchans; ++ else ++ data->last_chan = 0; ++ return i2c_smbus_write_byte(client, data->disable_mux); ++#endif + data->last_chan = 0; + return i2c_smbus_write_byte(client, 0); + } +diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c +index 80ac68d..9396c85 100644 +--- a/drivers/iommu/fsl_pamu.c ++++ b/drivers/iommu/fsl_pamu.c +@@ -31,7 +31,7 @@ + #include + #include + #include +-#include ++#include + + #include "fsl_pamu.h" + +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c +index 5a500ed..fd6dd22 100644 +--- a/drivers/iommu/io-pgtable-arm.c ++++ b/drivers/iommu/io-pgtable-arm.c +@@ -56,7 +56,8 @@ + ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ + * (d)->bits_per_level) + (d)->pg_shift) + +-#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) ++#define ARM_LPAE_PAGES_PER_PGD(d) \ ++ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) + + /* + * Calculate the index at level l used to map virtual address a using the +@@ -66,7 +67,7 @@ + ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + + #define ARM_LPAE_LVL_IDX(a,l,d) \ +- (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ++ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) + + /* Calculate the block/page mapping size at level l for pagetable in d. 
*/ +@@ -115,6 +116,8 @@ + #define ARM_32_LPAE_TCR_EAE (1 << 31) + #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) + ++#define ARM_LPAE_TCR_EPD1 (1 << 23) ++ + #define ARM_LPAE_TCR_TG0_4K (0 << 14) + #define ARM_LPAE_TCR_TG0_64K (1 << 14) + #define ARM_LPAE_TCR_TG0_16K (2 << 14) +@@ -283,6 +286,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + if (prot & IOMMU_CACHE) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE + << ARM_LPAE_PTE_ATTRINDX_SHIFT); ++ else if (prot & IOMMU_MMIO) ++ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV ++ << ARM_LPAE_PTE_ATTRINDX_SHIFT); + } else { + pte = ARM_LPAE_PTE_HAP_FAULT; + if (prot & IOMMU_READ) +@@ -291,6 +297,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + pte |= ARM_LPAE_PTE_HAP_WRITE; + if (prot & IOMMU_CACHE) + pte |= ARM_LPAE_PTE_MEMATTR_OIWB; ++ else if (prot & IOMMU_MMIO) ++ pte |= ARM_LPAE_PTE_MEMATTR_DEV; + else + pte |= ARM_LPAE_PTE_MEMATTR_NC; + } +@@ -620,6 +628,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) + } + + reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; ++ ++ /* Disable speculative walks through TTBR1 */ ++ reg |= ARM_LPAE_TCR_EPD1; + cfg->arm_lpae_s1_cfg.tcr = reg; + + /* MAIRs */ +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig +index caf590c..e72e239 100644 +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -5,8 +5,15 @@ config IRQCHIP + config ARM_GIC + bool + select IRQ_DOMAIN ++ select IRQ_DOMAIN_HIERARCHY + select MULTI_IRQ_HANDLER + ++config ARM_GIC_V2M ++ bool ++ depends on ARM_GIC ++ depends on PCI && PCI_MSI ++ select PCI_MSI_IRQ_DOMAIN ++ + config GIC_NON_BANKED + bool + +@@ -14,6 +21,7 @@ config ARM_GIC_V3 + bool + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER ++ select IRQ_DOMAIN_HIERARCHY + + config ARM_GIC_V3_ITS + bool +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +index ec3621d..1c4f9a4 100644 +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o + obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o + obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o + obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o ++obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o + obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o + obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o + obj-$(CONFIG_ARM_NVIC) += irq-nvic.o +diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c +index 61541ff..ad96ebb 100644 +--- a/drivers/irqchip/irq-gic-common.c ++++ b/drivers/irqchip/irq-gic-common.c +@@ -21,7 +21,7 @@ + + #include "irq-gic-common.h" + +-void gic_configure_irq(unsigned int irq, unsigned int type, ++int gic_configure_irq(unsigned int irq, unsigned int type, + void __iomem *base, void (*sync_access)(void)) + { + u32 enablemask = 1 << (irq % 32); +@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type, + u32 confmask = 0x2 << ((irq % 16) * 2); + u32 confoff = (irq / 16) * 4; + bool enabled = false; +- u32 val; ++ u32 val, oldval; ++ int ret = 0; + + /* + * Read current configuration register, and insert the config + * for "irq", depending on "type". 
+ */ +- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); +- if (type == IRQ_TYPE_LEVEL_HIGH) ++ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); ++ if (type & IRQ_TYPE_LEVEL_MASK) + val &= ~confmask; +- else if (type == IRQ_TYPE_EDGE_RISING) ++ else if (type & IRQ_TYPE_EDGE_BOTH) + val |= confmask; + + /* +@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type, + + /* + * Write back the new configuration, and possibly re-enable +- * the interrupt. ++ * the interrupt. If we tried to write a new configuration and failed, ++ * return an error. + */ + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); ++ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) ++ ret = -EINVAL; + + if (enabled) + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); + + if (sync_access) + sync_access(); ++ ++ return ret; + } + + void __init gic_dist_config(void __iomem *base, int gic_irqs, +diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h +index b41f024..35a9884 100644 +--- a/drivers/irqchip/irq-gic-common.h ++++ b/drivers/irqchip/irq-gic-common.h +@@ -20,7 +20,7 @@ + #include + #include + +-void gic_configure_irq(unsigned int irq, unsigned int type, ++int gic_configure_irq(unsigned int irq, unsigned int type, + void __iomem *base, void (*sync_access)(void)); + void gic_dist_config(void __iomem *base, int gic_irqs, + void (*sync_access)(void)); +diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c +new file mode 100644 +index 0000000..fdf7065 +--- /dev/null ++++ b/drivers/irqchip/irq-gic-v2m.c +@@ -0,0 +1,333 @@ ++/* ++ * ARM GIC v2m MSI(-X) support ++ * Support for Message Signaled Interrupts for systems that ++ * implement ARM Generic Interrupt Controller: GICv2m. ++ * ++ * Copyright (C) 2014 Advanced Micro Devices, Inc. ++ * Authors: Suravee Suthikulpanit ++ * Harish Kasiviswanathan ++ * Brandon Anderson ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ */ ++ ++#define pr_fmt(fmt) "GICv2m: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++* MSI_TYPER: ++* [31:26] Reserved ++* [25:16] lowest SPI assigned to MSI ++* [15:10] Reserved ++* [9:0] Numer of SPIs assigned to MSI ++*/ ++#define V2M_MSI_TYPER 0x008 ++#define V2M_MSI_TYPER_BASE_SHIFT 16 ++#define V2M_MSI_TYPER_BASE_MASK 0x3FF ++#define V2M_MSI_TYPER_NUM_MASK 0x3FF ++#define V2M_MSI_SETSPI_NS 0x040 ++#define V2M_MIN_SPI 32 ++#define V2M_MAX_SPI 1019 ++ ++#define V2M_MSI_TYPER_BASE_SPI(x) \ ++ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) ++ ++#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) ++ ++struct v2m_data { ++ spinlock_t msi_cnt_lock; ++ struct msi_controller mchip; ++ struct resource res; /* GICv2m resource */ ++ void __iomem *base; /* GICv2m virt address */ ++ u32 spi_start; /* The SPI number that MSIs start */ ++ u32 nr_spis; /* The number of SPIs for MSIs */ ++ unsigned long *bm; /* MSI vector bitmap */ ++ struct irq_domain *domain; ++}; ++ ++static void gicv2m_mask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void gicv2m_unmask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ ++static struct irq_chip gicv2m_msi_irq_chip = { ++ .name = "MSI", ++ .irq_mask = gicv2m_mask_msi_irq, ++ .irq_unmask = gicv2m_unmask_msi_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_write_msi_msg = pci_msi_domain_write_msg, ++}; ++ ++static struct msi_domain_info gicv2m_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_PCI_MSIX), ++ .chip = &gicv2m_msi_irq_chip, ++}; ++ ++static int gicv2m_set_affinity(struct irq_data *irq_data, ++ const struct cpumask *mask, bool force) ++{ ++ int ret; ++ ++ ret = irq_chip_set_affinity_parent(irq_data, mask, force); ++ if (ret == IRQ_SET_MASK_OK) ++ ret = IRQ_SET_MASK_OK_DONE; ++ ++ return ret; ++} ++ ++static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ++{ ++ struct v2m_data *v2m = irq_data_get_irq_chip_data(data); ++ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; ++ ++ msg->address_hi = (u32) (addr >> 32); ++ msg->address_lo = (u32) (addr); ++ msg->data = data->hwirq; ++} ++ ++static struct irq_chip gicv2m_irq_chip = { ++ .name = "GICv2m", ++ .irq_mask = irq_chip_mask_parent, ++ .irq_unmask = irq_chip_unmask_parent, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_set_affinity = gicv2m_set_affinity, ++ .irq_compose_msi_msg = gicv2m_compose_msi_msg, ++}; ++ ++static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, ++ irq_hw_number_t hwirq) ++{ ++ struct of_phandle_args args; ++ struct irq_data *d; ++ int err; ++ ++ args.np = domain->parent->of_node; ++ args.args_count = 3; ++ args.args[0] = 0; ++ args.args[1] = hwirq - 32; ++ args.args[2] = IRQ_TYPE_EDGE_RISING; ++ ++ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); ++ if (err) ++ return err; ++ ++ /* Configure the interrupt line to be edge */ ++ d = irq_domain_get_irq_data(domain->parent, virq); ++ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); ++ return 0; ++} ++ ++static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) ++{ ++ int pos; ++ ++ pos = hwirq - v2m->spi_start; ++ if (pos < 0 || pos >= v2m->nr_spis) { ++ pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); ++ return; ++ } ++ ++ spin_lock(&v2m->msi_cnt_lock); ++ __clear_bit(pos, v2m->bm); ++ spin_unlock(&v2m->msi_cnt_lock); ++} ++ ++static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ struct v2m_data *v2m = domain->host_data; ++ int hwirq, offset, err = 0; ++ ++ spin_lock(&v2m->msi_cnt_lock); ++ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); ++ if (offset < v2m->nr_spis) ++ __set_bit(offset, v2m->bm); ++ else ++ err = -ENOSPC; ++ spin_unlock(&v2m->msi_cnt_lock); ++ ++ if (err) ++ return err; ++ ++ hwirq = v2m->spi_start + offset; ++ ++ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); ++ if (err) { ++ gicv2m_unalloc_msi(v2m, hwirq); ++ return err; ++ } ++ ++ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, ++ &gicv2m_irq_chip, v2m); ++ ++ return 0; ++} ++ ++static void gicv2m_irq_domain_free(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct v2m_data *v2m = irq_data_get_irq_chip_data(d); ++ ++ BUG_ON(nr_irqs != 1); ++ gicv2m_unalloc_msi(v2m, d->hwirq); ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++static const struct irq_domain_ops gicv2m_domain_ops = { ++ .alloc = gicv2m_irq_domain_alloc, ++ .free = gicv2m_irq_domain_free, ++}; ++ ++static bool is_msi_spi_valid(u32 base, u32 num) ++{ ++ if (base < V2M_MIN_SPI) { ++ pr_err("Invalid MSI base SPI (base:%u)\n", base); ++ return false; ++ } ++ ++ if ((num == 0) || (base + num > V2M_MAX_SPI)) { ++ pr_err("Number of SPIs (%u) exceed maximum (%u)\n", ++ num, V2M_MAX_SPI - V2M_MIN_SPI + 1); ++ return false; ++ } ++ ++ return true; ++} ++ ++static int __init gicv2m_init_one(struct device_node *node, ++ struct irq_domain *parent) ++{ ++ int ret; ++ struct v2m_data *v2m; ++ ++ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); ++ if (!v2m) { ++ pr_err("Failed to allocate struct v2m_data.\n"); ++ return -ENOMEM; ++ } ++ ++ ret = of_address_to_resource(node, 0, &v2m->res); ++ if (ret) { ++ pr_err("Failed to allocate v2m resource.\n"); ++ goto err_free_v2m; ++ } ++ ++ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); ++ if (!v2m->base) { ++ pr_err("Failed to map GICv2m resource\n"); ++ ret = -ENOMEM; ++ goto err_free_v2m; ++ } ++ ++ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && ++ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { ++ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", ++ v2m->spi_start, v2m->nr_spis); ++ } else { ++ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); ++ ++ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); ++ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); ++ } ++ ++ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) { ++ ret = -EINVAL; ++ goto err_iounmap; ++ } ++ ++ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), ++ GFP_KERNEL); ++ if (!v2m->bm) { ++ ret = -ENOMEM; ++ goto err_iounmap; ++ } ++ ++ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m); ++ if (!v2m->domain) { ++ pr_err("Failed to create GICv2m domain\n"); ++ ret = -ENOMEM; ++ goto err_free_bm; ++ } ++ ++ v2m->domain->parent = parent; ++ v2m->mchip.of_node = node; ++ v2m->mchip.domain = pci_msi_create_irq_domain(node, ++ &gicv2m_msi_domain_info, ++ v2m->domain); ++ if (!v2m->mchip.domain) { ++ pr_err("Failed to create MSI domain\n"); ++ ret = -ENOMEM; ++ goto err_free_domains; ++ } ++ ++ spin_lock_init(&v2m->msi_cnt_lock); ++ ++ ret = 
of_pci_msi_chip_add(&v2m->mchip); ++ if (ret) { ++ pr_err("Failed to add msi_chip.\n"); ++ goto err_free_domains; ++ } ++ ++ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, ++ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, ++ v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); ++ ++ return 0; ++ ++err_free_domains: ++ if (v2m->mchip.domain) ++ irq_domain_remove(v2m->mchip.domain); ++ if (v2m->domain) ++ irq_domain_remove(v2m->domain); ++err_free_bm: ++ kfree(v2m->bm); ++err_iounmap: ++ iounmap(v2m->base); ++err_free_v2m: ++ kfree(v2m); ++ return ret; ++} ++ ++static struct of_device_id gicv2m_device_id[] = { ++ { .compatible = "arm,gic-v2m-frame", }, ++ {}, ++}; ++ ++int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) ++{ ++ int ret = 0; ++ struct device_node *child; ++ ++ for (child = of_find_matching_node(node, gicv2m_device_id); child; ++ child = of_find_matching_node(child, gicv2m_device_id)) { ++ if (!of_find_property(child, "msi-controller", NULL)) ++ continue; ++ ++ ret = gicv2m_init_one(child, parent); ++ if (ret) { ++ of_node_put(node); ++ break; ++ } ++ } ++ ++ return ret; ++} +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index 43c50ed..d689158 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -1293,7 +1293,8 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + + dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); + dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); +- return __its_msi_prepare(domain->parent, dev_alias.dev_id, dev, dev_alias.count, info); ++ return __its_msi_prepare(domain, dev_alias.dev_id, ++ dev, dev_alias.count, info); + } + + static struct msi_domain_ops its_pci_msi_ops = { +@@ -1535,13 +1536,14 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); + + if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { +- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); ++ its->domain = irq_domain_add_tree(node, &its_domain_ops, its); + if (!its->domain) { + err = -ENOMEM; + goto out_free_tables; + } + + its->domain->parent = parent; ++ its->domain->bus_token = DOMAIN_BUS_NEXUS; + + its->msi_chip.domain = pci_msi_create_irq_domain(node, + &its_pci_msi_domain_info, +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index 34feda3..fd8850d 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -238,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + if (irq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + if (gic_irq_in_rdist(d)) { +@@ -249,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + rwp_wait = gic_dist_wait_for_rwp; + } + +- gic_configure_irq(irq, type, base, rwp_wait); +- +- return 0; ++ return gic_configure_irq(irq, type, base, rwp_wait); + } + + static u64 gic_mpidr_to_affinity(u64 mpidr) +@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + tlist |= 1 << (mpidr & 0xf); + + cpu = cpumask_next(cpu, mask); +- if (cpu == nr_cpu_ids) ++ if (cpu >= nr_cpu_ids) + goto out; + + mpidr = cpu_logical_map(cpu); +@@ -481,15 +481,19 @@ out: + return 
tlist; + } + ++#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ ++ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ ++ << ICC_SGI1R_AFFINITY_## level ##_SHIFT) ++ + static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) + { + u64 val; + +- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | +- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | +- irq << 24 | +- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | +- tlist); ++ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | ++ irq << ICC_SGI1R_SGI_ID_SHIFT | ++ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | ++ tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +@@ -617,14 +621,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + /* PPIs */ + if (hw < 32) { + irq_set_percpu_devid(irq); +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_percpu_devid_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_percpu_devid_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } + /* SPIs */ + if (hw >= 32 && hw < gic_data.irq_nr) { +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_fasteoi_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + /* LPIs */ +@@ -667,9 +671,41 @@ static int gic_irq_domain_xlate(struct irq_domain *d, + return 0; + } + ++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ int i, ret; ++ irq_hw_number_t hwirq; ++ unsigned int type = IRQ_TYPE_NONE; ++ struct of_phandle_args *irq_data = arg; ++ ++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, ++ irq_data->args_count, &hwirq, &type); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) ++ gic_irq_domain_map(domain, virq + i, hwirq + i); ++ ++ return 0; ++} ++ ++static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); ++ irq_set_handler(virq + i, NULL); ++ irq_domain_reset_irq_data(d); ++ } ++} ++ + static const struct irq_domain_ops gic_irq_domain_ops = { +- .map = gic_irq_domain_map, + .xlate = gic_irq_domain_xlate, ++ .alloc = gic_irq_domain_alloc, ++ .free = gic_irq_domain_free, + }; + + static int __init gic_of_init(struct device_node *node, struct device_node *parent) +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +index 38493ff..ab0b1cb 100644 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + { + void __iomem *base = gic_dist_base(d); + unsigned int gicirq = gic_irq(d); ++ int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (gicirq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + raw_spin_lock(&irq_controller_lock); +@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type) + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + +- gic_configure_irq(gicirq, type, base, NULL); ++ ret = gic_configure_irq(gicirq, type, base, NULL); + + 
raw_spin_unlock(&irq_controller_lock); + +- return 0; ++ return ret; + } + + static int gic_retrigger(struct irq_data *d) +@@ -788,17 +791,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + { + if (hw < 32) { + irq_set_percpu_devid(irq); +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_percpu_devid_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_percpu_devid_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } else { +- irq_set_chip_and_handler(irq, &gic_chip, +- handle_fasteoi_irq); ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + gic_routable_irq_domain_ops->map(d, irq, hw); + } +- irq_set_chip_data(irq, d->host_data); + return 0; + } + +@@ -858,6 +860,31 @@ static struct notifier_block gic_cpu_notifier = { + }; + #endif + ++static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ int i, ret; ++ irq_hw_number_t hwirq; ++ unsigned int type = IRQ_TYPE_NONE; ++ struct of_phandle_args *irq_data = arg; ++ ++ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, ++ irq_data->args_count, &hwirq, &type); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) ++ gic_irq_domain_map(domain, virq + i, hwirq + i); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { ++ .xlate = gic_irq_domain_xlate, ++ .alloc = gic_irq_domain_alloc, ++ .free = irq_domain_free_irqs_top, ++}; ++ + static const struct irq_domain_ops gic_irq_domain_ops = { + .map = gic_irq_domain_map, + .unmap = gic_irq_domain_unmap, +@@ -948,18 +975,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + gic_cpu_map[i] = 0xff; + + /* +- * For primary GICs, skip over SGIs. +- * For secondary GICs, skip over PPIs, too. +- */ +- if (gic_nr == 0 && (irq_start & 31) > 0) { +- hwirq_base = 16; +- if (irq_start != -1) +- irq_start = (irq_start & ~31) + 16; +- } else { +- hwirq_base = 32; +- } +- +- /* + * Find out how many interrupts are supported. + * The GIC only supports up to 1020 interrupt sources. + */ +@@ -969,10 +984,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + gic_irqs = 1020; + gic->gic_irqs = gic_irqs; + +- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ ++ if (node) { /* DT case */ ++ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; ++ ++ if (!of_property_read_u32(node, "arm,routable-irqs", ++ &nr_routable_irqs)) { ++ ops = &gic_irq_domain_ops; ++ gic_irqs = nr_routable_irqs; ++ } ++ ++ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic); ++ } else { /* Non-DT case */ ++ /* ++ * For primary GICs, skip over SGIs. ++ * For secondary GICs, skip over PPIs, too. 
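++ * (With the check below, hwirq_base ends up as 16 only for a primary GIC whose irq_start is not 32-aligned; every other non-DT case uses 32.)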
++ */ ++ if (gic_nr == 0 && (irq_start & 31) > 0) { ++ hwirq_base = 16; ++ if (irq_start != -1) ++ irq_start = (irq_start & ~31) + 16; ++ } else { ++ hwirq_base = 32; ++ } ++ ++ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + +- if (of_property_read_u32(node, "arm,routable-irqs", +- &nr_routable_irqs)) { + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { +@@ -983,10 +1019,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, + + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); +- } else { +- gic->domain = irq_domain_add_linear(node, nr_routable_irqs, +- &gic_irq_domain_ops, +- gic); + } + + if (WARN_ON(!gic->domain)) +@@ -1037,6 +1069,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) + irq = irq_of_parse_and_map(node, 0); + gic_cascade_irq(gic_cnt, irq); + } ++ ++ if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) ++ gicv2m_of_init(node, gic_data[gic_cnt].domain); ++ + gic_cnt++; + return 0; + } +diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c +index 9c8f833..5507a0c 100644 +--- a/drivers/irqchip/irq-hip04.c ++++ b/drivers/irqchip/irq-hip04.c +@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) + { + void __iomem *base = hip04_dist_base(d); + unsigned int irq = hip04_irq(d); ++ int ret; + + /* Interrupt configuration for SGIs can't be changed */ + if (irq < 16) + return -EINVAL; + +- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) ++ /* SPIs have restrictions on the supported types */ ++ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && ++ type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + raw_spin_lock(&irq_controller_lock); + +- gic_configure_irq(irq, type, base, NULL); ++ ret = gic_configure_irq(irq, type, base, NULL); + + raw_spin_unlock(&irq_controller_lock); + +- return 0; ++ return ret; + } + + #ifdef CONFIG_SMP +diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig +index 6d91c27..d6af99f 100644 +--- a/drivers/memory/Kconfig ++++ b/drivers/memory/Kconfig +@@ -83,6 +83,6 @@ config FSL_CORENET_CF + + config FSL_IFC + bool +- depends on FSL_SOC ++ depends on FSL_SOC || ARCH_LAYERSCAPE + + endif +diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c +index 3d5d792..1b182b1 100644 +--- a/drivers/memory/fsl_ifc.c ++++ b/drivers/memory/fsl_ifc.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -30,7 +31,9 @@ + #include + #include + #include +-#include ++#include ++#include ++#include + + struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + EXPORT_SYMBOL(fsl_ifc_ctrl_dev); +@@ -58,11 +61,11 @@ int fsl_ifc_find(phys_addr_t addr_base) + { + int i = 0; + +- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) ++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) + return -ENODEV; + +- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { +- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); ++ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { ++ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); + if (cspr & CSPR_V && (cspr & CSPR_BA) == + convert_ifc_address(addr_base)) + return i; +@@ -74,21 +77,21 @@ EXPORT_SYMBOL(fsl_ifc_find); + + static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) + { +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; + + /* + * Clear all the common status and event registers + */ +- if 
(in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) +- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); ++ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) ++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); + + /* enable all error and events */ +- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); ++ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en); + + /* enable all error and event interrupts */ +- out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); +- out_be32(&ifc->cm_erattr0, 0x0); +- out_be32(&ifc->cm_erattr1, 0x0); ++ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en); ++ ifc_out32(0x0, &ifc->cm_erattr0); ++ ifc_out32(0x0, &ifc->cm_erattr1); + + return 0; + } +@@ -103,7 +106,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) + irq_dispose_mapping(ctrl->nand_irq); + irq_dispose_mapping(ctrl->irq); + +- iounmap(ctrl->regs); ++ iounmap(ctrl->gregs); + + dev_set_drvdata(&dev->dev, NULL); + kfree(ctrl); +@@ -121,15 +124,15 @@ static DEFINE_SPINLOCK(nand_irq_lock); + + static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) + { +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + unsigned long flags; + u32 stat; + + spin_lock_irqsave(&nand_irq_lock, flags); + +- stat = in_be32(&ifc->ifc_nand.nand_evter_stat); ++ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat); + if (stat) { +- out_be32(&ifc->ifc_nand.nand_evter_stat, stat); ++ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat); + ctrl->nand_stat = stat; + wake_up(&ctrl->nand_wait); + } +@@ -156,21 +159,21 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) + static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) + { + struct fsl_ifc_ctrl *ctrl = data; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; + u32 err_axiid, err_srcid, status, cs_err, err_addr; + irqreturn_t ret = IRQ_NONE; + + /* read for chip select error */ +- cs_err = in_be32(&ifc->cm_evter_stat); ++ cs_err = ifc_in32(&ifc->cm_evter_stat); + if (cs_err) { + dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" + "any memory bank 0x%08X\n", cs_err); + /* clear the chip select error */ +- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); ++ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); + + /* read error attribute registers print the error information */ +- status = in_be32(&ifc->cm_erattr0); +- err_addr = in_be32(&ifc->cm_erattr1); ++ status = ifc_in32(&ifc->cm_erattr0); ++ err_addr = ifc_in32(&ifc->cm_erattr1); + + if (status & IFC_CM_ERATTR0_ERTYP_READ) + dev_err(ctrl->dev, "Read transaction error" +@@ -213,7 +216,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) + static int fsl_ifc_ctrl_probe(struct platform_device *dev) + { + int ret = 0; +- ++ int version, banks; ++ void __iomem *addr; + + dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); + +@@ -224,16 +228,41 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) + dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); + + /* IOMAP the entire IFC region */ +- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); +- if (!fsl_ifc_ctrl_dev->regs) { ++ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); ++ if (!fsl_ifc_ctrl_dev->gregs) { + dev_err(&dev->dev, "failed to get memory region\n"); + ret = -ENODEV; + goto err; + } + ++ if (of_property_read_bool(dev->dev.of_node, "little-endian")) { ++ fsl_ifc_ctrl_dev->little_endian = true; ++ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE 
endian\n"); ++ } else { ++ fsl_ifc_ctrl_dev->little_endian = false; ++ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); ++ } ++ ++ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & ++ FSL_IFC_VERSION_MASK; ++ ++ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; ++ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", ++ version >> 24, (version >> 16) & 0xf, banks); ++ ++ fsl_ifc_ctrl_dev->version = version; ++ fsl_ifc_ctrl_dev->banks = banks; ++ ++ addr = fsl_ifc_ctrl_dev->gregs; ++ if (version >= FSL_IFC_VERSION_2_0_0) ++ addr += PGOFFSET_64K; ++ else ++ addr += PGOFFSET_4K; ++ fsl_ifc_ctrl_dev->rregs = addr; ++ + /* get the Controller level irq */ + fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); +- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { ++ if (fsl_ifc_ctrl_dev->irq == 0) { + dev_err(&dev->dev, "failed to get irq resource " + "for IFC\n"); + ret = -ENODEV; +diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c +index 9e21e4f..8f43ab8 100644 +--- a/drivers/mfd/vexpress-sysreg.c ++++ b/drivers/mfd/vexpress-sysreg.c +@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) + vexpress_config_set_master(vexpress_sysreg_get_master()); + + /* Confirm board type against DT property, if available */ +- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { ++ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) { + u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); + u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; + +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 10ecc0a..d356dbc 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -2402,6 +2402,10 @@ static const struct mmc_fixup blk_fixups[] = + * + * N.B. This doesn't affect SD cards. + */ ++ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, ++ MMC_QUIRK_BLK_NO_CMD23), ++ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, ++ MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, +diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig +index 1386065..b8c9b73 100644 +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -66,7 +66,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + has the effect of scrambling the addresses and formats of data + accessed in sizes other than the datum size. + +- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. ++ This is the case for the Nintendo Wii SDHCI. + + config MMC_SDHCI_PCI + tristate "SDHCI support on PCI bus" +@@ -130,8 +130,10 @@ config MMC_SDHCI_OF_ARASAN + config MMC_SDHCI_OF_ESDHC + tristate "SDHCI OF support for the Freescale eSDHC controller" + depends on MMC_SDHCI_PLTFM +- depends on PPC_OF +- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER ++ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE ++ select MMC_SDHCI_IO_ACCESSORS ++ select FSL_SOC_DRIVERS ++ select FSL_GUTS + help + This selects the Freescale eSDHC controller support. 
+ +@@ -142,7 +144,7 @@ config MMC_SDHCI_OF_ESDHC + config MMC_SDHCI_OF_HLWD + tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" + depends on MMC_SDHCI_PLTFM +- depends on PPC_OF ++ depends on PPC + select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + help + This selects the Secure Digital Host Controller Interface (SDHCI) +diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h +index a870c42..f2baede 100644 +--- a/drivers/mmc/host/sdhci-esdhc.h ++++ b/drivers/mmc/host/sdhci-esdhc.h +@@ -21,16 +21,23 @@ + #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ + SDHCI_QUIRK_NO_BUSY_IRQ | \ + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ +- SDHCI_QUIRK_PIO_NEEDS_DELAY) ++ SDHCI_QUIRK_PIO_NEEDS_DELAY | \ ++ SDHCI_QUIRK_NO_HISPD_BIT) ++ ++#define ESDHC_PROCTL 0x28 + + #define ESDHC_SYSTEM_CONTROL 0x2c + #define ESDHC_CLOCK_MASK 0x0000fff0 + #define ESDHC_PREDIV_SHIFT 8 + #define ESDHC_DIVIDER_SHIFT 4 ++#define ESDHC_CLOCK_CRDEN 0x00000008 + #define ESDHC_CLOCK_PEREN 0x00000004 + #define ESDHC_CLOCK_HCKEN 0x00000002 + #define ESDHC_CLOCK_IPGEN 0x00000001 + ++#define ESDHC_PRESENT_STATE 0x24 ++#define ESDHC_CLOCK_STABLE 0x00000008 ++ + /* pltfm-specific */ + #define ESDHC_HOST_CONTROL_LE 0x20 + +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c +index 8872c85..4a4a693 100644 +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -18,128 +18,334 @@ + #include + #include + #include ++#include ++#include + #include + #include "sdhci-pltfm.h" + #include "sdhci-esdhc.h" + + #define VENDOR_V_22 0x12 + #define VENDOR_V_23 0x13 +-static u32 esdhc_readl(struct sdhci_host *host, int reg) ++ ++struct sdhci_esdhc { ++ u8 vendor_ver; ++ u8 spec_ver; ++ u32 soc_ver; ++ u8 soc_rev; ++}; ++ ++/** ++ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register ++ * to make it compatible with SD spec. ++ * ++ * @host: pointer to sdhci_host ++ * @spec_reg: SD spec register address ++ * @value: 32bit eSDHC register value on spec_reg address ++ * ++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC ++ * registers are 32 bits. There are differences in register size, register ++ * address, register function, bit position and function between eSDHC spec ++ * and SD spec. ++ * ++ * Return a fixed up register value ++ */ ++static u32 esdhc_readl_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; + u32 ret; + +- ret = in_be32(host->ioaddr + reg); + /* + * The bit of ADMA flag in eSDHC is not compatible with standard + * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is + * supported by eSDHC. + * And for many FSL eSDHC controller, the reset value of field +- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, ++ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA, + * only these vendor version is greater than 2.2/0x12 support ADMA. +- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the +- * the verdor version number, oxFE is SDHCI_HOST_VERSION. 
+ */ +- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { +- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- if (tmp > VENDOR_V_22) +- ret |= SDHCI_CAN_DO_ADMA2; ++ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) { ++ if (esdhc->vendor_ver > VENDOR_V_22) { ++ ret = value | SDHCI_CAN_DO_ADMA2; ++ return ret; ++ } + } +- ++ ret = value; + return ret; + } + +-static u16 esdhc_readw(struct sdhci_host *host, int reg) ++static u16 esdhc_readw_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { + u16 ret; +- int base = reg & ~0x3; +- int shift = (reg & 0x2) * 8; ++ int shift = (spec_reg & 0x2) * 8; + +- if (unlikely(reg == SDHCI_HOST_VERSION)) +- ret = in_be32(host->ioaddr + base) & 0xffff; ++ if (spec_reg == SDHCI_HOST_VERSION) ++ ret = value & 0xffff; + else +- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; ++ ret = (value >> shift) & 0xffff; + return ret; + } + +-static u8 esdhc_readb(struct sdhci_host *host, int reg) ++static u8 esdhc_readb_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value) + { +- int base = reg & ~0x3; +- int shift = (reg & 0x3) * 8; +- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; ++ u8 ret; ++ u8 dma_bits; ++ int shift = (spec_reg & 0x3) * 8; ++ ++ ret = (value >> shift) & 0xff; + + /* + * "DMA select" locates at offset 0x28 in SD specification, but on + * P5020 or P3041, it locates at 0x29. + */ +- if (reg == SDHCI_HOST_CONTROL) { +- u32 dma_bits; +- +- dma_bits = in_be32(host->ioaddr + reg); ++ if (spec_reg == SDHCI_HOST_CONTROL) { + /* DMA select is 22,23 bits in Protocol Control Register */ +- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; +- ++ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK; + /* fixup the result */ + ret &= ~SDHCI_CTRL_DMA_MASK; + ret |= dma_bits; + } +- + return ret; + } + +-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) ++/** ++ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be ++ * written into eSDHC register. ++ * ++ * @host: pointer to sdhci_host ++ * @spec_reg: SD spec register address ++ * @value: 8/16/32bit SD spec register value that would be written ++ * @old_value: 32bit eSDHC register value on spec_reg address ++ * ++ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC ++ * registers are 32 bits. There are differences in register size, register ++ * address, register function, bit position and function between eSDHC spec ++ * and SD spec. ++ * ++ * Return a fixed up register value ++ */ ++static u32 esdhc_writel_fixup(struct sdhci_host *host, ++ int spec_reg, u32 value, u32 old_value) + { ++ u32 ret; ++ + /* +- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] +- * when SYSCTL[RSTD]) is set for some special operations. +- * No any impact other operation. ++ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] ++ * when SYSCTL[RSTD] is set for some special operations. ++ * No any impact on other operation. 
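++ * (Concretely, the fixup below ORs SDHCI_INT_BLK_GAP into any value written to SDHCI_INT_ENABLE and passes writes to other registers through unchanged.)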
+ */ +- if (reg == SDHCI_INT_ENABLE) +- val |= SDHCI_INT_BLK_GAP; +- sdhci_be32bs_writel(host, val, reg); ++ if (spec_reg == SDHCI_INT_ENABLE) ++ ret = value | SDHCI_INT_BLK_GAP; ++ else ++ ret = value; ++ ++ return ret; + } + +-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) ++static u32 esdhc_writew_fixup(struct sdhci_host *host, ++ int spec_reg, u16 value, u32 old_value) + { +- if (reg == SDHCI_BLOCK_SIZE) { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ int shift = (spec_reg & 0x2) * 8; ++ u32 ret; ++ ++ switch (spec_reg) { ++ case SDHCI_TRANSFER_MODE: ++ /* ++ * Postpone this write, we must do it together with a ++ * command write that is down below. Return old value. ++ */ ++ pltfm_host->xfer_mode_shadow = value; ++ return old_value; ++ case SDHCI_COMMAND: ++ ret = (value << 16) | pltfm_host->xfer_mode_shadow; ++ return ret; ++ } ++ ++ ret = old_value & (~(0xffff << shift)); ++ ret |= (value << shift); ++ ++ if (spec_reg == SDHCI_BLOCK_SIZE) { + /* + * Two last DMA bits are reserved, and first one is used for + * non-standard blksz of 4096 bytes that we don't support + * yet. So clear the DMA boundary bits. + */ +- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); ++ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0)); + } +- sdhci_be32bs_writew(host, val, reg); ++ return ret; + } + +-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) ++static u32 esdhc_writeb_fixup(struct sdhci_host *host, ++ int spec_reg, u8 value, u32 old_value) + { ++ u32 ret; ++ u32 dma_bits; ++ u8 tmp; ++ int shift = (spec_reg & 0x3) * 8; ++ ++ /* ++ * eSDHC doesn't have a standard power control register, so we do ++ * nothing here to avoid incorrect operation. ++ */ ++ if (spec_reg == SDHCI_POWER_CONTROL) ++ return old_value; + /* + * "DMA select" location is offset 0x28 in SD specification, but on + * P5020 or P3041, it's located at 0x29. + */ +- if (reg == SDHCI_HOST_CONTROL) { +- u32 dma_bits; +- ++ if (spec_reg == SDHCI_HOST_CONTROL) { + /* + * If host control register is not standard, exit + * this function + */ + if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) +- return; ++ return old_value; + + /* DMA select is 22,23 bits in Protocol Control Register */ +- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; +- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, +- dma_bits); +- val &= ~SDHCI_CTRL_DMA_MASK; +- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; ++ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5; ++ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits; ++ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) | ++ (old_value & SDHCI_CTRL_DMA_MASK); ++ ret = (ret & (~0xff)) | tmp; ++ ++ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */ ++ ret &= ~ESDHC_HOST_CONTROL_RES; ++ return ret; + } + +- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). 
*/ +- if (reg == SDHCI_HOST_CONTROL) +- val &= ~ESDHC_HOST_CONTROL_RES; +- sdhci_be32bs_writeb(host, val, reg); ++ ret = (old_value & (~(0xff << shift))) | (value << shift); ++ return ret; ++} ++ ++static u32 esdhc_be_readl(struct sdhci_host *host, int reg) ++{ ++ u32 ret; ++ u32 value; ++ ++ value = ioread32be(host->ioaddr + reg); ++ ret = esdhc_readl_fixup(host, reg, value); ++ ++ return ret; ++} ++ ++static u32 esdhc_le_readl(struct sdhci_host *host, int reg) ++{ ++ u32 ret; ++ u32 value; ++ ++ value = ioread32(host->ioaddr + reg); ++ ret = esdhc_readl_fixup(host, reg, value); ++ ++ return ret; ++} ++ ++static u16 esdhc_be_readw(struct sdhci_host *host, int reg) ++{ ++ u16 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_readw_fixup(host, reg, value); ++ return ret; ++} ++ ++static u16 esdhc_le_readw(struct sdhci_host *host, int reg) ++{ ++ u16 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_readw_fixup(host, reg, value); ++ return ret; ++} ++ ++static u8 esdhc_be_readb(struct sdhci_host *host, int reg) ++{ ++ u8 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_readb_fixup(host, reg, value); ++ return ret; ++} ++ ++static u8 esdhc_le_readb(struct sdhci_host *host, int reg) ++{ ++ u8 ret; ++ u32 value; ++ int base = reg & ~0x3; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_readb_fixup(host, reg, value); ++ return ret; ++} ++ ++static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ u32 value; ++ ++ value = esdhc_writel_fixup(host, reg, val, 0); ++ iowrite32be(value, host->ioaddr + reg); ++} ++ ++static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ u32 value; ++ ++ value = esdhc_writel_fixup(host, reg, val, 0); ++ iowrite32(value, host->ioaddr + reg); ++} ++ ++static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_writew_fixup(host, reg, val, value); ++ if (reg != SDHCI_TRANSFER_MODE) ++ iowrite32be(ret, host->ioaddr + base); ++} ++ ++static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_writew_fixup(host, reg, val, value); ++ if (reg != SDHCI_TRANSFER_MODE) ++ iowrite32(ret, host->ioaddr + base); ++} ++ ++static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32be(host->ioaddr + base); ++ ret = esdhc_writeb_fixup(host, reg, val, value); ++ iowrite32be(ret, host->ioaddr + base); ++} ++ ++static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ int base = reg & ~0x3; ++ u32 value; ++ u32 ret; ++ ++ value = ioread32(host->ioaddr + base); ++ ret = esdhc_writeb_fixup(host, reg, val, value); ++ iowrite32(ret, host->ioaddr + base); + } + + /* +@@ -149,37 +355,116 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) + * For Continue, apply soft reset for data(SYSCTL[RSTD]); + * and re-issue the entire read transaction from beginning. 
+ */ +-static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) ++static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) + { +- u32 tmp; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; + bool applicable; + dma_addr_t dmastart; + dma_addr_t dmanow; + +- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- + applicable = (intmask & SDHCI_INT_DATA_END) && +- (intmask & SDHCI_INT_BLK_GAP) && +- (tmp == VENDOR_V_23); +- if (!applicable) ++ (intmask & SDHCI_INT_BLK_GAP) && ++ (esdhc->vendor_ver == VENDOR_V_23); ++ if (applicable) { ++ ++ sdhci_reset(host, SDHCI_RESET_DATA); ++ host->data->error = 0; ++ dmastart = sg_dma_address(host->data->sg); ++ dmanow = dmastart + host->data->bytes_xfered; ++ /* ++ * Force update to the next DMA block boundary. ++ */ ++ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + ++ SDHCI_DEFAULT_BOUNDARY_SIZE; ++ host->data->bytes_xfered = dmanow - dmastart; ++ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); ++ + return; ++ } + +- host->data->error = 0; +- dmastart = sg_dma_address(host->data->sg); +- dmanow = dmastart + host->data->bytes_xfered; + /* +- * Force update to the next DMA block boundary. ++ * Check for A-004388: eSDHC DMA might not stop if error ++ * occurs on system transaction ++ * Impact list: ++ * T4240-4160-R1.0 B4860-4420-R1.0-R2.0 P1010-1014-R1.0 ++ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0 ++ * P5020-5010-R2.0-R1.0 P5040-5021-R2.0-R2.1 + */ +- dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + +- SDHCI_DEFAULT_BOUNDARY_SIZE; +- host->data->bytes_xfered = dmanow - dmastart; +- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); ++ if (!(((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)))) ++ return; ++ ++ sdhci_reset(host, SDHCI_RESET_DATA); ++ ++ if (host->flags & SDHCI_USE_ADMA) { ++ u32 mod, i, offset; ++ u8 *desc; ++ dma_addr_t addr; ++ struct scatterlist *sg; ++ __le32 *dataddr; ++ __le32 *cmdlen; ++ ++ /* ++ * If block count was enabled, in case read transfer there ++ * is no data was corrupted ++ */ ++ mod = sdhci_readl(host, SDHCI_TRANSFER_MODE); ++ if ((mod & SDHCI_TRNS_BLK_CNT_EN) && ++ (host->data->flags & MMC_DATA_READ)) ++ host->data->error = 0; ++ ++ BUG_ON(!host->data); ++ desc = host->adma_table; ++ for_each_sg(host->data->sg, sg, host->sg_count, i) { ++ addr = sg_dma_address(sg); ++ offset = (4 - (addr & 0x3)) & 0x3; ++ if (offset) ++ desc += 8; ++ desc += 8; ++ } ++ ++ /* ++ * Add an extra zero descriptor next to the ++ * terminating 
descriptor. ++ */ ++ desc += 8; ++ WARN_ON((desc - (u8 *)(host->adma_table)) > (128 * 2 + 1) * 4); ++ ++ dataddr = (__le32 __force *)(desc + 4); ++ cmdlen = (__le32 __force *)desc; ++ ++ cmdlen[0] = cpu_to_le32(0); ++ dataddr[0] = cpu_to_le32(0); ++ } ++ ++ if ((host->flags & SDHCI_USE_SDMA) && ++ (host->data->flags & MMC_DATA_READ)) ++ host->data->error = 0; ++ ++ return; + } + + static int esdhc_of_enable_dma(struct sdhci_host *host) + { +- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); ++ u32 value; ++ ++ value = sdhci_readl(host, ESDHC_DMA_SYSCTL); ++ value |= ESDHC_DMA_SNOOP; ++ sdhci_writel(host, value, ESDHC_DMA_SYSCTL); + return 0; + } + +@@ -199,15 +484,22 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) + + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + { +- int pre_div = 2; ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; ++ int pre_div = 1; + int div = 1; + u32 temp; ++ u32 timeout; + + host->mmc->actual_clock = 0; + + if (clock == 0) + return; + ++ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ ++ if (esdhc->vendor_ver < VENDOR_V_23) ++ pre_div = 2; ++ + /* Workaround to reduce the clock frequency for p1010 esdhc */ + if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { + if (clock > 20000000) +@@ -218,7 +510,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +- | ESDHC_CLOCK_MASK); ++ | ESDHC_CLOCK_CRDEN | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) +@@ -229,7 +521,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); +- ++ host->mmc->actual_clock = host->max_clk / pre_div / div; + pre_div >>= 1; + div--; + +@@ -238,70 +530,117 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); +- mdelay(1); +-} + +-static void esdhc_of_platform_init(struct sdhci_host *host) +-{ +- u32 vvn; +- +- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +- if (vvn == VENDOR_V_22) +- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; ++ /* Wait max 20 ms */ ++ timeout = 20; ++ while (!(sdhci_readl(host, ESDHC_PRESENT_STATE) & ESDHC_CLOCK_STABLE)) { ++ if (timeout == 0) { ++ pr_err("%s: Internal clock never stabilised.\n", ++ mmc_hostname(host->mmc)); ++ return; ++ } ++ timeout--; ++ mdelay(1); ++ } + +- if (vvn > VENDOR_V_22) +- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; ++ temp |= ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + } + + static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) + { + u32 ctrl; + ++ ctrl = sdhci_readl(host, ESDHC_PROCTL); ++ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK); + switch (width) { + case MMC_BUS_WIDTH_8: +- ctrl = ESDHC_CTRL_8BITBUS; ++ ctrl |= ESDHC_CTRL_8BITBUS; + break; + + case MMC_BUS_WIDTH_4: +- ctrl = ESDHC_CTRL_4BITBUS; ++ ctrl |= ESDHC_CTRL_4BITBUS; + break; + + default: +- ctrl = 0; + break; + } + +- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, +- ESDHC_CTRL_BUSWIDTH_MASK, ctrl); ++ 
sdhci_writel(host, ctrl, ESDHC_PROCTL); + } + +-static const struct sdhci_ops sdhci_esdhc_ops = { +- .read_l = esdhc_readl, +- .read_w = esdhc_readw, +- .read_b = esdhc_readb, +- .write_l = esdhc_writel, +- .write_w = esdhc_writew, +- .write_b = esdhc_writeb, +- .set_clock = esdhc_of_set_clock, +- .enable_dma = esdhc_of_enable_dma, +- .get_max_clock = esdhc_of_get_max_clock, +- .get_min_clock = esdhc_of_get_min_clock, +- .platform_init = esdhc_of_platform_init, +- .adma_workaround = esdhci_of_adma_workaround, +- .set_bus_width = esdhc_pltfm_set_bus_width, +- .reset = sdhci_reset, +- .set_uhs_signaling = sdhci_set_uhs_signaling, +-}; ++/* ++ * A-003980: SDHC: Glitch is generated on the card clock with software reset ++ * or clock divider change ++ * Workaround: ++ * A simple workaround is to disable the SD card clock before the software ++ * reset, and enable it when the module resumes normal operation. The Host ++ * and the SD card are in a master-slave relationship. The Host provides ++ * clock and control transfer across the interface. Therefore, any existing ++ * operation is discarded when the Host controller is reset. ++ */ ++static int esdhc_of_reset_workaround(struct sdhci_host *host, u8 mask) ++{ ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = pltfm_host->priv; ++ bool disable_clk_before_reset = false; ++ u32 temp; + +-#ifdef CONFIG_PM ++ /* ++ * Check for A-003980 ++ * Impact list: ++ * T4240-4160-R1.0-R2.0 B4860-4420-R1.0-R2.0 P5040-5021-R1.0-R2.0-R2.1 ++ * P5020-5010-R1.0-R2.0 P3041-R1.0-R1.1-R2.0 P2041-2040-R1.0-R1.1-R2.0 ++ * P1010-1014-R1.0 ++ */ ++ if (((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || ++ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)) || ++ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || ++ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || ++ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10))) ++ disable_clk_before_reset = true; ++ ++ if (disable_clk_before_reset && (mask & SDHCI_RESET_ALL)) { ++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); ++ temp &= ~ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); ++ sdhci_reset(host, mask); ++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); ++ temp |= ESDHC_CLOCK_CRDEN; ++ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); ++ return 1; ++ } ++ return 0; ++} ++ ++static void esdhc_reset(struct sdhci_host *host, u8 mask) ++{ ++ if (!esdhc_of_reset_workaround(host, mask)) ++ sdhci_reset(host, mask); + ++ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); ++ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++} ++ ++#ifdef CONFIG_PM + static u32 esdhc_proctl; + static int esdhc_of_suspend(struct device *dev) + { + struct 
sdhci_host *host = dev_get_drvdata(dev); + +- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); ++ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); + + return sdhci_suspend_host(host); + } +@@ -311,11 +650,8 @@ static int esdhc_of_resume(struct device *dev) + struct sdhci_host *host = dev_get_drvdata(dev); + int ret = sdhci_resume_host(host); + +- if (ret == 0) { +- /* Isn't this already done by sdhci_resume_host() ? --rmk */ +- esdhc_of_enable_dma(host); +- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); +- } ++ if (ret == 0) ++ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); + + return ret; + } +@@ -329,30 +665,120 @@ static const struct dev_pm_ops esdhc_pmops = { + #define ESDHC_PMOPS NULL + #endif + +-static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { +- /* +- * card detection could be handled via GPIO +- * eSDHC cannot support End Attribute in NOP ADMA descriptor +- */ ++static const struct sdhci_ops sdhci_esdhc_be_ops = { ++ .read_l = esdhc_be_readl, ++ .read_w = esdhc_be_readw, ++ .read_b = esdhc_be_readb, ++ .write_l = esdhc_be_writel, ++ .write_w = esdhc_be_writew, ++ .write_b = esdhc_be_writeb, ++ .set_clock = esdhc_of_set_clock, ++ .enable_dma = esdhc_of_enable_dma, ++ .get_max_clock = esdhc_of_get_max_clock, ++ .get_min_clock = esdhc_of_get_min_clock, ++ .adma_workaround = esdhc_of_adma_workaround, ++ .set_bus_width = esdhc_pltfm_set_bus_width, ++ .reset = esdhc_reset, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++}; ++ ++static const struct sdhci_ops sdhci_esdhc_le_ops = { ++ .read_l = esdhc_le_readl, ++ .read_w = esdhc_le_readw, ++ .read_b = esdhc_le_readb, ++ .write_l = esdhc_le_writel, ++ .write_w = esdhc_le_writew, ++ .write_b = esdhc_le_writeb, ++ .set_clock = esdhc_of_set_clock, ++ .enable_dma = esdhc_of_enable_dma, ++ .get_max_clock = esdhc_of_get_max_clock, ++ .get_min_clock = esdhc_of_get_min_clock, ++ .adma_workaround = esdhc_of_adma_workaround, ++ .set_bus_width = esdhc_pltfm_set_bus_width, ++ .reset = esdhc_reset, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++}; ++ ++static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = { + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION + | SDHCI_QUIRK_NO_CARD_NO_RESET + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +- .ops = &sdhci_esdhc_ops, ++ .ops = &sdhci_esdhc_be_ops, + }; + ++static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = { ++ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION ++ | SDHCI_QUIRK_NO_CARD_NO_RESET ++ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, ++ .ops = &sdhci_esdhc_le_ops, ++}; ++ ++static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) ++{ ++ struct sdhci_pltfm_host *pltfm_host; ++ struct sdhci_esdhc *esdhc; ++ u16 host_ver; ++ u32 svr; ++ ++ pltfm_host = sdhci_priv(host); ++ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc), ++ GFP_KERNEL); ++ pltfm_host->priv = esdhc; ++ ++ svr = guts_get_svr(); ++ esdhc->soc_ver = SVR_SOC_VER(svr); ++ esdhc->soc_rev = SVR_REV(svr); ++ ++ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); ++ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> ++ SDHCI_VENDOR_VER_SHIFT; ++ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; ++} ++ + static int sdhci_esdhc_probe(struct platform_device *pdev) + { + struct sdhci_host *host; + struct device_node *np; ++ struct sdhci_pltfm_host *pltfm_host; ++ struct sdhci_esdhc *esdhc; + int ret; + +- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); ++ np = pdev->dev.of_node; ++ ++ if (of_get_property(np, 
"little-endian", NULL)) ++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0); ++ else ++ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0); ++ + if (IS_ERR(host)) + return PTR_ERR(host); + ++ esdhc_init(pdev, host); ++ + sdhci_get_of_property(pdev); + +- np = pdev->dev.of_node; ++ pltfm_host = sdhci_priv(host); ++ esdhc = pltfm_host->priv; ++ if (esdhc->vendor_ver == VENDOR_V_22) ++ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; ++ ++ if (esdhc->vendor_ver > VENDOR_V_22) ++ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; ++ ++ if (of_device_is_compatible(np, "fsl,p5040-esdhc") || ++ of_device_is_compatible(np, "fsl,p5020-esdhc") || ++ of_device_is_compatible(np, "fsl,p4080-esdhc") || ++ of_device_is_compatible(np, "fsl,p1020-esdhc") || ++ of_device_is_compatible(np, "fsl,t1040-esdhc") || ++ of_device_is_compatible(np, "fsl,ls1021a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls2080a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls2085a-esdhc") || ++ of_device_is_compatible(np, "fsl,ls1043a-esdhc")) ++ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; ++ ++ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) ++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; ++ + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + /* + * Freescale messed up with P2020 as it has a non-standard +@@ -362,13 +788,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) + } + + /* call to generic mmc_of_parse to support additional capabilities */ +- mmc_of_parse(host->mmc); ++ ret = mmc_of_parse(host->mmc); ++ if (ret) ++ goto err; ++ + mmc_of_parse_voltage(np, &host->ocr_mask); + + ret = sdhci_add_host(host); + if (ret) +- sdhci_pltfm_free(pdev); ++ goto err; + ++ return 0; ++ err: ++ sdhci_pltfm_free(pdev); + return ret; + } + +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 023c201..8af38a6 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -44,8 +44,6 @@ + + #define MAX_TUNING_LOOP 40 + +-#define ADMA_SIZE ((128 * 2 + 1) * 4) +- + static unsigned int debug_quirks = 0; + static unsigned int debug_quirks2; + +@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host) + pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); + +- if (host->flags & SDHCI_USE_ADMA) +- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", +- readl(host->ioaddr + SDHCI_ADMA_ERROR), +- readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ if (host->flags & SDHCI_USE_ADMA) { ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", ++ readl(host->ioaddr + SDHCI_ADMA_ERROR), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ else ++ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", ++ readl(host->ioaddr + SDHCI_ADMA_ERROR), ++ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); ++ } + + pr_debug(DRIVER_NAME ": ===========================================\n"); + } +@@ -231,6 +236,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; + ++ if (host->flags & SDHCI_AUTO_CMD12) ++ host->ier |= SDHCI_INT_ACMD12ERR; ++ + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + +@@ -448,18 +456,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) + local_irq_restore(*flags); + } + +-static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) ++static void 
sdhci_adma_write_desc(struct sdhci_host *host, void *desc, ++ dma_addr_t addr, int len, unsigned cmd) + { +- __le32 *dataddr = (__le32 __force *)(desc + 4); +- __le16 *cmdlen = (__le16 __force *)desc; ++ struct sdhci_adma2_64_desc *dma_desc = desc; ++ ++ /* 32-bit and 64-bit descriptors have these members in same position */ ++ dma_desc->cmd = cpu_to_le16(cmd); ++ dma_desc->len = cpu_to_le16(len); ++ dma_desc->addr_lo = cpu_to_le32((u32)addr); + +- /* SDHCI specification says ADMA descriptors should be 4 byte +- * aligned, so using 16 or 32bit operations should be safe. */ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); ++} + +- cmdlen[0] = cpu_to_le16(cmd); +- cmdlen[1] = cpu_to_le16(len); ++static void sdhci_adma_mark_end(void *desc) ++{ ++ struct sdhci_adma2_64_desc *dma_desc = desc; + +- dataddr[0] = cpu_to_le32(addr); ++ /* 32-bit and 64-bit descriptors have 'cmd' in same position */ ++ dma_desc->cmd |= cpu_to_le16(ADMA2_END); + } + + static int sdhci_adma_table_pre(struct sdhci_host *host, +@@ -467,8 +483,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + { + int direction; + +- u8 *desc; +- u8 *align; ++ void *desc; ++ void *align; + dma_addr_t addr; + dma_addr_t align_addr; + int len, offset; +@@ -489,17 +505,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + direction = DMA_TO_DEVICE; + + host->align_addr = dma_map_single(mmc_dev(host->mmc), +- host->align_buffer, 128 * 4, direction); ++ host->align_buffer, host->align_buffer_sz, direction); + if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) + goto fail; +- BUG_ON(host->align_addr & 0x3); ++ BUG_ON(host->align_addr & host->align_mask); + + host->sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, direction); + if (host->sg_count == 0) + goto unmap_align; + +- desc = host->adma_desc; ++ desc = host->adma_table; + align = host->align_buffer; + + align_addr = host->align_addr; +@@ -515,24 +531,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + * the (up to three) bytes that screw up the + * alignment. + */ +- offset = (4 - (addr & 0x3)) & 0x3; ++ offset = (host->align_sz - (addr & host->align_mask)) & ++ host->align_mask; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); +- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); ++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > ++ (PAGE_SIZE - offset)); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + /* tran, valid */ +- sdhci_set_adma_desc(desc, align_addr, offset, 0x21); ++ sdhci_adma_write_desc(host, desc, align_addr, offset, ++ ADMA2_TRAN_VALID); + + BUG_ON(offset > 65536); + +- align += 4; +- align_addr += 4; ++ align += host->align_sz; ++ align_addr += host->align_sz; + +- desc += 8; ++ desc += host->desc_sz; + + addr += offset; + len -= offset; +@@ -541,23 +560,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + BUG_ON(len > 65536); + + /* tran, valid */ +- sdhci_set_adma_desc(desc, addr, len, 0x21); +- desc += 8; ++ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); ++ desc += host->desc_sz; + + /* + * If this triggers then we have a calculation bug + * somewhere. 
:/ + */ +- WARN_ON((desc - host->adma_desc) > ADMA_SIZE); ++ WARN_ON((desc - host->adma_table) >= host->adma_table_sz); + } + + if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { + /* + * Mark the last descriptor as the terminating descriptor + */ +- if (desc != host->adma_desc) { +- desc -= 8; +- desc[0] |= 0x2; /* end */ ++ if (desc != host->adma_table) { ++ desc -= host->desc_sz; ++ sdhci_adma_mark_end(desc); + } + } else { + /* +@@ -565,7 +584,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + */ + + /* nop, end, valid */ +- sdhci_set_adma_desc(desc, 0, 0, 0x3); ++ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); + } + + /* +@@ -573,14 +592,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, + */ + if (data->flags & MMC_DATA_WRITE) { + dma_sync_single_for_device(mmc_dev(host->mmc), +- host->align_addr, 128 * 4, direction); ++ host->align_addr, host->align_buffer_sz, direction); + } + + return 0; + + unmap_align: + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, +- 128 * 4, direction); ++ host->align_buffer_sz, direction); + fail: + return -EINVAL; + } +@@ -592,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + + struct scatterlist *sg; + int i, size; +- u8 *align; ++ void *align; + char *buffer; + unsigned long flags; + bool has_unaligned; +@@ -603,12 +622,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + direction = DMA_TO_DEVICE; + + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, +- 128 * 4, direction); ++ host->align_buffer_sz, direction); + + /* Do a quick scan of the SG list for any unaligned mappings */ + has_unaligned = false; + for_each_sg(data->sg, sg, host->sg_count, i) +- if (sg_dma_address(sg) & 3) { ++ if (sg_dma_address(sg) & host->align_mask) { + has_unaligned = true; + break; + } +@@ -620,15 +639,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host, + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { +- if (sg_dma_address(sg) & 0x3) { +- size = 4 - (sg_dma_address(sg) & 0x3); ++ if (sg_dma_address(sg) & host->align_mask) { ++ size = host->align_sz - ++ (sg_dma_address(sg) & host->align_mask); + + buffer = sdhci_kmap_atomic(sg, &flags); +- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); ++ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > ++ (PAGE_SIZE - size)); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + +- align += 4; ++ align += host->align_sz; + } + } + } +@@ -822,6 +843,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) + } else { + sdhci_writel(host, host->adma_addr, + SDHCI_ADMA_ADDRESS); ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ sdhci_writel(host, ++ (u64)host->adma_addr >> 32, ++ SDHCI_ADMA_ADDRESS_HI); + } + } else { + int sg_cnt; +@@ -855,10 +880,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && +- (host->flags & SDHCI_USE_ADMA)) +- ctrl |= SDHCI_CTRL_ADMA32; +- else ++ (host->flags & SDHCI_USE_ADMA)) { ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ ctrl |= SDHCI_CTRL_ADMA64; ++ else ++ ctrl |= SDHCI_CTRL_ADMA32; ++ } else { + ctrl |= SDHCI_CTRL_SDMA; ++ } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } + +@@ -1797,6 +1826,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, + ctrl |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + ++ /* Some 
controllers need to do more when switching */ ++ if (host->ops->voltage_switch) ++ host->ops->voltage_switch(host); ++ + /* 1.8V regulator output should be stable within 5 ms */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl & SDHCI_CTRL_VDD_180) +@@ -2250,7 +2283,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | +- SDHCI_INT_INDEX)) ++ SDHCI_INT_INDEX | SDHCI_INT_ACMD12ERR)) + host->cmd->error = -EILSEQ; + + if (host->cmd->error) { +@@ -2292,32 +2325,36 @@ } + + #ifdef CONFIG_MMC_DEBUG +-static void sdhci_show_adma_error(struct sdhci_host *host) ++static void sdhci_adma_show_error(struct sdhci_host *host) + { + const char *name = mmc_hostname(host->mmc); +- u8 *desc = host->adma_desc; +- __le32 *dma; +- __le16 *len; +- u8 attr; ++ void *desc = host->adma_table; + + sdhci_dumpregs(host); + + while (true) { +- dma = (__le32 *)(desc + 4); +- len = (__le16 *)(desc + 2); +- attr = *desc; +- +- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", +- name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); ++ struct sdhci_adma2_64_desc *dma_desc = desc; ++ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", ++ name, desc, le32_to_cpu(dma_desc->addr_hi), ++ le32_to_cpu(dma_desc->addr_lo), ++ le16_to_cpu(dma_desc->len), ++ le16_to_cpu(dma_desc->cmd)); ++ else ++ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", ++ name, desc, le32_to_cpu(dma_desc->addr_lo), ++ le16_to_cpu(dma_desc->len), ++ le16_to_cpu(dma_desc->cmd)); + +- desc += 8; ++ desc += host->desc_sz; + +- if (attr & 2) ++ if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) + break; + } + } + #else +-static void sdhci_show_adma_error(struct sdhci_host *host) { } ++static void sdhci_adma_show_error(struct sdhci_host *host) { } + #endif + + static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) +@@ -2380,7 +2417,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) + host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) { + pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); +- sdhci_show_adma_error(host); ++ sdhci_adma_show_error(host); + host->data->error = -EIO; + if (host->ops->adma_workaround) + host->ops->adma_workaround(host, intmask); +@@ -2859,6 +2896,16 @@ int sdhci_add_host(struct sdhci_host *host) + host->flags &= ~SDHCI_USE_ADMA; + } + ++ /* ++ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask ++ * and *must* do 64-bit DMA. A driver has the opportunity to change ++ * that during the first call to ->enable_dma(). Similarly ++ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to ++ * implement. ++ */ ++ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) ++ host->flags |= SDHCI_USE_64_BIT_DMA; ++ + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) { + if (host->ops->enable_dma(host)) { +@@ -2870,33 +2917,59 @@ } + } + ++ /* SDMA does not support 64-bit DMA */ ++ if (host->flags & SDHCI_USE_64_BIT_DMA) ++ host->flags &= ~SDHCI_USE_SDMA; ++ + if (host->flags & SDHCI_USE_ADMA) { + /* +- * We need to allocate descriptors for all sg entries +- * (128) and potentially one alignment transfer for +- * each of those entries.
++ * The DMA descriptor table size is calculated as the maximum ++ * number of segments times 2, to allow for an alignment ++ * descriptor for each segment, plus 1 for a nop end descriptor, ++ * all multiplied by the descriptor size. + */ +- host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), +- ADMA_SIZE, &host->adma_addr, +- GFP_KERNEL); +- host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); +- if (!host->adma_desc || !host->align_buffer) { +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ if (host->flags & SDHCI_USE_64_BIT_DMA) { ++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * ++ SDHCI_ADMA2_64_DESC_SZ; ++ host->align_buffer_sz = SDHCI_MAX_SEGS * ++ SDHCI_ADMA2_64_ALIGN; ++ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; ++ host->align_sz = SDHCI_ADMA2_64_ALIGN; ++ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; ++ } else { ++ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * ++ SDHCI_ADMA2_32_DESC_SZ; ++ host->align_buffer_sz = SDHCI_MAX_SEGS * ++ SDHCI_ADMA2_32_ALIGN; ++ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; ++ host->align_sz = SDHCI_ADMA2_32_ALIGN; ++ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; ++ } ++ host->adma_table = dma_alloc_coherent(mmc_dev(mmc), ++ host->adma_table_sz, ++ &host->adma_addr, ++ GFP_KERNEL); ++ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); ++ if (!host->adma_table || !host->align_buffer) { ++ if (host->adma_table) ++ dma_free_coherent(mmc_dev(mmc), ++ host->adma_table_sz, ++ host->adma_table, ++ host->adma_addr); + kfree(host->align_buffer); + pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; +- } else if (host->adma_addr & 3) { ++ } else if (host->adma_addr & host->align_mask) { + pr_warn("%s: unable to allocate aligned ADMA descriptor\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, ++ host->adma_table, host->adma_addr); + kfree(host->align_buffer); +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; + } + } +@@ -2995,7 +3068,8 @@ int sdhci_add_host(struct sdhci_host *host) + /* Auto-CMD23 stuff only works in ADMA or PIO. */ + if ((host->version >= SDHCI_SPEC_300) && + ((host->flags & SDHCI_USE_ADMA) || +- !(host->flags & SDHCI_USE_SDMA))) { ++ !(host->flags & SDHCI_USE_SDMA)) && ++ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { + host->flags |= SDHCI_AUTO_CMD23; + DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); + } else { +@@ -3152,13 +3226,14 @@ int sdhci_add_host(struct sdhci_host *host) + SDHCI_MAX_CURRENT_MULTIPLIER; + } + +- /* If OCR set by external regulators, use it instead */ ++ /* If OCR set by host, use it instead. */ ++ if (host->ocr_mask) ++ ocr_avail = host->ocr_mask; ++ ++ /* If OCR set by external regulators, give it the highest priority. */ + if (mmc->ocr_avail) + ocr_avail = mmc->ocr_avail; + +- if (host->ocr_mask) +- ocr_avail &= host->ocr_mask; +- + mmc->ocr_avail = ocr_avail; + mmc->ocr_avail_sdio = ocr_avail; + if (host->ocr_avail_sdio) +@@ -3185,11 +3260,11 @@ int sdhci_add_host(struct sdhci_host *host) + * can do scatter/gather or not.
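++ * (With this patch that means SDHCI_MAX_SEGS for the ADMA and PIO ++ * cases below, and a single segment for SDMA.)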
+ */ + if (host->flags & SDHCI_USE_ADMA) +- mmc->max_segs = 128; ++ mmc->max_segs = SDHCI_MAX_SEGS; + else if (host->flags & SDHCI_USE_SDMA) + mmc->max_segs = 1; + else /* PIO */ +- mmc->max_segs = 128; ++ mmc->max_segs = SDHCI_MAX_SEGS; + + /* + * Maximum number of sectors in one transfer. Limited by DMA boundary +@@ -3287,7 +3362,8 @@ int sdhci_add_host(struct sdhci_host *host) + + pr_info("%s: SDHCI controller on %s [%s] using %s\n", + mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), +- (host->flags & SDHCI_USE_ADMA) ? "ADMA" : ++ (host->flags & SDHCI_USE_ADMA) ? ++ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : + (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); + + sdhci_enable_card_detection(host); +@@ -3355,12 +3431,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); + +- if (host->adma_desc) +- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, +- host->adma_desc, host->adma_addr); ++ if (host->adma_table) ++ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, ++ host->adma_table, host->adma_addr); + kfree(host->align_buffer); + +- host->adma_desc = NULL; ++ host->adma_table = NULL; + host->align_buffer = NULL; + } + +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index 31896a7..5220f36 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -227,6 +227,7 @@ + /* 55-57 reserved */ + + #define SDHCI_ADMA_ADDRESS 0x58 ++#define SDHCI_ADMA_ADDRESS_HI 0x5C + + /* 60-FB reserved */ + +@@ -266,6 +267,46 @@ + #define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) + #define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) + ++/* ADMA2 32-bit DMA descriptor size */ ++#define SDHCI_ADMA2_32_DESC_SZ 8 ++ ++/* ADMA2 32-bit DMA alignment */ ++#define SDHCI_ADMA2_32_ALIGN 4 ++ ++/* ADMA2 32-bit descriptor */ ++struct sdhci_adma2_32_desc { ++ __le16 cmd; ++ __le16 len; ++ __le32 addr; ++} __packed __aligned(SDHCI_ADMA2_32_ALIGN); ++ ++/* ADMA2 64-bit DMA descriptor size */ ++#define SDHCI_ADMA2_64_DESC_SZ 12 ++ ++/* ADMA2 64-bit DMA alignment */ ++#define SDHCI_ADMA2_64_ALIGN 8 ++ ++/* ++ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte ++ * aligned. ++ */ ++struct sdhci_adma2_64_desc { ++ __le16 cmd; ++ __le16 len; ++ __le32 addr_lo; ++ __le32 addr_hi; ++} __packed __aligned(4); ++ ++#define ADMA2_TRAN_VALID 0x21 ++#define ADMA2_NOP_END_VALID 0x3 ++#define ADMA2_END 0x2 ++ ++/* ++ * Maximum segments assuming a 512KiB maximum requisition size and a minimum ++ * 4KiB page size. 
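++ * (Worst case, as a sketch: a 512 KiB request built entirely from ++ * discontiguous 4 KiB pages needs 512 / 4 = 128 descriptors, which is ++ * where the value below comes from.)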
++ */ ++#define SDHCI_MAX_SEGS 128 ++ + struct sdhci_ops { + #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS + u32 (*read_l)(struct sdhci_host *host, int reg); +@@ -296,6 +337,7 @@ struct sdhci_ops { + void (*adma_workaround)(struct sdhci_host *host, u32 intmask); + void (*platform_init)(struct sdhci_host *host); + void (*card_event)(struct sdhci_host *host); ++ void (*voltage_switch)(struct sdhci_host *host); + }; + + #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +index dd10646..34ce759 100644 +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -429,7 +429,7 @@ config MTD_NAND_FSL_ELBC + + config MTD_NAND_FSL_IFC + tristate "NAND support for Freescale IFC controller" +- depends on MTD_NAND && FSL_SOC ++ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) + select FSL_IFC + select MEMORY + help +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 2338124..c8be272 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -31,7 +31,6 @@ + #include + #include + +-#define FSL_IFC_V1_1_0 0x01010000 + #define ERR_BYTE 0xFF /* Value returned for read + bytes when read failed */ + #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait +@@ -234,13 +233,13 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) + struct nand_chip *chip = mtd->priv; + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + int buf_num; + + ifc_nand_ctrl->page = page_addr; + /* Program ROW0/COL0 */ +- iowrite32be(page_addr, &ifc->ifc_nand.row0); +- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); ++ ifc_out32(page_addr, &ifc->ifc_nand.row0); ++ ifc_out32((oob ? 
IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); + + buf_num = page_addr & priv->bufnum_mask; + +@@ -297,28 +296,28 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + u32 eccstat[4]; + int i; + + /* set the chip select for NAND Transaction */ +- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT, +- &ifc->ifc_nand.nand_csel); ++ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT, ++ &ifc->ifc_nand.nand_csel); + + dev_vdbg(priv->dev, + "%s: fir0=%08x fcr0=%08x\n", + __func__, +- ioread32be(&ifc->ifc_nand.nand_fir0), +- ioread32be(&ifc->ifc_nand.nand_fcr0)); ++ ifc_in32(&ifc->ifc_nand.nand_fir0), ++ ifc_in32(&ifc->ifc_nand.nand_fcr0)); + + ctrl->nand_stat = 0; + + /* start read/write seq */ +- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); ++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); + + /* wait for command complete flag or timeout */ + wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, +- IFC_TIMEOUT_MSECS * HZ/1000); ++ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); + + /* ctrl->nand_stat will be updated from IRQ context */ + if (!ctrl->nand_stat) +@@ -337,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) + int sector_end = sector + chip->ecc.steps - 1; + + for (i = sector / 4; i <= sector_end / 4; i++) +- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]); ++ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); + + for (i = sector; i <= sector_end; i++) { + errors = check_read_ecc(mtd, ctrl, eccstat, i); +@@ -373,37 +372,37 @@ static void fsl_ifc_do_read(struct nand_chip *chip, + { + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + + /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ + if (mtd->writesize > 512) { +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | +- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); +- +- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | +- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | ++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); ++ ++ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | ++ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), ++ &ifc->ifc_nand.nand_fcr0); + } else { +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), ++ 
&ifc->ifc_nand.nand_fir0); ++ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); + + if (oob) +- iowrite32be(NAND_CMD_READOOB << +- IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(NAND_CMD_READOOB << ++ IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + else +- iowrite32be(NAND_CMD_READ0 << +- IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(NAND_CMD_READ0 << ++ IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + } + } + +@@ -413,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + struct nand_chip *chip = mtd->priv; + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + + /* clear the read buffer */ + ifc_nand_ctrl->read_bytes = 0; +@@ -423,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + switch (command) { + /* READ0 read the entire buffer to use hardware ECC. */ + case NAND_CMD_READ0: +- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, page_addr, 0); + + ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; +@@ -438,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + /* READOOB reads only the OOB because no ECC is performed. */ + case NAND_CMD_READOOB: +- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, column, page_addr, 1); + + ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; +@@ -454,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + if (command == NAND_CMD_PARAM) + timing = IFC_FIR_OP_RBCD; + +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | +- (timing << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(column, &ifc->ifc_nand.row3); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | ++ (timing << IFC_NAND_FIR0_OP2_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(column, &ifc->ifc_nand.row3); + + /* + * although currently it's 8 bytes for READID, we always read + * the maximum 256 bytes(for PARAM) + */ +- iowrite32be(256, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(256, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = 256; + + set_addr(mtd, 0, 0, 0); +@@ -481,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + /* ERASE2 uses the block and page address from ERASE1 */ + case NAND_CMD_ERASE2: +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), ++ &ifc->ifc_nand.nand_fir0); + +- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | +- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | ++ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), ++ &ifc->ifc_nand.nand_fcr0); + +- iowrite32be(0, 
&ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = 0; + fsl_ifc_run_command(mtd); + return; +@@ -507,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | + (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT); + +- iowrite32be( +- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | +- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be( +- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | +- (IFC_FIR_OP_RDSTAT << +- IFC_NAND_FIR1_OP6_SHIFT) | +- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), +- &ifc->ifc_nand.nand_fir1); ++ ifc_out32( ++ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | ++ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | ++ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | ++ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32( ++ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) | ++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), ++ &ifc->ifc_nand.nand_fir1); + } else { + nand_fcr0 = ((NAND_CMD_PAGEPROG << + IFC_NAND_FCR0_CMD1_SHIFT) | +@@ -528,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + (NAND_CMD_STATUS << + IFC_NAND_FCR0_CMD3_SHIFT)); + +- iowrite32be( ++ ifc_out32( + (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | + (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | + (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT), + &ifc->ifc_nand.nand_fir0); +- iowrite32be( +- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | +- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | +- (IFC_FIR_OP_RDSTAT << +- IFC_NAND_FIR1_OP7_SHIFT) | +- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), +- &ifc->ifc_nand.nand_fir1); ++ ifc_out32( ++ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | ++ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) | ++ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), ++ &ifc->ifc_nand.nand_fir1); + + if (column >= mtd->writesize) + nand_fcr0 |= +@@ -556,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + column -= mtd->writesize; + ifc_nand_ctrl->oob = 1; + } +- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0); + set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); + return; + } +@@ -564,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ + case NAND_CMD_PAGEPROG: { + if (ifc_nand_ctrl->oob) { +- iowrite32be(ifc_nand_ctrl->index - +- ifc_nand_ctrl->column, +- &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(ifc_nand_ctrl->index - ++ ifc_nand_ctrl->column, ++ &ifc->ifc_nand.nand_fbcr); + } else { +- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); + } + + fsl_ifc_run_command(mtd); + return; + } + +- case NAND_CMD_STATUS: +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(1, 
&ifc->ifc_nand.nand_fbcr); ++ case NAND_CMD_STATUS: { ++ void __iomem *addr; ++ ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, 0, 0); + ifc_nand_ctrl->read_bytes = 1; + +@@ -591,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + * The chip always seems to report that it is + * write-protected, even when it is not. + */ ++ addr = ifc_nand_ctrl->addr; + if (chip->options & NAND_BUSWIDTH_16) +- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP); ++ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr); + else +- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); ++ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr); + return; ++ } + + case NAND_CMD_RESET: +- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); + fsl_ifc_run_command(mtd); + return; + +@@ -659,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd) + */ + if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { + offset = ifc_nand_ctrl->index++; +- return in_8(ifc_nand_ctrl->addr + offset); ++ return ifc_in8(ifc_nand_ctrl->addr + offset); + } + + dev_err(priv->dev, "%s: beyond end of buffer\n", __func__); +@@ -681,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) + * next byte. + */ + if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { +- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); ++ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); + ifc_nand_ctrl->index += 2; + return (uint8_t) data; + } +@@ -723,22 +724,22 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) + { + struct fsl_ifc_mtd *priv = chip->priv; + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; + u32 nand_fsr; + + /* Use READ_STATUS command, but wait for the device to be ready */ +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), ++ &ifc->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc->ifc_nand.nand_fcr0); ++ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); + set_addr(mtd, 0, 0, 0); + ifc_nand_ctrl->read_bytes = 1; + + fsl_ifc_run_command(mtd); + +- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr); ++ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); + + /* + * The chip always seems to report that it is +@@ -825,67 +826,72 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) + static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) + { + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; ++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; + uint32_t csor = 0, csor_8k = 0, csor_ext 
= 0; + uint32_t cs = priv->bank; + + /* Save CSOR and CSOR_ext */ +- csor = ioread32be(&ifc->csor_cs[cs].csor); +- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext); ++ csor = ifc_in32(&ifc_global->csor_cs[cs].csor); ++ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); + + /* change PageSize 8K and SpareSize 1K */ + csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; +- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor); +- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext); ++ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); ++ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); + + /* READID */ +- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | ++ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | + (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), +- &ifc->ifc_nand.nand_fir0); +- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, +- &ifc->ifc_nand.nand_fcr0); +- iowrite32be(0x0, &ifc->ifc_nand.row3); ++ &ifc_runtime->ifc_nand.nand_fir0); ++ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, ++ &ifc_runtime->ifc_nand.nand_fcr0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); + +- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); + + /* Program ROW0/COL0 */ +- iowrite32be(0x0, &ifc->ifc_nand.row0); +- iowrite32be(0x0, &ifc->ifc_nand.col0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); + + /* set the chip select for NAND Transaction */ +- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); ++ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, ++ &ifc_runtime->ifc_nand.nand_csel); + + /* start read seq */ +- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); ++ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, ++ &ifc_runtime->ifc_nand.nandseq_strt); + + /* wait for command complete flag or timeout */ + wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, +- IFC_TIMEOUT_MSECS * HZ/1000); ++ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); + + if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) + printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); + + /* Restore CSOR and CSOR_ext */ +- iowrite32be(csor, &ifc->csor_cs[cs].csor); +- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext); ++ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); ++ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); + } + + static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + { + struct fsl_ifc_ctrl *ctrl = priv->ctrl; +- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; ++ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; ++ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; + struct nand_chip *chip = &priv->chip; + struct nand_ecclayout *layout; +- u32 csor, ver; ++ u32 csor; + + /* Fill in fsl_ifc_mtd structure */ + priv->mtd.priv = chip; +- priv->mtd.owner = THIS_MODULE; ++ priv->mtd.dev.parent = priv->dev; + + /* fill in nand_chip structure */ + /* set up function call table */ +- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) ++ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) ++ & CSPR_PORT_SIZE_16) + chip->read_byte = fsl_ifc_read_byte16; + else + chip->read_byte = fsl_ifc_read_byte; +@@ -899,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; + +- iowrite32be(0x0, &ifc->ifc_nand.ncfgr); ++ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); + + /* set up nand options */ + chip->bbt_options = NAND_BBT_USE_FLASH; +
chip->options = NAND_NO_SUBPAGE_WRITE; + +- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { ++ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) ++ & CSPR_PORT_SIZE_16) { + chip->read_byte = fsl_ifc_read_byte16; + chip->options |= NAND_BUSWIDTH_16; + } else { +@@ -918,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->ecc.read_page = fsl_ifc_read_page; + chip->ecc.write_page = fsl_ifc_write_page; + +- csor = ioread32be(&ifc->csor_cs[priv->bank].csor); ++ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); + + /* Hardware generates ECC per 512 Bytes */ + chip->ecc.size = 512; +@@ -984,8 +991,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) + chip->ecc.mode = NAND_ECC_SOFT; + } + +- ver = ioread32be(&ifc->ifc_rev); +- if (ver == FSL_IFC_V1_1_0) ++ if (ctrl->version == FSL_IFC_VERSION_1_1_0) + fsl_ifc_sram_init(priv); + + return 0; +@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) + return 0; + } + +-static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, ++static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, + phys_addr_t addr) + { +- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr); ++ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); + + if (!(cspr & CSPR_V)) + return 0; +@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); + + static int fsl_ifc_nand_probe(struct platform_device *dev) + { +- struct fsl_ifc_regs __iomem *ifc; ++ struct fsl_ifc_runtime __iomem *ifc; + struct fsl_ifc_mtd *priv; + struct resource res; + static const char *part_probe_types[] +@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + struct mtd_part_parser_data ppdata; + + ppdata.of_node = dev->dev.of_node; +- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) ++ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) + return -ENODEV; +- ifc = fsl_ifc_ctrl_dev->regs; ++ ifc = fsl_ifc_ctrl_dev->rregs; + + /* get, allocate and map the memory resource */ + ret = of_address_to_resource(node, 0, &res); +@@ -1045,12 +1051,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + } + + /* find which chip select it is connected to */ +- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) { +- if (match_bank(ifc, bank, res.start)) ++ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { ++ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) + break; + } + +- if (bank >= FSL_IFC_BANK_COUNT) { ++ if (bank >= fsl_ifc_ctrl_dev->banks) { + dev_err(&dev->dev, "%s: address did not match any chip selects\n", + __func__); + return -ENODEV; +@@ -1094,16 +1100,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) + + dev_set_drvdata(priv->dev, priv); + +- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN | +- IFC_NAND_EVTER_EN_FTOER_EN | +- IFC_NAND_EVTER_EN_WPER_EN, +- &ifc->ifc_nand.nand_evter_en); ++ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN | ++ IFC_NAND_EVTER_EN_FTOER_EN | ++ IFC_NAND_EVTER_EN_WPER_EN, ++ &ifc->ifc_nand.nand_evter_en); + + /* enable NAND Machine Interrupts */ +- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN | +- IFC_NAND_EVTER_INTR_FTOERIR_EN | +- IFC_NAND_EVTER_INTR_WPERIR_EN, +- &ifc->ifc_nand.nand_evter_intr_en); ++ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN | ++ IFC_NAND_EVTER_INTR_FTOERIR_EN | ++ IFC_NAND_EVTER_INTR_WPERIR_EN, ++ &ifc->ifc_nand.nand_evter_intr_en); + priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); + if (!priv->mtd.name) { + ret = -ENOMEM; +@@ -1163,6 +1169,7 @@ static const struct of_device_id 
fsl_ifc_nand_match[] = { + }, + {} + }; ++MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match); + + static struct platform_driver fsl_ifc_nand_driver = { + .driver = { +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index a4a7396..0359cfd 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -86,11 +86,11 @@ + #include + #include + #include ++#include + + #include + #ifdef CONFIG_PPC + #include +-#include + #endif + #include + #include +@@ -1720,8 +1720,10 @@ static void gfar_configure_serdes(struct net_device *dev) + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ +- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) ++ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { ++ put_device(&tbiphy->dev); + return; ++ } + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index 2973c60..cdc9f8a 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -65,6 +65,11 @@ config VITESSE_PHY + ---help--- + Currently supports the vsc8244 + ++config TERANETICS_PHY ++ tristate "Drivers for the Teranetics PHYs" ++ ---help--- ++ Currently supports the Teranetics TN2020 ++ + config SMSC_PHY + tristate "Drivers for SMSC PHYs" + ---help--- +@@ -124,8 +129,8 @@ config MICREL_PHY + Supports the KSZ9021, VSC8201, KS8001 PHYs. + + config FIXED_PHY +- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" +- depends on PHYLIB=y ++ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" ++ depends on PHYLIB + ---help--- + Adds the platform "fixed" MDIO Bus to cover the boards that use + PHYs that are not connected to the real MDIO bus. +@@ -207,6 +212,11 @@ config MDIO_BUS_MUX_MMIOREG + the FPGA's registers. + + Currently, only 8-bit registers are supported. ++config FSL_10GBASE_KR ++ tristate "Support for 10GBASE-KR on Freescale XFI interface" ++ depends on OF_MDIO ++ help ++ This module provides a driver for Freescale XFI's 10GBASE-KR. 
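++10GBASE-KR is the IEEE 802.3 PHY type for 10Gb/s Ethernet over a ++ single backplane lane; link bring-up uses the standard's link ++ training handshake. ++ ++ If unsure, say N.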
+ + config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" +diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile +index b5c8f9f..8ad4ac6 100644 +--- a/drivers/net/phy/Makefile ++++ b/drivers/net/phy/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o + obj-$(CONFIG_LXT_PHY) += lxt.o + obj-$(CONFIG_QSEMI_PHY) += qsemi.o + obj-$(CONFIG_SMSC_PHY) += smsc.o ++obj-$(CONFIG_TERANETICS_PHY) += teranetics.o + obj-$(CONFIG_VITESSE_PHY) += vitesse.o + obj-$(CONFIG_BROADCOM_PHY) += broadcom.o + obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o +@@ -18,7 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o + obj-$(CONFIG_ICPLUS_PHY) += icplus.o + obj-$(CONFIG_REALTEK_PHY) += realtek.o + obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o +-obj-$(CONFIG_FIXED_PHY) += fixed.o ++obj-$(CONFIG_FIXED_PHY) += fixed_phy.o + obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o + obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o + obj-$(CONFIG_NATIONAL_PHY) += national.o +@@ -32,6 +33,7 @@ obj-$(CONFIG_AMD_PHY) += amd.o + obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o + obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o + obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o ++obj-$(CONFIG_FSL_10GBASE_KR) += fsl_10gkr.o + obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o + obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o + obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o +diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c +index fdc1b41..a4f0886 100644 +--- a/drivers/net/phy/at803x.c ++++ b/drivers/net/phy/at803x.c +@@ -307,6 +307,8 @@ static struct phy_driver at803x_driver[] = { + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, ++ .ack_interrupt = at803x_ack_interrupt, ++ .config_intr = at803x_config_intr, + .driver = { + .owner = THIS_MODULE, + }, +@@ -326,6 +328,8 @@ static struct phy_driver at803x_driver[] = { + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, ++ .ack_interrupt = at803x_ack_interrupt, ++ .config_intr = at803x_config_intr, + .driver = { + .owner = THIS_MODULE, + }, +diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c +deleted file mode 100644 +index 47872ca..0000000 +--- a/drivers/net/phy/fixed.c ++++ /dev/null +@@ -1,336 +0,0 @@ +-/* +- * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) +- * +- * Author: Vitaly Bordug +- * Anton Vorontsov +- * +- * Copyright (c) 2006-2007 MontaVista Software, Inc. +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; either version 2 of the License, or (at your +- * option) any later version. 
+- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#define MII_REGS_NUM 29 +- +-struct fixed_mdio_bus { +- int irqs[PHY_MAX_ADDR]; +- struct mii_bus *mii_bus; +- struct list_head phys; +-}; +- +-struct fixed_phy { +- int addr; +- u16 regs[MII_REGS_NUM]; +- struct phy_device *phydev; +- struct fixed_phy_status status; +- int (*link_update)(struct net_device *, struct fixed_phy_status *); +- struct list_head node; +-}; +- +-static struct platform_device *pdev; +-static struct fixed_mdio_bus platform_fmb = { +- .phys = LIST_HEAD_INIT(platform_fmb.phys), +-}; +- +-static int fixed_phy_update_regs(struct fixed_phy *fp) +-{ +- u16 bmsr = BMSR_ANEGCAPABLE; +- u16 bmcr = 0; +- u16 lpagb = 0; +- u16 lpa = 0; +- +- if (fp->status.duplex) { +- bmcr |= BMCR_FULLDPLX; +- +- switch (fp->status.speed) { +- case 1000: +- bmsr |= BMSR_ESTATEN; +- bmcr |= BMCR_SPEED1000; +- lpagb |= LPA_1000FULL; +- break; +- case 100: +- bmsr |= BMSR_100FULL; +- bmcr |= BMCR_SPEED100; +- lpa |= LPA_100FULL; +- break; +- case 10: +- bmsr |= BMSR_10FULL; +- lpa |= LPA_10FULL; +- break; +- default: +- pr_warn("fixed phy: unknown speed\n"); +- return -EINVAL; +- } +- } else { +- switch (fp->status.speed) { +- case 1000: +- bmsr |= BMSR_ESTATEN; +- bmcr |= BMCR_SPEED1000; +- lpagb |= LPA_1000HALF; +- break; +- case 100: +- bmsr |= BMSR_100HALF; +- bmcr |= BMCR_SPEED100; +- lpa |= LPA_100HALF; +- break; +- case 10: +- bmsr |= BMSR_10HALF; +- lpa |= LPA_10HALF; +- break; +- default: +- pr_warn("fixed phy: unknown speed\n"); +- return -EINVAL; +- } +- } +- +- if (fp->status.link) +- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; +- +- if (fp->status.pause) +- lpa |= LPA_PAUSE_CAP; +- +- if (fp->status.asym_pause) +- lpa |= LPA_PAUSE_ASYM; +- +- fp->regs[MII_PHYSID1] = 0; +- fp->regs[MII_PHYSID2] = 0; +- +- fp->regs[MII_BMSR] = bmsr; +- fp->regs[MII_BMCR] = bmcr; +- fp->regs[MII_LPA] = lpa; +- fp->regs[MII_STAT1000] = lpagb; +- +- return 0; +-} +- +-static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) +-{ +- struct fixed_mdio_bus *fmb = bus->priv; +- struct fixed_phy *fp; +- +- if (reg_num >= MII_REGS_NUM) +- return -1; +- +- /* We do not support emulating Clause 45 over Clause 22 register reads +- * return an error instead of bogus data. +- */ +- switch (reg_num) { +- case MII_MMD_CTRL: +- case MII_MMD_DATA: +- return -1; +- default: +- break; +- } +- +- list_for_each_entry(fp, &fmb->phys, node) { +- if (fp->addr == phy_addr) { +- /* Issue callback if user registered it. */ +- if (fp->link_update) { +- fp->link_update(fp->phydev->attached_dev, +- &fp->status); +- fixed_phy_update_regs(fp); +- } +- return fp->regs[reg_num]; +- } +- } +- +- return 0xFFFF; +-} +- +-static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, +- u16 val) +-{ +- return 0; +-} +- +-/* +- * If something weird is required to be done with link/speed, +- * network driver is able to assign a function to implement this. +- * May be useful for PHY's that need to be software-driven. 
+- */ +-int fixed_phy_set_link_update(struct phy_device *phydev, +- int (*link_update)(struct net_device *, +- struct fixed_phy_status *)) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp; +- +- if (!link_update || !phydev || !phydev->bus) +- return -EINVAL; +- +- list_for_each_entry(fp, &fmb->phys, node) { +- if (fp->addr == phydev->addr) { +- fp->link_update = link_update; +- fp->phydev = phydev; +- return 0; +- } +- } +- +- return -ENOENT; +-} +-EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); +- +-int fixed_phy_add(unsigned int irq, int phy_addr, +- struct fixed_phy_status *status) +-{ +- int ret; +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp; +- +- fp = kzalloc(sizeof(*fp), GFP_KERNEL); +- if (!fp) +- return -ENOMEM; +- +- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); +- +- fmb->irqs[phy_addr] = irq; +- +- fp->addr = phy_addr; +- fp->status = *status; +- +- ret = fixed_phy_update_regs(fp); +- if (ret) +- goto err_regs; +- +- list_add_tail(&fp->node, &fmb->phys); +- +- return 0; +- +-err_regs: +- kfree(fp); +- return ret; +-} +-EXPORT_SYMBOL_GPL(fixed_phy_add); +- +-void fixed_phy_del(int phy_addr) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct fixed_phy *fp, *tmp; +- +- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { +- if (fp->addr == phy_addr) { +- list_del(&fp->node); +- kfree(fp); +- return; +- } +- } +-} +-EXPORT_SYMBOL_GPL(fixed_phy_del); +- +-static int phy_fixed_addr; +-static DEFINE_SPINLOCK(phy_fixed_addr_lock); +- +-struct phy_device *fixed_phy_register(unsigned int irq, +- struct fixed_phy_status *status, +- struct device_node *np) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- struct phy_device *phy; +- int phy_addr; +- int ret; +- +- /* Get the next available PHY address, up to PHY_MAX_ADDR */ +- spin_lock(&phy_fixed_addr_lock); +- if (phy_fixed_addr == PHY_MAX_ADDR) { +- spin_unlock(&phy_fixed_addr_lock); +- return ERR_PTR(-ENOSPC); +- } +- phy_addr = phy_fixed_addr++; +- spin_unlock(&phy_fixed_addr_lock); +- +- ret = fixed_phy_add(PHY_POLL, phy_addr, status); +- if (ret < 0) +- return ERR_PTR(ret); +- +- phy = get_phy_device(fmb->mii_bus, phy_addr, false); +- if (!phy || IS_ERR(phy)) { +- fixed_phy_del(phy_addr); +- return ERR_PTR(-EINVAL); +- } +- +- of_node_get(np); +- phy->dev.of_node = np; +- +- ret = phy_device_register(phy); +- if (ret) { +- phy_device_free(phy); +- of_node_put(np); +- fixed_phy_del(phy_addr); +- return ERR_PTR(ret); +- } +- +- return phy; +-} +- +-static int __init fixed_mdio_bus_init(void) +-{ +- struct fixed_mdio_bus *fmb = &platform_fmb; +- int ret; +- +- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); +- if (IS_ERR(pdev)) { +- ret = PTR_ERR(pdev); +- goto err_pdev; +- } +- +- fmb->mii_bus = mdiobus_alloc(); +- if (fmb->mii_bus == NULL) { +- ret = -ENOMEM; +- goto err_mdiobus_reg; +- } +- +- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); +- fmb->mii_bus->name = "Fixed MDIO Bus"; +- fmb->mii_bus->priv = fmb; +- fmb->mii_bus->parent = &pdev->dev; +- fmb->mii_bus->read = &fixed_mdio_read; +- fmb->mii_bus->write = &fixed_mdio_write; +- fmb->mii_bus->irq = fmb->irqs; +- +- ret = mdiobus_register(fmb->mii_bus); +- if (ret) +- goto err_mdiobus_alloc; +- +- return 0; +- +-err_mdiobus_alloc: +- mdiobus_free(fmb->mii_bus); +-err_mdiobus_reg: +- platform_device_unregister(pdev); +-err_pdev: +- return ret; +-} +-module_init(fixed_mdio_bus_init); +- +-static void __exit fixed_mdio_bus_exit(void) +-{ +- struct fixed_mdio_bus *fmb = 
&platform_fmb; +- struct fixed_phy *fp, *tmp; +- +- mdiobus_unregister(fmb->mii_bus); +- mdiobus_free(fmb->mii_bus); +- platform_device_unregister(pdev); +- +- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { +- list_del(&fp->node); +- kfree(fp); +- } +-} +-module_exit(fixed_mdio_bus_exit); +- +-MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); +-MODULE_AUTHOR("Vitaly Bordug"); +-MODULE_LICENSE("GPL"); +diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c +new file mode 100644 +index 0000000..88b8194 +--- /dev/null ++++ b/drivers/net/phy/fixed_phy.c +@@ -0,0 +1,370 @@ ++/* ++ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) ++ * ++ * Author: Vitaly Bordug ++ * Anton Vorontsov ++ * ++ * Copyright (c) 2006-2007 MontaVista Software, Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MII_REGS_NUM 29 ++ ++struct fixed_mdio_bus { ++ int irqs[PHY_MAX_ADDR]; ++ struct mii_bus *mii_bus; ++ struct list_head phys; ++}; ++ ++struct fixed_phy { ++ int addr; ++ u16 regs[MII_REGS_NUM]; ++ struct phy_device *phydev; ++ struct fixed_phy_status status; ++ int (*link_update)(struct net_device *, struct fixed_phy_status *); ++ struct list_head node; ++}; ++ ++static struct platform_device *pdev; ++static struct fixed_mdio_bus platform_fmb = { ++ .phys = LIST_HEAD_INIT(platform_fmb.phys), ++}; ++ ++static int fixed_phy_update_regs(struct fixed_phy *fp) ++{ ++ u16 bmsr = BMSR_ANEGCAPABLE; ++ u16 bmcr = 0; ++ u16 lpagb = 0; ++ u16 lpa = 0; ++ ++ if (fp->status.duplex) { ++ bmcr |= BMCR_FULLDPLX; ++ ++ switch (fp->status.speed) { ++ case 10000: ++ break; ++ case 1000: ++ bmsr |= BMSR_ESTATEN; ++ bmcr |= BMCR_SPEED1000; ++ lpagb |= LPA_1000FULL; ++ break; ++ case 100: ++ bmsr |= BMSR_100FULL; ++ bmcr |= BMCR_SPEED100; ++ lpa |= LPA_100FULL; ++ break; ++ case 10: ++ bmsr |= BMSR_10FULL; ++ lpa |= LPA_10FULL; ++ break; ++ default: ++ pr_warn("fixed phy: unknown speed\n"); ++ return -EINVAL; ++ } ++ } else { ++ switch (fp->status.speed) { ++ case 10000: ++ break; ++ case 1000: ++ bmsr |= BMSR_ESTATEN; ++ bmcr |= BMCR_SPEED1000; ++ lpagb |= LPA_1000HALF; ++ break; ++ case 100: ++ bmsr |= BMSR_100HALF; ++ bmcr |= BMCR_SPEED100; ++ lpa |= LPA_100HALF; ++ break; ++ case 10: ++ bmsr |= BMSR_10HALF; ++ lpa |= LPA_10HALF; ++ break; ++ default: ++ pr_warn("fixed phy: unknown speed\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (fp->status.link) ++ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; ++ ++ if (fp->status.pause) ++ lpa |= LPA_PAUSE_CAP; ++ ++ if (fp->status.asym_pause) ++ lpa |= LPA_PAUSE_ASYM; ++ ++ fp->regs[MII_PHYSID1] = 0; ++ fp->regs[MII_PHYSID2] = 0; ++ ++ fp->regs[MII_BMSR] = bmsr; ++ fp->regs[MII_BMCR] = bmcr; ++ fp->regs[MII_LPA] = lpa; ++ fp->regs[MII_STAT1000] = lpagb; ++ ++ return 0; ++} ++ ++static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) ++{ ++ struct fixed_mdio_bus *fmb = bus->priv; ++ struct fixed_phy *fp; ++ ++ if (reg_num >= MII_REGS_NUM) ++ return -1; ++ ++ /* We do not support emulating Clause 45 over Clause 22 register reads ++ * return an error instead of bogus data. 
++ */ ++ switch (reg_num) { ++ case MII_MMD_CTRL: ++ case MII_MMD_DATA: ++ return -1; ++ default: ++ break; ++ } ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phy_addr) { ++ /* Issue callback if user registered it. */ ++ if (fp->link_update) { ++ fp->link_update(fp->phydev->attached_dev, ++ &fp->status); ++ fixed_phy_update_regs(fp); ++ } ++ return fp->regs[reg_num]; ++ } ++ } ++ ++ return 0xFFFF; ++} ++ ++static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, ++ u16 val) ++{ ++ return 0; ++} ++ ++/* ++ * If something weird is required to be done with link/speed, ++ * network driver is able to assign a function to implement this. ++ * May be useful for PHY's that need to be software-driven. ++ */ ++int fixed_phy_set_link_update(struct phy_device *phydev, ++ int (*link_update)(struct net_device *, ++ struct fixed_phy_status *)) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ if (!phydev || !phydev->bus) ++ return -EINVAL; ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phydev->addr) { ++ fp->link_update = link_update; ++ fp->phydev = phydev; ++ return 0; ++ } ++ } ++ ++ return -ENOENT; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); ++ ++int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ if (!phydev || !phydev->bus) ++ return -EINVAL; ++ ++ list_for_each_entry(fp, &fmb->phys, node) { ++ if (fp->addr == phydev->addr) { ++#define _UPD(x) if (changed->x) \ ++ fp->status.x = status->x ++ _UPD(link); ++ _UPD(speed); ++ _UPD(duplex); ++ _UPD(pause); ++ _UPD(asym_pause); ++#undef _UPD ++ fixed_phy_update_regs(fp); ++ return 0; ++ } ++ } ++ ++ return -ENOENT; ++} ++EXPORT_SYMBOL(fixed_phy_update_state); ++ ++int fixed_phy_add(unsigned int irq, int phy_addr, ++ struct fixed_phy_status *status) ++{ ++ int ret; ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp; ++ ++ fp = kzalloc(sizeof(*fp), GFP_KERNEL); ++ if (!fp) ++ return -ENOMEM; ++ ++ memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); ++ ++ fmb->irqs[phy_addr] = irq; ++ ++ fp->addr = phy_addr; ++ fp->status = *status; ++ ++ ret = fixed_phy_update_regs(fp); ++ if (ret) ++ goto err_regs; ++ ++ list_add_tail(&fp->node, &fmb->phys); ++ ++ return 0; ++ ++err_regs: ++ kfree(fp); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_add); ++ ++void fixed_phy_del(int phy_addr) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp, *tmp; ++ ++ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { ++ if (fp->addr == phy_addr) { ++ list_del(&fp->node); ++ kfree(fp); ++ return; ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(fixed_phy_del); ++ ++static int phy_fixed_addr; ++static DEFINE_SPINLOCK(phy_fixed_addr_lock); ++ ++struct phy_device *fixed_phy_register(unsigned int irq, ++ struct fixed_phy_status *status, ++ struct device_node *np) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct phy_device *phy; ++ int phy_addr; ++ int ret; ++ ++ /* Get the next available PHY address, up to PHY_MAX_ADDR */ ++ spin_lock(&phy_fixed_addr_lock); ++ if (phy_fixed_addr == PHY_MAX_ADDR) { ++ spin_unlock(&phy_fixed_addr_lock); ++ return ERR_PTR(-ENOSPC); ++ } ++ phy_addr = phy_fixed_addr++; ++ spin_unlock(&phy_fixed_addr_lock); ++ ++ ret = fixed_phy_add(PHY_POLL, phy_addr, status); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ ++ phy = get_phy_device(fmb->mii_bus, 
phy_addr, false); ++ if (!phy || IS_ERR(phy)) { ++ fixed_phy_del(phy_addr); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ of_node_get(np); ++ phy->dev.of_node = np; ++ ++ ret = phy_device_register(phy); ++ if (ret) { ++ phy_device_free(phy); ++ of_node_put(np); ++ fixed_phy_del(phy_addr); ++ return ERR_PTR(ret); ++ } ++ ++ return phy; ++} ++EXPORT_SYMBOL_GPL(fixed_phy_register); ++ ++static int __init fixed_mdio_bus_init(void) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ int ret; ++ ++ pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); ++ if (IS_ERR(pdev)) { ++ ret = PTR_ERR(pdev); ++ goto err_pdev; ++ } ++ ++ fmb->mii_bus = mdiobus_alloc(); ++ if (fmb->mii_bus == NULL) { ++ ret = -ENOMEM; ++ goto err_mdiobus_reg; ++ } ++ ++ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); ++ fmb->mii_bus->name = "Fixed MDIO Bus"; ++ fmb->mii_bus->priv = fmb; ++ fmb->mii_bus->parent = &pdev->dev; ++ fmb->mii_bus->read = &fixed_mdio_read; ++ fmb->mii_bus->write = &fixed_mdio_write; ++ fmb->mii_bus->irq = fmb->irqs; ++ ++ ret = mdiobus_register(fmb->mii_bus); ++ if (ret) ++ goto err_mdiobus_alloc; ++ ++ return 0; ++ ++err_mdiobus_alloc: ++ mdiobus_free(fmb->mii_bus); ++err_mdiobus_reg: ++ platform_device_unregister(pdev); ++err_pdev: ++ return ret; ++} ++module_init(fixed_mdio_bus_init); ++ ++static void __exit fixed_mdio_bus_exit(void) ++{ ++ struct fixed_mdio_bus *fmb = &platform_fmb; ++ struct fixed_phy *fp, *tmp; ++ ++ mdiobus_unregister(fmb->mii_bus); ++ mdiobus_free(fmb->mii_bus); ++ platform_device_unregister(pdev); ++ ++ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { ++ list_del(&fp->node); ++ kfree(fp); ++ } ++} ++module_exit(fixed_mdio_bus_exit); ++ ++MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); ++MODULE_AUTHOR("Vitaly Bordug"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c +index 225c033..969a198 100644 +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -50,6 +50,7 @@ + #define MII_M1011_PHY_SCR 0x10 + #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 + ++#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 + #define MII_M1145_PHY_EXT_SR 0x1b + #define MII_M1145_PHY_EXT_CR 0x14 + #define MII_M1145_RGMII_RX_DELAY 0x0080 +@@ -495,6 +496,16 @@ static int m88e1111_config_init(struct phy_device *phydev) + err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + if (err < 0) + return err; ++ ++ /* make sure copper is selected */ ++ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); ++ if (err < 0) ++ return err; ++ ++ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, ++ err & (~0xff)); ++ if (err < 0) ++ return err; + } + + if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 50051f2..accd605 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -288,8 +288,11 @@ int mdiobus_register(struct mii_bus *bus) + + error: + while (--i >= 0) { +- if (bus->phy_map[i]) +- device_unregister(&bus->phy_map[i]->dev); ++ struct phy_device *phydev = bus->phy_map[i]; ++ if (phydev) { ++ phy_device_remove(phydev); ++ phy_device_free(phydev); ++ } + } + device_del(&bus->dev); + return err; +@@ -305,9 +308,11 @@ void mdiobus_unregister(struct mii_bus *bus) + + device_del(&bus->dev); + for (i = 0; i < PHY_MAX_ADDR; i++) { +- if (bus->phy_map[i]) +- device_unregister(&bus->phy_map[i]->dev); +- bus->phy_map[i] = NULL; ++ struct phy_device *phydev = bus->phy_map[i]; ++ if (phydev) { ++ phy_device_remove(phydev); 
++ phy_device_free(phydev); ++ } + } + } + EXPORT_SYMBOL(mdiobus_unregister); +@@ -421,6 +426,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) + { + struct phy_device *phydev = to_phy_device(dev); + struct phy_driver *phydrv = to_phy_driver(drv); ++ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); ++ int i; + + if (of_driver_match_device(dev, drv)) + return 1; +@@ -428,8 +435,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) + if (phydrv->match_phy_device) + return phydrv->match_phy_device(phydev); + +- return (phydrv->phy_id & phydrv->phy_id_mask) == +- (phydev->phy_id & phydrv->phy_id_mask); ++ if (phydev->is_c45) { ++ for (i = 1; i < num_ids; i++) { ++ if (!(phydev->c45_ids.devices_in_package & (1 << i))) ++ continue; ++ ++ if ((phydrv->phy_id & phydrv->phy_id_mask) == ++ (phydev->c45_ids.device_ids[i] & ++ phydrv->phy_id_mask)) ++ return 1; ++ } ++ return 0; ++ } else { ++ return (phydrv->phy_id & phydrv->phy_id_mask) == ++ (phydev->phy_id & phydrv->phy_id_mask); ++ } + } + + #ifdef CONFIG_PM +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c +index 91d6d03..840075e 100644 +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -768,6 +768,7 @@ void phy_state_machine(struct work_struct *work) + container_of(dwork, struct phy_device, state_queue); + bool needs_aneg = false, do_suspend = false, do_resume = false; + int err = 0; ++ int old_link; + + mutex_lock(&phydev->lock); + +@@ -814,6 +815,9 @@ void phy_state_machine(struct work_struct *work) + needs_aneg = true; + break; + case PHY_NOLINK: ++ if (phy_interrupt_is_valid(phydev)) ++ break; ++ + err = phy_read_status(phydev); + if (err) + break; +@@ -851,11 +855,18 @@ void phy_state_machine(struct work_struct *work) + phydev->adjust_link(phydev->attached_dev); + break; + case PHY_RUNNING: +- /* Only register a CHANGE if we are +- * polling or ignoring interrupts ++ /* Only register a CHANGE if we are polling or ignoring ++ * interrupts and link changed since latest checking. + */ +- if (!phy_interrupt_is_valid(phydev)) +- phydev->state = PHY_CHANGELINK; ++ if (!phy_interrupt_is_valid(phydev)) { ++ old_link = phydev->link; ++ err = phy_read_status(phydev); ++ if (err) ++ break; ++ ++ if (old_link != phydev->link) ++ phydev->state = PHY_CHANGELINK; ++ } + break; + case PHY_CHANGELINK: + err = phy_read_status(phydev); +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 70a0d88..07b1aa9 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -205,6 +205,37 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, + } + EXPORT_SYMBOL(phy_device_create); + ++/* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. ++ * @bus: the target MII bus ++ * @addr: PHY address on the MII bus ++ * @dev_addr: MMD address in the PHY. ++ * @devices_in_package: where to store the devices in package information. ++ * ++ * Description: reads devices in package registers of a MMD at @dev_addr ++ * from PHY at @addr on @bus. ++ * ++ * Returns: 0 on success, -EIO on failure. 
++ */ ++static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, ++ u32 *devices_in_package) ++{ ++ int phy_reg, reg_addr; ++ ++ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2; ++ phy_reg = mdiobus_read(bus, addr, reg_addr); ++ if (phy_reg < 0) ++ return -EIO; ++ *devices_in_package = (phy_reg & 0xffff) << 16; ++ ++ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; ++ phy_reg = mdiobus_read(bus, addr, reg_addr); ++ if (phy_reg < 0) ++ return -EIO; ++ *devices_in_package |= (phy_reg & 0xffff); ++ ++ return 0; ++} ++ + /** + * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. + * @bus: the target MII bus +@@ -223,31 +254,32 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, + int phy_reg; + int i, reg_addr; + const int num_ids = ARRAY_SIZE(c45_ids->device_ids); ++ u32 *devs = &c45_ids->devices_in_package; + +- /* Find first non-zero Devices In package. Device +- * zero is reserved, so don't probe it. ++ /* Find first non-zero Devices In package. Device zero is reserved ++ * for 802.3 c45 complied PHYs, so don't probe it at first. + */ +- for (i = 1; +- i < num_ids && c45_ids->devices_in_package == 0; +- i++) { +- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; +- phy_reg = mdiobus_read(bus, addr, reg_addr); +- if (phy_reg < 0) +- return -EIO; +- c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; +- +- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; +- phy_reg = mdiobus_read(bus, addr, reg_addr); ++ for (i = 1; i < num_ids && *devs == 0; i++) { ++ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); + if (phy_reg < 0) + return -EIO; +- c45_ids->devices_in_package |= (phy_reg & 0xffff); + +- /* If mostly Fs, there is no device there, +- * let's get out of here. +- */ +- if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { +- *phy_id = 0xffffffff; +- return 0; ++ if ((*devs & 0x1fffffff) == 0x1fffffff) { ++ /* If mostly Fs, there is no device there, ++ * then let's continue to probe more, as some ++ * 10G PHYs have zero Devices In package, ++ * e.g. Cortina CS4315/CS4340 PHY. ++ */ ++ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); ++ if (phy_reg < 0) ++ return -EIO; ++ /* no device there, let's get out of here */ ++ if ((*devs & 0x1fffffff) == 0x1fffffff) { ++ *phy_id = 0xffffffff; ++ return 0; ++ } else { ++ break; ++ } + } + } + +@@ -376,6 +408,24 @@ int phy_device_register(struct phy_device *phydev) + EXPORT_SYMBOL(phy_device_register); + + /** ++ * phy_device_remove - Remove a previously registered phy device from the MDIO bus ++ * @phydev: phy_device structure to remove ++ * ++ * This doesn't free the phy_device itself, it merely reverses the effects ++ * of phy_device_register(). Use phy_device_free() to free the device ++ * after calling this function. 
++ */ ++void phy_device_remove(struct phy_device *phydev) ++{ ++ struct mii_bus *bus = phydev->bus; ++ int addr = phydev->addr; ++ ++ device_del(&phydev->dev); ++ bus->phy_map[addr] = NULL; ++} ++EXPORT_SYMBOL(phy_device_remove); ++ ++/** + * phy_find_first - finds the first PHY device on the bus + * @bus: the target MII bus + */ +diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c +index 45483fd..badcf24 100644 +--- a/drivers/net/phy/realtek.c ++++ b/drivers/net/phy/realtek.c +@@ -22,8 +22,12 @@ + #define RTL821x_INER 0x12 + #define RTL821x_INER_INIT 0x6400 + #define RTL821x_INSR 0x13 ++#define RTL8211E_INER_LINK_STATUS 0x400 + +-#define RTL8211E_INER_LINK_STATUS 0x400 ++#define RTL8211F_INER_LINK_STATUS 0x0010 ++#define RTL8211F_INSR 0x1d ++#define RTL8211F_PAGE_SELECT 0x1f ++#define RTL8211F_TX_DELAY 0x100 + + MODULE_DESCRIPTION("Realtek PHY driver"); + MODULE_AUTHOR("Johnson Leung"); +@@ -38,6 +42,18 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev) + return (err < 0) ? err : 0; + } + ++static int rtl8211f_ack_interrupt(struct phy_device *phydev) ++{ ++ int err; ++ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43); ++ err = phy_read(phydev, RTL8211F_INSR); ++ /* restore to default page 0 */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); ++ ++ return (err < 0) ? err : 0; ++} ++ + static int rtl8211b_config_intr(struct phy_device *phydev) + { + int err; +@@ -64,6 +80,41 @@ static int rtl8211e_config_intr(struct phy_device *phydev) + return err; + } + ++static int rtl8211f_config_intr(struct phy_device *phydev) ++{ ++ int err; ++ ++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) ++ err = phy_write(phydev, RTL821x_INER, ++ RTL8211F_INER_LINK_STATUS); ++ else ++ err = phy_write(phydev, RTL821x_INER, 0); ++ ++ return err; ++} ++ ++static int rtl8211f_config_init(struct phy_device *phydev) ++{ ++ int ret; ++ u16 reg; ++ ++ ret = genphy_config_init(phydev); ++ if (ret < 0) ++ return ret; ++ ++ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { ++ /* enable TXDLY */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); ++ reg = phy_read(phydev, 0x11); ++ reg |= RTL8211F_TX_DELAY; ++ phy_write(phydev, 0x11, reg); ++ /* restore to default page 0 */ ++ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); ++ } ++ ++ return 0; ++} ++ + static struct phy_driver realtek_drvs[] = { + { + .phy_id = 0x00008201, +@@ -86,6 +137,19 @@ static struct phy_driver realtek_drvs[] = { + .config_intr = &rtl8211b_config_intr, + .driver = { .owner = THIS_MODULE,}, + }, { ++ .phy_id = 0x001cc914, ++ .name = "RTL8211DN Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .config_aneg = genphy_config_aneg, ++ .read_status = genphy_read_status, ++ .ack_interrupt = rtl821x_ack_interrupt, ++ .config_intr = rtl8211e_config_intr, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .driver = { .owner = THIS_MODULE,}, ++ }, { + .phy_id = 0x001cc915, + .name = "RTL8211E Gigabit Ethernet", + .phy_id_mask = 0x001fffff, +@@ -98,6 +162,20 @@ static struct phy_driver realtek_drvs[] = { + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, ++ }, { ++ .phy_id = 0x001cc916, ++ .name = "RTL8211F Gigabit Ethernet", ++ .phy_id_mask = 0x001fffff, ++ .features = PHY_GBIT_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .config_aneg = &genphy_config_aneg, ++ .config_init = &rtl8211f_config_init, ++ .read_status = &genphy_read_status, ++ .ack_interrupt = &rtl8211f_ack_interrupt, ++ .config_intr = 
&rtl8211f_config_intr, ++ .suspend = genphy_suspend, ++ .resume = genphy_resume, ++ .driver = { .owner = THIS_MODULE }, + }, + }; + +@@ -116,7 +194,9 @@ module_exit(realtek_exit); + + static struct mdio_device_id __maybe_unused realtek_tbl[] = { + { 0x001cc912, 0x001fffff }, ++ { 0x001cc914, 0x001fffff }, + { 0x001cc915, 0x001fffff }, ++ { 0x001cc916, 0x001fffff }, + { } + }; + +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 469d2b7..210c876 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -32,8 +32,8 @@ + + LIST_HEAD(aliases_lookup); + +-struct device_node *of_allnodes; +-EXPORT_SYMBOL(of_allnodes); ++struct device_node *of_root; ++EXPORT_SYMBOL(of_root); + struct device_node *of_chosen; + struct device_node *of_aliases; + struct device_node *of_stdout; +@@ -48,7 +48,7 @@ struct kset *of_kset; + */ + DEFINE_MUTEX(of_mutex); + +-/* use when traversing tree through the allnext, child, sibling, ++/* use when traversing tree through the child, sibling, + * or parent members of struct device_node. + */ + DEFINE_RAW_SPINLOCK(devtree_lock); +@@ -204,7 +204,7 @@ static int __init of_init(void) + mutex_unlock(&of_mutex); + + /* Symlink in /proc as required by userspace ABI */ +- if (of_allnodes) ++ if (of_root) + proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); + + return 0; +@@ -245,6 +245,23 @@ struct property *of_find_property(const struct device_node *np, + } + EXPORT_SYMBOL(of_find_property); + ++struct device_node *__of_find_all_nodes(struct device_node *prev) ++{ ++ struct device_node *np; ++ if (!prev) { ++ np = of_root; ++ } else if (prev->child) { ++ np = prev->child; ++ } else { ++ /* Walk back up looking for a sibling, or the end of the structure */ ++ np = prev; ++ while (np->parent && !np->sibling) ++ np = np->parent; ++ np = np->sibling; /* Might be null at the end of the tree */ ++ } ++ return np; ++} ++ + /** + * of_find_all_nodes - Get next node in global list + * @prev: Previous node or NULL to start iteration +@@ -259,10 +276,8 @@ struct device_node *of_find_all_nodes(struct device_node *prev) + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = prev ? prev->allnext : of_allnodes; +- for (; np != NULL; np = np->allnext) +- if (of_node_get(np)) +- break; ++ np = __of_find_all_nodes(prev); ++ of_node_get(np); + of_node_put(prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +@@ -736,7 +751,7 @@ struct device_node *of_find_node_by_path(const char *path) + unsigned long flags; + + if (strcmp(path, "/") == 0) +- return of_node_get(of_allnodes); ++ return of_node_get(of_root); + + /* The path could begin with an alias */ + if (*path != '/') { +@@ -761,7 +776,7 @@ struct device_node *of_find_node_by_path(const char *path) + /* Step down the tree matching path components */ + raw_spin_lock_irqsave(&devtree_lock, flags); + if (!np) +- np = of_node_get(of_allnodes); ++ np = of_node_get(of_root); + while (np && *path == '/') { + path++; /* Increment past '/' delimiter */ + np = __of_find_node_by_path(np, path); +@@ -790,8 +805,7 @@ struct device_node *of_find_node_by_name(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? 
from->allnext : of_allnodes; +- for (; np; np = np->allnext) ++ for_each_of_allnodes_from(from, np) + if (np->name && (of_node_cmp(np->name, name) == 0) + && of_node_get(np)) + break; +@@ -820,8 +834,7 @@ struct device_node *of_find_node_by_type(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) ++ for_each_of_allnodes_from(from, np) + if (np->type && (of_node_cmp(np->type, type) == 0) + && of_node_get(np)) + break; +@@ -852,12 +865,10 @@ struct device_node *of_find_compatible_node(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) + if (__of_device_is_compatible(np, compatible, type, NULL) && + of_node_get(np)) + break; +- } + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +@@ -884,8 +895,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) { + for (pp = np->properties; pp; pp = pp->next) { + if (of_prop_cmp(pp->name, prop_name) == 0) { + of_node_get(np); +@@ -967,8 +977,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from, + *match = NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- np = from ? from->allnext : of_allnodes; +- for (; np; np = np->allnext) { ++ for_each_of_allnodes_from(from, np) { + m = __of_match_node(matches, np); + if (m && of_node_get(np)) { + if (match) +@@ -1025,7 +1034,7 @@ struct device_node *of_find_node_by_phandle(phandle handle) + return NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); +- for (np = of_allnodes; np; np = np->allnext) ++ for_each_of_allnodes(np) + if (np->phandle == handle) + break; + of_node_get(np); +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c +index d499417..d43f305 100644 +--- a/drivers/of/dynamic.c ++++ b/drivers/of/dynamic.c +@@ -117,8 +117,6 @@ void __of_attach_node(struct device_node *np) + + np->child = NULL; + np->sibling = np->parent->child; +- np->allnext = np->parent->allnext; +- np->parent->allnext = np; + np->parent->child = np; + of_node_clear_flag(np, OF_DETACHED); + } +@@ -154,17 +152,6 @@ void __of_detach_node(struct device_node *np) + if (WARN_ON(!parent)) + return; + +- if (of_allnodes == np) +- of_allnodes = np->allnext; +- else { +- struct device_node *prev; +- for (prev = of_allnodes; +- prev->allnext != np; +- prev = prev->allnext) +- ; +- prev->allnext = np->allnext; +- } +- + if (parent->child == np) + parent->child = np->sibling; + else { +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c +index d134710..f6eda02 100644 +--- a/drivers/of/fdt.c ++++ b/drivers/of/fdt.c +@@ -145,15 +145,15 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, + * @mem: Memory chunk to use for allocating device nodes and properties + * @p: pointer to node in flat tree + * @dad: Parent struct device_node +- * @allnextpp: pointer to ->allnext from last allocated device_node + * @fpsize: Size of the node path up at the current depth. 
+ */ + static void * unflatten_dt_node(void *blob, + void *mem, + int *poffset, + struct device_node *dad, +- struct device_node ***allnextpp, +- unsigned long fpsize) ++ struct device_node **nodepp, ++ unsigned long fpsize, ++ bool dryrun) + { + const __be32 *p; + struct device_node *np; +@@ -200,7 +200,7 @@ static void * unflatten_dt_node(void *blob, + + np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, + __alignof__(struct device_node)); +- if (allnextpp) { ++ if (!dryrun) { + char *fn; + of_node_init(np); + np->full_name = fn = ((char *)np) + sizeof(*np); +@@ -222,8 +222,6 @@ static void * unflatten_dt_node(void *blob, + memcpy(fn, pathp, l); + + prev_pp = &np->properties; +- **allnextpp = np; +- *allnextpp = &np->allnext; + if (dad != NULL) { + np->parent = dad; + /* we temporarily use the next field as `last_child'*/ +@@ -254,7 +252,7 @@ static void * unflatten_dt_node(void *blob, + has_name = 1; + pp = unflatten_dt_alloc(&mem, sizeof(struct property), + __alignof__(struct property)); +- if (allnextpp) { ++ if (!dryrun) { + /* We accept flattened tree phandles either in + * ePAPR-style "phandle" properties, or the + * legacy "linux,phandle" properties. If both +@@ -296,7 +294,7 @@ static void * unflatten_dt_node(void *blob, + sz = (pa - ps) + 1; + pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, + __alignof__(struct property)); +- if (allnextpp) { ++ if (!dryrun) { + pp->name = "name"; + pp->length = sz; + pp->value = pp + 1; +@@ -308,7 +306,7 @@ static void * unflatten_dt_node(void *blob, + (char *)pp->value); + } + } +- if (allnextpp) { ++ if (!dryrun) { + *prev_pp = NULL; + np->name = of_get_property(np, "name", NULL); + np->type = of_get_property(np, "device_type", NULL); +@@ -324,11 +322,13 @@ static void * unflatten_dt_node(void *blob, + if (depth < 0) + depth = 0; + while (*poffset > 0 && depth > old_depth) +- mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp, +- fpsize); ++ mem = unflatten_dt_node(blob, mem, poffset, np, NULL, ++ fpsize, dryrun); + + if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND) + pr_err("unflatten: error %d processing FDT\n", *poffset); ++ if (nodepp) ++ *nodepp = np; + + return mem; + } +@@ -352,7 +352,6 @@ static void __unflatten_device_tree(void *blob, + unsigned long size; + int start; + void *mem; +- struct device_node **allnextp = mynodes; + + pr_debug(" -> unflatten_device_tree()\n"); + +@@ -373,7 +372,7 @@ static void __unflatten_device_tree(void *blob, + + /* First pass, scan for size */ + start = 0; +- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0); ++ size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true); + size = ALIGN(size, 4); + + pr_debug(" size is %lx, allocating...\n", size); +@@ -388,11 +387,10 @@ static void __unflatten_device_tree(void *blob, + + /* Second pass, do actual unflattening */ + start = 0; +- unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); ++ unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false); + if (be32_to_cpup(mem + size) != 0xdeadbeef) + pr_warning("End of tree marker overwritten: %08x\n", + be32_to_cpup(mem + size)); +- *allnextp = NULL; + + pr_debug(" <- unflatten_device_tree()\n"); + } +@@ -1039,7 +1037,7 @@ bool __init early_init_dt_scan(void *params) + */ + void __init unflatten_device_tree(void) + { +- __unflatten_device_tree(initial_boot_params, &of_allnodes, ++ __unflatten_device_tree(initial_boot_params, &of_root, + early_init_dt_alloc_memory_arch); + + /* Get pointer to "/chosen" and "/aliases" nodes 
for use everywhere */ +diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c +index 36b4035..d2acae8 100644 +--- a/drivers/of/pdt.c ++++ b/drivers/of/pdt.c +@@ -25,8 +25,7 @@ + + static struct of_pdt_ops *of_pdt_prom_ops __initdata; + +-void __initdata (*of_pdt_build_more)(struct device_node *dp, +- struct device_node ***nextp); ++void __initdata (*of_pdt_build_more)(struct device_node *dp); + + #if defined(CONFIG_SPARC) + unsigned int of_pdt_unique_id __initdata; +@@ -192,8 +191,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, + } + + static struct device_node * __init of_pdt_build_tree(struct device_node *parent, +- phandle node, +- struct device_node ***nextp) ++ phandle node) + { + struct device_node *ret = NULL, *prev_sibling = NULL; + struct device_node *dp; +@@ -210,16 +208,12 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, + ret = dp; + prev_sibling = dp; + +- *(*nextp) = dp; +- *nextp = &dp->allnext; +- + dp->full_name = of_pdt_build_full_name(dp); + +- dp->child = of_pdt_build_tree(dp, +- of_pdt_prom_ops->getchild(node), nextp); ++ dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); + + if (of_pdt_build_more) +- of_pdt_build_more(dp, nextp); ++ of_pdt_build_more(dp); + + node = of_pdt_prom_ops->getsibling(node); + } +@@ -234,20 +228,17 @@ static void * __init kernel_tree_alloc(u64 size, u64 align) + + void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops) + { +- struct device_node **nextp; +- + BUG_ON(!ops); + of_pdt_prom_ops = ops; + +- of_allnodes = of_pdt_create_node(root_node, NULL); ++ of_root = of_pdt_create_node(root_node, NULL); + #if defined(CONFIG_SPARC) +- of_allnodes->path_component_name = ""; ++ of_root->path_component_name = ""; + #endif +- of_allnodes->full_name = "/"; ++ of_root->full_name = "/"; + +- nextp = &of_allnodes->allnext; +- of_allnodes->child = of_pdt_build_tree(of_allnodes, +- of_pdt_prom_ops->getchild(of_allnodes->phandle), &nextp); ++ of_root->child = of_pdt_build_tree(of_root, ++ of_pdt_prom_ops->getchild(of_root->phandle)); + + /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ + of_alias_scan(kernel_tree_alloc); +diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c +index e2d79af..e40089e 100644 +--- a/drivers/of/selftest.c ++++ b/drivers/of/selftest.c +@@ -148,7 +148,7 @@ static void __init of_selftest_dynamic(void) + + static int __init of_selftest_check_node_linkage(struct device_node *np) + { +- struct device_node *child, *allnext_index = np; ++ struct device_node *child; + int count = 0, rc; + + for_each_child_of_node(np, child) { +@@ -158,14 +158,6 @@ static int __init of_selftest_check_node_linkage(struct device_node *np) + return -EINVAL; + } + +- while (allnext_index && allnext_index != child) +- allnext_index = allnext_index->allnext; +- if (allnext_index != child) { +- pr_err("Node %s is ordered differently in sibling and allnode lists\n", +- child->name); +- return -EINVAL; +- } +- + rc = of_selftest_check_node_linkage(child); + if (rc < 0) + return rc; +@@ -180,12 +172,12 @@ static void __init of_selftest_check_tree_linkage(void) + struct device_node *np; + int allnode_count = 0, child_count; + +- if (!of_allnodes) ++ if (!of_root) + return; + + for_each_of_allnodes(np) + allnode_count++; +- child_count = of_selftest_check_node_linkage(of_allnodes); ++ child_count = of_selftest_check_node_linkage(of_root); + + selftest(child_count > 0, "Device node data structure is corrupted\n"); + selftest(child_count == 
allnode_count, "allnodes list size (%i) doesn't match" +@@ -775,33 +767,29 @@ static void update_node_properties(struct device_node *np, + */ + static int attach_node_and_children(struct device_node *np) + { +- struct device_node *next, *root = np, *dup; ++ struct device_node *next, *dup, *child; + +- /* skip root node */ +- np = np->child; +- /* storing a copy in temporary node */ +- dup = np; ++ dup = of_find_node_by_path(np->full_name); ++ if (dup) { ++ update_node_properties(np, dup); ++ return 0; ++ } + +- while (dup) { ++ /* Children of the root need to be remembered for removal */ ++ if (np->parent == of_root) { + if (WARN_ON(last_node_index >= NO_OF_NODES)) + return -EINVAL; +- nodes[last_node_index++] = dup; +- dup = dup->sibling; ++ nodes[last_node_index++] = np; + } +- dup = NULL; + +- while (np) { +- next = np->allnext; +- dup = of_find_node_by_path(np->full_name); +- if (dup) +- update_node_properties(np, dup); +- else { +- np->child = NULL; +- if (np->parent == root) +- np->parent = of_allnodes; +- of_attach_node(np); +- } +- np = next; ++ child = np->child; ++ np->child = NULL; ++ np->sibling = NULL; ++ of_attach_node(np); ++ while (child) { ++ next = child->sibling; ++ attach_node_and_children(child); ++ child = next; + } + + return 0; +@@ -846,10 +834,10 @@ static int __init selftest_data_add(void) + return -EINVAL; + } + +- if (!of_allnodes) { ++ if (!of_root) { + /* enabling flag for removing nodes */ + selftest_live_tree = true; +- of_allnodes = selftest_data_node; ++ of_root = selftest_data_node; + + for_each_of_allnodes(np) + __of_attach_node_sysfs(np); +@@ -859,7 +847,14 @@ static int __init selftest_data_add(void) + } + + /* attach the sub-tree to live tree */ +- return attach_node_and_children(selftest_data_node); ++ np = selftest_data_node->child; ++ while (np) { ++ struct device_node *next = np->sibling; ++ np->parent = of_root; ++ attach_node_and_children(np); ++ np = next; ++ } ++ return 0; + } + + /** +@@ -889,10 +884,10 @@ static void selftest_data_remove(void) + of_node_put(of_chosen); + of_aliases = NULL; + of_chosen = NULL; +- for_each_child_of_node(of_allnodes, np) ++ for_each_child_of_node(of_root, np) + detach_node_and_children(np); +- __of_detach_node_sysfs(of_allnodes); +- of_allnodes = NULL; ++ __of_detach_node_sysfs(of_root); ++ of_root = NULL; + return; + } + +diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile +index e04fe2d..e9815ac 100644 +--- a/drivers/pci/Makefile ++++ b/drivers/pci/Makefile +@@ -35,6 +35,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o + # + obj-$(CONFIG_ALPHA) += setup-irq.o + obj-$(CONFIG_ARM) += setup-irq.o ++obj-$(CONFIG_ARM64) += setup-irq.o + obj-$(CONFIG_UNICORE32) += setup-irq.o + obj-$(CONFIG_SUPERH) += setup-irq.o + obj-$(CONFIG_MIPS) += setup-irq.o +diff --git a/drivers/pci/access.c b/drivers/pci/access.c +index 7f249b9..b965c12 100644 +--- a/drivers/pci/access.c ++++ b/drivers/pci/access.c +@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); + EXPORT_SYMBOL(pci_bus_write_config_word); + EXPORT_SYMBOL(pci_bus_write_config_dword); + ++int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where); ++ if (!addr) { ++ *val = ~0; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ if (size == 1) ++ *val = readb(addr); ++ else if (size == 2) ++ *val = readw(addr); ++ else ++ *val = readl(addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_read); ++ ++int 
pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where); ++ if (!addr) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 1) ++ writeb(val, addr); ++ else if (size == 2) ++ writew(val, addr); ++ else ++ writel(val, addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_write); ++ ++int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ void __iomem *addr; ++ ++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); ++ if (!addr) { ++ *val = ~0; ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ *val = readl(addr); ++ ++ if (size <= 2) ++ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_read32); ++ ++int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ void __iomem *addr; ++ u32 mask, tmp; ++ ++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); ++ if (!addr) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (size == 4) { ++ writel(val, addr); ++ return PCIBIOS_SUCCESSFUL; ++ } else { ++ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); ++ } ++ ++ tmp = readl(addr) & mask; ++ tmp |= val << ((where & 0x3) * 8); ++ writel(tmp, addr); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++EXPORT_SYMBOL_GPL(pci_generic_config_write32); ++ + /** + * pci_bus_set_ops - Set raw operations of pci bus + * @bus: pci bus struct +diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig +index 96586b1..dafa3dc 100644 +--- a/drivers/pci/host/Kconfig ++++ b/drivers/pci/host/Kconfig +@@ -50,7 +50,7 @@ config PCI_RCAR_GEN2_PCIE + + config PCI_HOST_GENERIC + bool "Generic PCI host controller" +- depends on ARM && OF ++ depends on (ARM || ARM64) && OF + help + Say Y here if you want to support a simple generic PCI host + controller, such as the one emulated by kvmtool. 
+diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c +index d491b0f..baa1232 100644 +--- a/drivers/pci/host/pci-layerscape.c ++++ b/drivers/pci/host/pci-layerscape.c +@@ -36,12 +36,21 @@ + #define LTSSM_PCIE_L0 0x11 /* L0 state */ + #define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ + ++#define PCIE_SRIOV_OFFSET 0x178 ++ ++/* CS2 */ ++#define PCIE_CS2_OFFSET 0x1000 /* For PCIe without SR-IOV */ ++#define PCIE_ENABLE_CS2 0x80000000 /* For PCIe with SR-IOV */ ++ + /* PEX Internal Configuration Registers */ + #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ + #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ ++#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ ++#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ + + /* PEX LUT registers */ + #define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ ++#define PCIE_LUT_CTRL0 0x7f8 + #define PCIE_LUT_UDR(n) (0x800 + (n) * 8) + #define PCIE_LUT_LDR(n) (0x804 + (n) * 8) + #define PCIE_LUT_MASK_ALL 0xffff +@@ -72,6 +81,8 @@ + #define CPLD_RST_PCIE_SLOT 0x14 + #define CPLD_RST_PCIESLOT 0x3 + ++#define PCIE_IATU_NUM 6 ++ + struct ls_pcie; + + struct ls_pcie_pm_data { +@@ -111,6 +122,8 @@ struct ls_pcie { + + #define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) + ++static void ls_pcie_host_init(struct pcie_port *pp); ++ + u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) + { + u32 index, streamid; +@@ -163,6 +176,42 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) + iowrite32(val, pcie->dbi + PCIE_STRFMR1); + } + ++/* Disable all bars in RC mode */ ++static void ls_pcie_disable_bars(struct ls_pcie *pcie) ++{ ++ u32 header; ++ ++ header = ioread32(pcie->dbi + PCIE_SRIOV_OFFSET); ++ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_SRIOV) { ++ iowrite32(PCIE_ENABLE_CS2, pcie->lut + PCIE_LUT_CTRL0); ++ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_0); ++ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_1); ++ iowrite32(0, pcie->dbi + PCI_ROM_ADDRESS1); ++ iowrite32(0, pcie->lut + PCIE_LUT_CTRL0); ++ } else { ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0); ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1); ++ iowrite32(0, ++ pcie->dbi + PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1); ++ } ++} ++ ++static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) ++{ ++ int i; ++ ++ for (i = 0; i < PCIE_IATU_NUM; i++) ++ dw_pcie_disable_outbound_atu(&pcie->pp, i); ++} ++ ++/* Forward error response of outbound non-posted requests */ ++static void ls_pcie_fix_error_response(struct ls_pcie *pcie) ++{ ++ iowrite32(PCIE_ABSERR_SETTING, pcie->dbi + PCIE_ABSERR); ++} ++ + static int ls1021_pcie_link_up(struct pcie_port *pp) + { + u32 state; +@@ -272,19 +321,24 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) + } + pcie->index = index[1]; + +- dw_pcie_setup_rc(pp); ++ ls_pcie_host_init(pp); + +- ls_pcie_drop_msg_tlp(pcie); ++ dw_pcie_setup_rc(pp); + } + + static int ls_pcie_link_up(struct pcie_port *pp) + { + struct ls_pcie *pcie = to_ls_pcie(pp); +- u32 state; ++ u32 state, offset; + +- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> +- pcie->drvdata->ltssm_shift) & +- LTSSM_STATE_MASK; ++ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) ++ offset = 0x407fc; ++ else ++ offset = PCIE_LUT_DBG; ++ ++ state = (ioread32(pcie->lut + offset) >> ++ pcie->drvdata->ltssm_shift) & ++ LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; +@@ -308,6 +362,10 @@ static void 
ls_pcie_host_init(struct pcie_port *pp) + ls_pcie_clear_multifunction(pcie); + ls_pcie_drop_msg_tlp(pcie); + iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); ++ ++ ls_pcie_disable_bars(pcie); ++ ls_pcie_disable_outbound_atus(pcie); ++ ls_pcie_fix_error_response(pcie); + } + + static int ls_pcie_msi_host_init(struct pcie_port *pp, +@@ -426,6 +484,11 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, + + pp = &pcie->pp; + ++ if (dw_pcie_link_up(&pcie->pp)) ++ pcie->in_slot = true; ++ else ++ pcie->in_slot = false; ++ + pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); + if (pcie->pme_irq < 0) { + dev_err(&pdev->dev, +@@ -462,11 +525,6 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, + val |= PCIE_PEX_RCR_PMEIE; + iowrite16(val, pcie->dbi + PCIE_PEX_RCR); + +- if (dw_pcie_link_up(&pcie->pp)) +- pcie->in_slot = true; +- else +- pcie->in_slot = false; +- + return 0; + } + +@@ -590,12 +648,14 @@ static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) + u32 state; + int i = 0; + u16 val; +- +- ls_pcie_host_init(&pcie->pp); ++ struct pcie_port *pp = &pcie->pp; + + if (!pcie->in_slot) + return 0; + ++ dw_pcie_setup_rc(pp); ++ ls_pcie_host_init(pp); ++ + /* Put RC in D0 */ + val = ioread16(pcie->dbi + PCIE_PM_SCR); + val &= PCIE_PM_SCR_PMEPS_D0; +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c +index 8a9241b..0961ffc 100644 +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -159,6 +159,13 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + } + ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index) ++{ ++ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, ++ PCIE_ATU_VIEWPORT); ++ dw_pcie_writel_rc(pp, 0, PCIE_ATU_CR2); ++} ++ + int dw_pcie_link_up(struct pcie_port *pp) + { + if (pp->ops->link_up) +@@ -495,6 +502,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) + u32 membase; + u32 memlimit; + ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ PCIE_ATU_TYPE_IO, pp->io_base, ++ pp->io_bus_addr, pp->io_size); ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, ++ PCIE_ATU_TYPE_MEM, pp->mem_base, ++ pp->mem_bus_addr, pp->mem_size); ++ + /* set the number of lanes */ + dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); + val &= ~PORT_LINK_MODE_MASK; +diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h +index 2f01284..fcd6431 100644 +--- a/drivers/pci/host/pcie-designware.h ++++ b/drivers/pci/host/pcie-designware.h +@@ -80,5 +80,6 @@ void dw_pcie_msi_init(struct pcie_port *pp); + int dw_pcie_link_up(struct pcie_port *pp); + void dw_pcie_setup_rc(struct pcie_port *pp); + int dw_pcie_host_init(struct pcie_port *pp); ++void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); + + #endif /* _PCIE_DESIGNWARE_H */ +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 5dd4c96..5e64d37 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -667,11 +667,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) + { + resource_size_t phys_addr; + u32 table_offset; ++ unsigned long flags; + u8 bir; + + pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, + &table_offset); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); ++ flags = pci_resource_flags(dev, bir); ++ if (!flags || (flags & IORESOURCE_UNSET)) ++ return NULL; ++ + table_offset &= PCI_MSIX_TABLE_OFFSET; + phys_addr = pci_resource_start(dev, bir) + table_offset; + +diff 
--git a/drivers/pci/pci.c b/drivers/pci/pci.c +index ce0aa47..a6783a5 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -2467,6 +2467,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) + *pinp = pin; + return PCI_SLOT(dev->devfn); + } ++EXPORT_SYMBOL_GPL(pci_common_swizzle); + + /** + * pci_release_region - Release a PCI bar +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c +index 2f0ce66..95ef171 100644 +--- a/drivers/pci/pcie/portdrv_core.c ++++ b/drivers/pci/pcie/portdrv_core.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include "../pci.h" + #include "portdrv.h" +@@ -199,6 +200,28 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) + static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) + { + int i, irq = -1; ++ int ret; ++ struct device_node *np = NULL; ++ ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) ++ irqs[i] = 0; ++ ++ if (dev->bus->dev.of_node) ++ np = dev->bus->dev.of_node; ++ ++ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, ++ * request irq for aer ++ */ ++ if (IS_ENABLED(CONFIG_OF_IRQ) && np && ++ (mask & PCIE_PORT_SERVICE_PME)) { ++ ret = of_irq_get_byname(np, "aer"); ++ if (ret > 0) { ++ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; ++ if (dev->irq) ++ irq = dev->irq; ++ goto no_msi; ++ } ++ } + + /* + * If MSI cannot be used for PCIe PME or hotplug, we have to use +@@ -224,11 +247,13 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) + irq = dev->irq; + + no_msi: +- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) +- irqs[i] = irq; ++ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { ++ if (!irqs[i]) ++ irqs[i] = irq; ++ } + irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; + +- if (irq < 0) ++ if (irq < 0 && irqs[PCIE_PORT_SERVICE_AER_SHIFT] < 0) + return -ENODEV; + return 0; + } +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 6bdeb75..0b16384 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -2024,6 +2024,7 @@ err_out: + kfree(b); + return NULL; + } ++EXPORT_SYMBOL_GPL(pci_create_root_bus); + + int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) + { +diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c +index 8bd76c9..8a280e9 100644 +--- a/drivers/pci/remove.c ++++ b/drivers/pci/remove.c +@@ -139,6 +139,7 @@ void pci_stop_root_bus(struct pci_bus *bus) + /* stop the host bridge */ + device_release_driver(&host_bridge->dev); + } ++EXPORT_SYMBOL_GPL(pci_stop_root_bus); + + void pci_remove_root_bus(struct pci_bus *bus) + { +@@ -158,3 +159,4 @@ void pci_remove_root_bus(struct pci_bus *bus) + /* remove the host bridge */ + device_unregister(&host_bridge->dev); + } ++EXPORT_SYMBOL_GPL(pci_remove_root_bus); +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c +index e3e17f3..8169597 100644 +--- a/drivers/pci/setup-bus.c ++++ b/drivers/pci/setup-bus.c +@@ -1750,3 +1750,4 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus) + __pci_bus_assign_resources(bus, &add_list, NULL); + BUG_ON(!list_empty(&add_list)); + } ++EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources); +diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c +index 4e2d595..95c225b 100644 +--- a/drivers/pci/setup-irq.c ++++ b/drivers/pci/setup-irq.c +@@ -65,3 +65,4 @@ void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), + for_each_pci_dev(dev) + pdev_fixup_irq(dev, swizzle, map_irq); + } ++EXPORT_SYMBOL_GPL(pci_fixup_irqs); +diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig 
+index 76d6bd4..d4bcacf 100644
+--- a/drivers/soc/Kconfig
++++ b/drivers/soc/Kconfig
+@@ -4,4 +4,17 @@ source "drivers/soc/qcom/Kconfig"
+ source "drivers/soc/ti/Kconfig"
+ source "drivers/soc/versatile/Kconfig"
+ 
++config FSL_SOC_DRIVERS
++	bool "Freescale SoC Drivers"
++	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
++	default n
++	help
++	  Say y here to enable support for Freescale SoC device drivers.
++	  These drivers provide support for blocks and features that are
++	  specific to Freescale platforms.
++
++if FSL_SOC_DRIVERS
++	source "drivers/soc/fsl/Kconfig"
++endif
++
+ endmenu
+diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
+index 063113d..ef82e45 100644
+--- a/drivers/soc/Makefile
++++ b/drivers/soc/Makefile
+@@ -6,3 +6,4 @@ obj-$(CONFIG_ARCH_QCOM) += qcom/
+ obj-$(CONFIG_ARCH_TEGRA) += tegra/
+ obj-$(CONFIG_SOC_TI) += ti/
+ obj-$(CONFIG_PLAT_VERSATILE) += versatile/
++obj-$(CONFIG_FSL_SOC_DRIVERS) += fsl/
+diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
+new file mode 100644
+index 0000000..92a085e
+--- /dev/null
++++ b/drivers/soc/fsl/Kconfig
+@@ -0,0 +1,6 @@
++config FSL_GUTS
++	bool
++
++if ARM || ARM64
++source "drivers/soc/fsl/Kconfig.arm"
++endif
+diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm
+new file mode 100644
+index 0000000..5f2d214
+--- /dev/null
++++ b/drivers/soc/fsl/Kconfig.arm
+@@ -0,0 +1,25 @@
++#
++# Freescale ARM SoC Drivers
++#
++
++config LS1_SOC_DRIVERS
++	bool "LS1021A SoC Drivers"
++	depends on SOC_LS1021A
++	default n
++	help
++	  Say y here to enable support for Freescale LS1021A SoC device
++	  drivers. These drivers provide support for blocks and features
++	  that are specific to the LS1021A platform.
++
++config LS_SOC_DRIVERS
++	bool "Layerscape SoC Drivers"
++	depends on ARCH_LAYERSCAPE
++	default n
++	help
++	  Say y here to enable support for Freescale Layerscape SoC device
++	  drivers. These drivers provide support for blocks and features
++	  that are specific to Layerscape platforms.
++
++if LS1_SOC_DRIVERS
++	source "drivers/soc/fsl/ls1/Kconfig"
++endif
+diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
+new file mode 100644
+index 0000000..9fc17b3
+--- /dev/null
++++ b/drivers/soc/fsl/Makefile
+@@ -0,0 +1,6 @@
++#
++# Makefile for Freescale SoC specific device drivers.
++#
++
++obj-$(CONFIG_LS1_SOC_DRIVERS) += ls1/
++obj-$(CONFIG_FSL_GUTS) += guts.o
+diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
+new file mode 100644
+index 0000000..11065c2
+--- /dev/null
++++ b/drivers/soc/fsl/guts.c
+@@ -0,0 +1,123 @@
++/*
++ * Freescale QorIQ Platforms GUTS Driver
++ *
++ * Copyright (C) 2016 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/fsl/guts.h>
++
++struct guts {
++	struct ccsr_guts __iomem *regs;
++	bool little_endian;
++};
++
++static struct guts *guts;
++
++u32 guts_get_svr(void)
++{
++	u32 svr = 0;
++
++	if (!guts || !guts->regs) {
++#ifdef CONFIG_PPC
++		svr = mfspr(SPRN_SVR);
++#endif
++		return svr;
++	}
++
++	if (guts->little_endian)
++		svr = ioread32(&guts->regs->svr);
++	else
++		svr = ioread32be(&guts->regs->svr);
++
++	return svr;
++}
++EXPORT_SYMBOL_GPL(guts_get_svr);
++
++static int guts_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++
++	guts = kzalloc(sizeof(*guts), GFP_KERNEL);
++	if (!guts)
++		return -ENOMEM;
++
++	guts->little_endian = of_property_read_bool(np, "little-endian");
++
++	guts->regs = of_iomap(np, 0);
++	if (!guts->regs) {
++		kfree(guts);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static int guts_remove(struct platform_device *pdev)
++{
++	iounmap(guts->regs);
++	kfree(guts);
++	return 0;
++}
++
++/*
++ * Table of compatible strings for matching the device-tree guts node
++ * on Freescale QorIQ SoCs.
++ */
++static const struct of_device_id guts_of_match[] = {
++	/* For T4 & B4 SoCs */
++	{ .compatible = "fsl,qoriq-device-config-1.0", },
++	/* For P series SoCs */
++	{ .compatible = "fsl,qoriq-device-config-2.0", },
++	{ .compatible = "fsl,p1010-guts", },
++	{ .compatible = "fsl,p1020-guts", },
++	{ .compatible = "fsl,p1021-guts", },
++	{ .compatible = "fsl,p1022-guts", },
++	{ .compatible = "fsl,p1023-guts", },
++	{ .compatible = "fsl,p2020-guts", },
++	/* For BSC series SoCs */
++	{ .compatible = "fsl,bsc9131-guts", },
++	{ .compatible = "fsl,bsc9132-guts", },
++	/* For Layerscape series SoCs */
++	{ .compatible = "fsl,ls1021a-dcfg", },
++	{ .compatible = "fsl,ls1043a-dcfg", },
++	{ .compatible = "fsl,ls2080a-dcfg", },
++	{}
++};
++MODULE_DEVICE_TABLE(of, guts_of_match);
++
++static struct platform_driver guts_driver = {
++	.driver = {
++		.name = "fsl-guts",
++		.of_match_table = guts_of_match,
++	},
++	.probe = guts_probe,
++	.remove = guts_remove,
++};
++
++static int __init guts_drv_init(void)
++{
++	return platform_driver_register(&guts_driver);
++}
++subsys_initcall(guts_drv_init);
++
++static void __exit guts_drv_exit(void)
++{
++	platform_driver_unregister(&guts_driver);
++}
++module_exit(guts_drv_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Freescale QorIQ Platforms GUTS Driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/fsl/ls1/Kconfig b/drivers/soc/fsl/ls1/Kconfig
+new file mode 100644
+index 0000000..c9b04c4
+--- /dev/null
++++ b/drivers/soc/fsl/ls1/Kconfig
+@@ -0,0 +1,11 @@
++#
++# LS-1 SoC drivers
++#
++config FTM_ALARM
++	bool "FTM alarm driver"
++	depends on SOC_LS1021A
++	default n
++	help
++	  Say y here to enable FTM alarm support. The FTM alarm provides
++	  an alarm function used to wake the system from deep sleep. Only
++	  one FTM instance (FTM0) can be used as the alarm.
+diff --git a/drivers/soc/fsl/ls1/Makefile b/drivers/soc/fsl/ls1/Makefile
+new file mode 100644
+index 0000000..6299aa1
+--- /dev/null
++++ b/drivers/soc/fsl/ls1/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o
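For context, guts.c above exports guts_get_svr(), and include/linux/fsl/svr.h (added further down in this patch) supplies the SVR_* constants and field extractors. A minimal sketch of how a consumer can identify the running SoC, assuming CONFIG_FSL_GUTS is enabled; soc_is_t4240() is an illustrative helper, not part of the patch:

    #include <linux/fsl/guts.h>
    #include <linux/fsl/svr.h>

    /* Sketch: classify the SoC from the System Version Register.
     * SVR_SOC_VER() strips the revision bits; SVR_REV() yields them.
     */
    static bool soc_is_t4240(void)
    {
    	u32 svr = guts_get_svr();

    	return SVR_SOC_VER(svr) == SVR_T4240;
    }
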
+diff --git a/drivers/soc/fsl/ls1/ftm_alarm.c b/drivers/soc/fsl/ls1/ftm_alarm.c
+new file mode 100644
+index 0000000..c42b26b
+--- /dev/null
++++ b/drivers/soc/fsl/ls1/ftm_alarm.c
+@@ -0,0 +1,274 @@
++/*
++ * Freescale FlexTimer Module (FTM) Alarm driver.
++ *
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_irq.h>
++#include <linux/platform_device.h>
++
++#define FTM_SC			0x00
++#define FTM_SC_CLK_SHIFT	3
++#define FTM_SC_CLK_MASK		(0x3 << FTM_SC_CLK_SHIFT)
++#define FTM_SC_CLK(c)		((c) << FTM_SC_CLK_SHIFT)
++#define FTM_SC_PS_MASK		0x7
++#define FTM_SC_TOIE		BIT(6)
++#define FTM_SC_TOF		BIT(7)
++
++#define FTM_SC_CLKS_FIXED_FREQ	0x02
++
++#define FTM_CNT			0x04
++#define FTM_MOD			0x08
++#define FTM_CNTIN		0x4C
++
++#define FIXED_FREQ_CLK		32000
++#define MAX_FREQ_DIV		(1 << FTM_SC_PS_MASK)
++#define MAX_COUNT_VAL		0xffff
++
++static void __iomem *ftm1_base;
++static u32 alarm_freq;
++static bool big_endian;
++
++static inline u32 ftm_readl(void __iomem *addr)
++{
++	if (big_endian)
++		return ioread32be(addr);
++
++	return ioread32(addr);
++}
++
++static inline void ftm_writel(u32 val, void __iomem *addr)
++{
++	if (big_endian)
++		iowrite32be(val, addr);
++	else
++		iowrite32(val, addr);
++}
++
++static inline void ftm_counter_enable(void __iomem *base)
++{
++	u32 val;
++
++	/* select and enable counter clock source */
++	val = ftm_readl(base + FTM_SC);
++	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
++	val |= (FTM_SC_PS_MASK | FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ));
++	ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_counter_disable(void __iomem *base)
++{
++	u32 val;
++
++	/* disable counter clock source */
++	val = ftm_readl(base + FTM_SC);
++	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
++	ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_irq_acknowledge(void __iomem *base)
++{
++	u32 val;
++
++	val = ftm_readl(base + FTM_SC);
++	val &= ~FTM_SC_TOF;
++	ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_irq_enable(void __iomem *base)
++{
++	u32 val;
++
++	val = ftm_readl(base + FTM_SC);
++	val |= FTM_SC_TOIE;
++	ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_irq_disable(void __iomem *base)
++{
++	u32 val;
++
++	val = ftm_readl(base + FTM_SC);
++	val &= ~FTM_SC_TOIE;
++	ftm_writel(val, base + FTM_SC);
++}
++
++static inline void ftm_reset_counter(void __iomem *base)
++{
++	/*
++	 * The CNT register contains the FTM counter value.
++	 * Reset clears the CNT register. Writing any value to CNT
++	 * reloads the counter with its initial value, CNTIN.
++	 */
++	ftm_writel(0x00, base + FTM_CNT);
++}
++
++static u32 time_to_cycle(unsigned long time)
++{
++	u32 cycle;
++
++	cycle = time * alarm_freq;
++	if (cycle > MAX_COUNT_VAL) {
++		pr_err("Out of alarm range.\n");
++		cycle = 0;
++	}
++
++	return cycle;
++}
++
++static u32 cycle_to_time(u32 cycle)
++{
++	return cycle / alarm_freq + 1;
++}
++
++static void ftm_clean_alarm(void)
++{
++	ftm_counter_disable(ftm1_base);
++
++	ftm_writel(0x00, ftm1_base + FTM_CNTIN);
++	ftm_writel(~0UL, ftm1_base + FTM_MOD);
++
++	ftm_reset_counter(ftm1_base);
++}
++
++static int ftm_set_alarm(u64 cycle)
++{
++	ftm_irq_disable(ftm1_base);
++
++	/*
++	 * The counter increments until the value of MOD is reached,
++	 * at which point the counter is reloaded with the value of CNTIN.
++	 * The TOF (overflow) flag is set when the FTM counter changes
++	 * from MOD to CNTIN, so MOD must be programmed with cycle - 1.
++	 */
++	ftm_writel(cycle - 1, ftm1_base + FTM_MOD);
++
++	ftm_counter_enable(ftm1_base);
++
++	ftm_irq_enable(ftm1_base);
++
++	return 0;
++}
++
++static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id)
++{
++	ftm_irq_acknowledge(ftm1_base);
++	ftm_irq_disable(ftm1_base);
++	ftm_clean_alarm();
++
++	return IRQ_HANDLED;
++}
++
++static ssize_t ftm_alarm_show(struct device *dev,
++			      struct device_attribute *attr,
++			      char *buf)
++{
++	u32 count, val;
++
++	count = ftm_readl(ftm1_base + FTM_MOD);
++	val = ftm_readl(ftm1_base + FTM_CNT);
++	val = (count & MAX_COUNT_VAL) - val;
++	val = cycle_to_time(val);
++
++	return sprintf(buf, "%u\n", val);
++}
++
++static ssize_t ftm_alarm_store(struct device *dev,
++			       struct device_attribute *attr,
++			       const char *buf, size_t count)
++{
++	u32 cycle;
++	unsigned long time;
++
++	if (kstrtoul(buf, 0, &time))
++		return -EINVAL;
++
++	ftm_clean_alarm();
++
++	cycle = time_to_cycle(time);
++	if (!cycle)
++		return -EINVAL;
++
++	ftm_set_alarm(cycle);
++
++	return count;
++}
++
++static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644,
++			ftm_alarm_show, ftm_alarm_store);
++
++static int ftm_alarm_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++	struct resource *r;
++	int irq;
++	int ret;
++
++	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!r)
++		return -ENODEV;
++
++	ftm1_base = devm_ioremap_resource(&pdev->dev, r);
++	if (IS_ERR(ftm1_base))
++		return PTR_ERR(ftm1_base);
++
++	irq = irq_of_parse_and_map(np, 0);
++	if (irq <= 0) {
++		pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
++		return -EINVAL;
++	}
++
++	big_endian = of_property_read_bool(np, "big-endian");
++
++	ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt,
++			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "failed to request irq\n");
++		return ret;
++	}
++
++	ret = device_create_file(&pdev->dev, &ftm_alarm_attributes);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to create sysfs file\n");
++		return ret;
++	}
++
++	alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV;
++
++	ftm_clean_alarm();
++
++	device_init_wakeup(&pdev->dev, true);
++
++	return ret;
++}
++
++static const struct of_device_id ftm_alarm_match[] = {
++	{ .compatible = "fsl,ftm-alarm", },
++	{ .compatible = "fsl,ftm-timer", },
++	{ },
++};
++
++static struct platform_driver ftm_alarm_driver = {
++	.probe = ftm_alarm_probe,
++	.driver = {
++		.name = "ftm-alarm",
++		.owner = THIS_MODULE,
++		.of_match_table = ftm_alarm_match,
++	},
++};
++
++static int __init ftm_alarm_init(void)
++{
++	return platform_driver_register(&ftm_alarm_driver);
++}
++device_initcall(ftm_alarm_init);
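Taken together, the constants and helpers above fix the alarm's resolution and range: alarm_freq = FIXED_FREQ_CLK / MAX_FREQ_DIV = 32000 / 128 = 250 Hz, and the 16-bit counter (MAX_COUNT_VAL = 0xffff) caps a single alarm at 65535 / 250 ≈ 262 seconds. Writing, say, 60 to the ftm_alarm sysfs attribute therefore programs FTM_MOD with 60 * 250 - 1 = 14999. A standalone mock-up of the same arithmetic (user-space C, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_FREQ_CLK	32000u
    #define MAX_FREQ_DIV	128u	/* 1 << FTM_SC_PS_MASK */
    #define MAX_COUNT_VAL	0xffffu

    int main(void)
    {
    	uint32_t alarm_freq = FIXED_FREQ_CLK / MAX_FREQ_DIV; /* 250 Hz */

    	printf("max alarm: %u s\n", MAX_COUNT_VAL / alarm_freq); /* 262 */
    	printf("60 s -> MOD = %u\n", 60 * alarm_freq - 1); /* 14999 */
    	return 0;
    }
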
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+index 27d1a91..cb52ede 100644
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -52,11 +52,6 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Freescale Semiconductor, Inc");
+ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+ 
+-/* Oldest DPAA2 objects version we are compatible with */
+-#define DPAA2_SUPPORTED_DPNI_VERSION	6
+-#define DPAA2_SUPPORTED_DPBP_VERSION	2
+-#define DPAA2_SUPPORTED_DPCON_VERSION	2
+-
+ static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+			     u32 fd_status,
+			     struct sk_buff *skb)
+@@ -261,7 +256,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ 			priv->buf_layout.private_data_size +
+ 			sizeof(struct dpaa2_fas));
+ 
+-		*ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns);
++		*ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+ 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ 		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ 	}
+@@ -362,6 +357,25 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
+ 	return cleaned;
+ }
+ 
++/* Configure the egress frame annotation for timestamp update */
++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *hwa_start)
++{
++	struct dpaa2_faead *faead;
++	u32 ctrl;
++	u32 frc;
++
++	/* Mark the egress frame annotation area as valid */
++	frc = dpaa2_fd_get_frc(fd);
++	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++
++	/* Enable the UPD (update prepended data) bit in the FAEAD field of
++	 * the hardware frame annotation area
++	 */
++	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
++	faead = hwa_start + DPAA2_FAEAD_OFFSET;
++	faead->ctrl = cpu_to_le32(ctrl);
++}
++
+ /* Create a frame descriptor based on a fragmented skb */
+ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ 		       struct sk_buff *skb,
+@@ -369,6 +383,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ {
+ 	struct device *dev = priv->net_dev->dev.parent;
+ 	void *sgt_buf = NULL;
++	void *hwa;
+ 	dma_addr_t addr;
+ 	int nr_frags = skb_shinfo(skb)->nr_frags;
+ 	struct dpaa2_sg_entry *sgt;
+@@ -414,7 +429,8 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ 	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+ 	 * field here.
+ 	 */
+-	memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8);
++	hwa = sgt_buf + priv->buf_layout.private_data_size;
++	memset(hwa, 0, 8);
+ 
+ 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ 
+@@ -459,6 +475,9 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ 	fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+ 			 DPAA2_FD_CTRL_PTV1;
+ 
++	if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++		enable_tx_tstamp(fd, hwa);
++
+ 	return 0;
+ 
+ dma_map_single_failed:
+@@ -479,6 +498,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
+ 	u8 *buffer_start;
+ 	struct sk_buff **skbh;
+ 	dma_addr_t addr;
++	void *hwa;
+ 
+ 	buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
+ 				 DPAA2_ETH_TX_BUF_ALIGN,
+@@ -487,9 +507,10 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
+ 	/* PTA from egress side is passed as is to the confirmation side so
+ 	 * we need to clear some fields here in order to find consistent values
+ 	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+-	 * field here.
++ * field here + */ +- memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); ++ hwa = buffer_start + priv->buf_layout.private_data_size; ++ memset(hwa, 0, 8); + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it +@@ -512,6 +533,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | + DPAA2_FD_CTRL_PTV1; + ++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ++ enable_tx_tstamp(fd, hwa); ++ + return 0; + } + +@@ -579,7 +603,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, + ns = (u64 *)((void *)skbh + + priv->buf_layout.private_data_size + + sizeof(struct dpaa2_fas)); +- *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); ++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_tstamp_tx(skb, &shhwtstamps); + } +@@ -779,7 +803,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) + /* Allocate buffer visible to WRIOP + skb shared info + + * alignment padding + */ +- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); ++ buf = netdev_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); + if (unlikely(!buf)) + goto err_alloc; + +@@ -973,7 +997,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) + } + + if (cleaned < budget) { +- napi_complete_done(napi, cleaned); ++ napi_complete(napi); + /* Re-enable data available notifications */ + do { + err = dpaa2_io_service_rearm(NULL, &ch->nctx); +@@ -1353,7 +1377,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) + * in promisc mode, in order to avoid frame loss while we + * progressively add entries to the table. + * We don't know whether we had been in promisc already, and +- * making an MC call to find it is expensive; so set uc promisc ++ * making an MC call to find out is expensive; so set uc promisc + * nonetheless. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); +@@ -1498,48 +1522,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) + /* Update NAPI statistics */ + ch->stats.cdan++; + +- napi_schedule_irqoff(&ch->napi); +-} +- +-/* Verify that the FLIB API version of various MC objects is supported +- * by our driver +- */ +-static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) +-{ +- char *name = ls_dev->obj_desc.type; +- struct device *dev = &ls_dev->dev; +- u16 supported_version, flib_version; +- +- if (strcmp(name, "dpni") == 0) { +- flib_version = DPNI_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPNI_VERSION; +- } else if (strcmp(name, "dpbp") == 0) { +- flib_version = DPBP_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPBP_VERSION; +- } else if (strcmp(name, "dpcon") == 0) { +- flib_version = DPCON_VER_MAJOR; +- supported_version = DPAA2_SUPPORTED_DPCON_VERSION; +- } else { +- dev_err(dev, "invalid object type (%s)\n", name); +- return -EINVAL; +- } +- +- /* Check that the FLIB-defined version matches the one reported by MC */ +- if (mc_version != flib_version) { +- dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", +- name, mc_version, flib_version); +- return -EINVAL; +- } +- +- /* ... 
and that we actually support it */ +- if (mc_version < supported_version) { +- dev_err(dev, "Unsupported %s FLIB version (%d)\n", +- name, mc_version); +- return -EINVAL; +- } +- dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); +- +- return 0; ++ napi_schedule(&ch->napi); + } + + /* Allocate and configure a DPCON object */ +@@ -1563,16 +1546,18 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) + goto err_open; + } + ++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); ++ if (err) { ++ dev_err(dev, "dpcon_reset() failed\n"); ++ goto err_reset; ++ } ++ + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + +- err = check_obj_version(dpcon, attrs.version.major); +- if (err) +- goto err_dpcon_ver; +- + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_enable() failed\n"); +@@ -1582,8 +1567,8 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) + return dpcon; + + err_enable: +-err_dpcon_ver: + err_get_attr: ++err_reset: + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); + err_open: + fsl_mc_object_free(dpcon); +@@ -1849,6 +1834,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) + goto err_open; + } + ++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); ++ if (err) { ++ dev_err(dev, "dpbp_reset() failed\n"); ++ goto err_reset; ++ } ++ + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); +@@ -1862,16 +1853,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) + goto err_get_attr; + } + +- err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); +- if (err) +- goto err_dpbp_ver; +- + return 0; + +-err_dpbp_ver: + err_get_attr: + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); + err_enable: ++err_reset: + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); + err_open: + fsl_mc_object_free(dpbp_dev); +@@ -1911,6 +1898,12 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) + ls_dev->mc_io = priv->mc_io; + ls_dev->mc_handle = priv->mc_token; + ++ err = dpni_reset(priv->mc_io, 0, priv->mc_token); ++ if (err) { ++ dev_err(dev, "dpni_reset() failed\n"); ++ goto err_reset; ++ } ++ + /* Map a memory region which will be used by MC to pass us an + * attribute structure + */ +@@ -1940,10 +1933,6 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) + goto err_get_attr; + } + +- err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); +- if (err) +- goto err_dpni_ver; +- + memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); + err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); + if (err) { +@@ -2019,11 +2008,11 @@ err_cls_rule: + err_data_offset: + err_buf_layout: + err_extract: +-err_dpni_ver: + err_get_attr: + err_dma_map: + kfree(dma_mem); + err_alloc: ++err_reset: + dpni_close(priv->mc_io, 0, priv->mc_token); + err_open: + return err; +@@ -2157,6 +2146,131 @@ static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, + } + #endif + ++/* default hash key fields */ ++static struct dpaa2_eth_hash_fields default_hash_fields[] = { ++ { ++ /* L2 header */ ++ .rxnfc_field = RXH_L2DA, ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_DA, ++ .size = 6, ++ }, { ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_SA, ++ .size = 6, ++ }, { ++ /* This is the last ethertype field parsed: ++ * depending on frame format, it can be the MAC ethertype ++ * or the VLAN etype. 
++ */ ++ .cls_prot = NET_PROT_ETH, ++ .cls_field = NH_FLD_ETH_TYPE, ++ .size = 2, ++ }, { ++ /* VLAN header */ ++ .rxnfc_field = RXH_VLAN, ++ .cls_prot = NET_PROT_VLAN, ++ .cls_field = NH_FLD_VLAN_TCI, ++ .size = 2, ++ }, { ++ /* IP header */ ++ .rxnfc_field = RXH_IP_SRC, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_SRC, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_IP_DST, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_DST, ++ .size = 4, ++ }, { ++ .rxnfc_field = RXH_L3_PROTO, ++ .cls_prot = NET_PROT_IP, ++ .cls_field = NH_FLD_IP_PROTO, ++ .size = 1, ++ }, { ++ /* Using UDP ports, this is functionally equivalent to raw ++ * byte pairs from L4 header. ++ */ ++ .rxnfc_field = RXH_L4_B_0_1, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_SRC, ++ .size = 2, ++ }, { ++ .rxnfc_field = RXH_L4_B_2_3, ++ .cls_prot = NET_PROT_UDP, ++ .cls_field = NH_FLD_UDP_PORT_DST, ++ .size = 2, ++ }, ++}; ++ ++/* Set RX hash options */ ++int set_hash(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpkg_profile_cfg cls_cfg; ++ struct dpni_rx_tc_dist_cfg dist_cfg; ++ u8 *dma_mem; ++ int i; ++ int err = 0; ++ ++ memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ struct dpkg_extract *key = ++ &cls_cfg.extracts[cls_cfg.num_extracts]; ++ ++ key->type = DPKG_EXTRACT_FROM_HDR; ++ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; ++ key->extract.from_hdr.type = DPKG_FULL_FIELD; ++ key->extract.from_hdr.field = priv->hash_fields[i].cls_field; ++ cls_cfg.num_extracts++; ++ ++ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; ++ } ++ ++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); ++ if (!dma_mem) ++ return -ENOMEM; ++ ++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_prepare_key_cfg error %d", err); ++ return err; ++ } ++ ++ memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ ++ /* Prepare for setting the rx dist */ ++ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { ++ dev_err(dev, "DMA mapping failed\n"); ++ kfree(dma_mem); ++ return -ENOMEM; ++ } ++ ++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); ++ if (dpaa2_eth_fs_enabled(priv)) { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; ++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; ++ } else { ++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ dma_unmap_single(dev, dist_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ kfree(dma_mem); ++ if (err) { ++ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); ++ return err; ++ } ++ ++ return 0; ++} ++ + /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ +@@ -2179,15 +2293,22 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) + return err; + } + +- check_fs_support(net_dev); ++ /* Verify classification options and disable hashing and/or ++ * flow steering support in case of invalid configuration values ++ */ ++ check_cls_support(priv); + +- /* have the interface implicitly distribute traffic based on supported +- * header fields ++ /* have the interface implicitly distribute traffic based on ++ * a static hash key + */ + if (dpaa2_eth_hash_enabled(priv)) { +- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); +- if (err) ++ priv->hash_fields = default_hash_fields; ++ 
priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); ++ err = set_hash(priv); ++ if (err) { ++ dev_err(dev, "Hashing configuration failed\n"); + return err; ++ } + } + + /* Configure handling of error frames */ +@@ -2512,7 +2633,7 @@ static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, + { + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); + +- return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); ++ return cpumask_scnprintf(buf, PAGE_SIZE, &priv->txconf_cpumask); + } + + static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +index 7274fbe..bdcdbd6 100644 +--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h +@@ -40,7 +40,6 @@ + #include "../../fsl-mc/include/dpbp-cmd.h" + #include "../../fsl-mc/include/dpcon.h" + #include "../../fsl-mc/include/dpcon-cmd.h" +-#include "../../fsl-mc/include/dpmng.h" + #include "dpni.h" + #include "dpni-cmd.h" + +@@ -54,8 +53,8 @@ + */ + #define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) + +-/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced +- * Max Frame Length (currently 10k). ++/* Maximum acceptable MTU value. It is in direct relation with the hardware ++ * enforced Max Frame Length (currently 10k). + */ + #define DPAA2_ETH_MFL (10 * 1024) + #define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) +@@ -100,8 +99,8 @@ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + DPAA2_ETH_RX_BUF_ALIGN) + +-/* PTP nominal frequency 1MHz */ +-#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 ++/* PTP nominal frequency 1GHz */ ++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 + + /* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. 
The hardware +@@ -138,6 +137,18 @@ struct dpaa2_fas { + __le32 status; + } __packed; + ++/* Frame annotation egress action descriptor */ ++#define DPAA2_FAEAD_OFFSET 0x58 ++ ++struct dpaa2_faead { ++ __le32 conf_fqid; ++ __le32 ctrl; ++}; ++ ++#define DPAA2_FAEAD_A2V 0x20000000 ++#define DPAA2_FAEAD_UPDV 0x00001000 ++#define DPAA2_FAEAD_UPD 0x00000010 ++ + /* Error and status bits in the frame annotation status word */ + /* Debug frame, otherwise supposed to be discarded */ + #define DPAA2_FAS_DISC 0x80000000 +@@ -274,6 +285,14 @@ struct dpaa2_eth_cls_rule { + bool in_use; + }; + ++struct dpaa2_eth_hash_fields { ++ u64 rxnfc_field; ++ enum net_prot cls_prot; ++ int cls_field; ++ int offset; ++ int size; ++}; ++ + /* Driver private data */ + struct dpaa2_eth_priv { + struct net_device *net_dev; +@@ -318,8 +337,10 @@ struct dpaa2_eth_priv { + bool do_link_poll; + struct task_struct *poll_thread; + ++ struct dpaa2_eth_hash_fields *hash_fields; ++ u8 num_hash_fields; + /* enabled ethtool hashing bits */ +- u64 rx_hash_fields; ++ u64 rx_flow_hash; + + #ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS + struct dpaa2_debugfs dbg; +@@ -334,25 +355,24 @@ struct dpaa2_eth_priv { + bool ts_rx_en; /* Rx timestamping enabled */ + }; + +-/* default Rx hash options, set during probing */ +-#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ +- | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ +- | RXH_L4_B_2_3) +- + #define dpaa2_eth_hash_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) + + #define dpaa2_eth_fs_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) + ++#define dpaa2_eth_fs_mask_enabled(priv) \ ++ ((priv)->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT) ++ + #define DPAA2_CLASSIFIER_ENTRY_COUNT 16 + + /* Required by struct dpni_attr::ext_cfg_iova */ + #define DPAA2_EXT_CFG_SIZE 256 + +-extern const struct ethtool_ops dpaa2_ethtool_ops; ++/* size of DMA memory used to pass configuration to classifier, in bytes */ ++#define DPAA2_CLASSIFIER_DMA_SIZE 256 + +-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); ++extern const struct ethtool_ops dpaa2_ethtool_ops; + + static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) + { +@@ -372,6 +392,6 @@ static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) + priv->dpni_attrs.max_senders); + } + +-void check_fs_support(struct net_device *); ++void check_cls_support(struct dpaa2_eth_priv *priv); + + #endif /* __DPAA2_H */ +diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +index fdab07f..1d792cd 100644 +--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c ++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +@@ -32,9 +32,6 @@ + #include "dpni.h" /* DPNI_LINK_OPT_* */ + #include "dpaa2-eth.h" + +-/* size of DMA memory used to pass configuration to classifier, in bytes */ +-#define DPAA2_CLASSIFIER_DMA_SIZE 256 +- + /* To be kept in sync with 'enum dpni_counter' */ + char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "rx frames", +@@ -89,28 +86,9 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) + { +- struct mc_version mc_ver; +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- char fw_version[ETHTOOL_FWVERS_LEN]; +- char version[32]; +- int err; +- +- err = mc_get_version(priv->mc_io, 0, &mc_ver); +- if (err) { +- strlcpy(drvinfo->fw_version, "Error retrieving MC version", +- sizeof(drvinfo->fw_version)); +- } else { +- 
scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", +- mc_ver.major, mc_ver.minor, mc_ver.revision); +- strlcpy(drvinfo->fw_version, fw_version, +- sizeof(drvinfo->fw_version)); +- } +- +- scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, +- DPNI_VER_MINOR); +- strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); +- + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); ++ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); ++ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); + } +@@ -152,7 +130,7 @@ static int dpaa2_eth_set_settings(struct net_device *net_dev, + + netdev_dbg(net_dev, "Setting link parameters..."); + +- /* Due to a temporary firmware limitation, the DPNI must be down ++ /* Due to a temporary MC limitation, the DPNI must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. + */ +@@ -211,7 +189,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) + } + } + +-/** Fill in hardware counters, as returned by the MC firmware. ++/** Fill in hardware counters, as returned by MC. + */ + static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + struct ethtool_stats *stats, +@@ -296,203 +274,223 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + #endif + } + +-static const struct dpaa2_eth_hash_fields { +- u64 rxnfc_field; +- enum net_prot cls_prot; +- int cls_field; +- int size; +-} hash_fields[] = { +- { +- /* L2 header */ +- .rxnfc_field = RXH_L2DA, +- .cls_prot = NET_PROT_ETH, +- .cls_field = NH_FLD_ETH_DA, +- .size = 6, +- }, { +- /* VLAN header */ +- .rxnfc_field = RXH_VLAN, +- .cls_prot = NET_PROT_VLAN, +- .cls_field = NH_FLD_VLAN_TCI, +- .size = 2, +- }, { +- /* IP header */ +- .rxnfc_field = RXH_IP_SRC, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_SRC, +- .size = 4, +- }, { +- .rxnfc_field = RXH_IP_DST, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_DST, +- .size = 4, +- }, { +- .rxnfc_field = RXH_L3_PROTO, +- .cls_prot = NET_PROT_IP, +- .cls_field = NH_FLD_IP_PROTO, +- .size = 1, +- }, { +- /* Using UDP ports, this is functionally equivalent to raw +- * byte pairs from L4 header. 
+- */ +- .rxnfc_field = RXH_L4_B_0_1, +- .cls_prot = NET_PROT_UDP, +- .cls_field = NH_FLD_UDP_PORT_SRC, +- .size = 2, +- }, { +- .rxnfc_field = RXH_L4_B_2_3, +- .cls_prot = NET_PROT_UDP, +- .cls_field = NH_FLD_UDP_PORT_DST, +- .size = 2, +- }, +-}; +- +-static int cls_is_enabled(struct net_device *net_dev, u64 flag) +-{ +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- +- return !!(priv->rx_hash_fields & flag); +-} +- +-static int cls_key_off(struct net_device *net_dev, u64 flag) ++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) + { + int i, off = 0; + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- if (hash_fields[i].rxnfc_field & flag) ++ for (i = 0; i < priv->num_hash_fields; i++) { ++ if (priv->hash_fields[i].cls_prot == prot && ++ priv->hash_fields[i].cls_field == field) + return off; +- if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) +- off += hash_fields[i].size; ++ off += priv->hash_fields[i].size; + } + + return -1; + } + +-static u8 cls_key_size(struct net_device *net_dev) ++static u8 cls_key_size(struct dpaa2_eth_priv *priv) + { + u8 i, size = 0; + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) +- continue; +- size += hash_fields[i].size; +- } ++ for (i = 0; i < priv->num_hash_fields; i++) ++ size += priv->hash_fields[i].size; + + return size; + } + +-static u8 cls_max_key_size(struct net_device *net_dev) ++void check_cls_support(struct dpaa2_eth_priv *priv) + { +- u8 i, size = 0; ++ u8 key_size = cls_key_size(priv); ++ struct device *dev = priv->net_dev->dev.parent; ++ ++ if (dpaa2_eth_hash_enabled(priv)) { ++ if (priv->dpni_attrs.max_dist_key_size < key_size) { ++ dev_dbg(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", ++ priv->dpni_attrs.max_dist_key_size, ++ key_size); ++ goto disable_cls; ++ } ++ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { ++ dev_dbg(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", ++ DPKG_MAX_NUM_OF_EXTRACTS); ++ goto disable_cls; ++ } ++ } + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) +- size += hash_fields[i].size; ++ if (dpaa2_eth_fs_enabled(priv)) { ++ if (!dpaa2_eth_hash_enabled(priv)) { ++ dev_dbg(dev, "DPNI_OPT_DIST_HASH option missing. Steering is disabled\n"); ++ goto disable_cls; ++ } ++ if (!dpaa2_eth_fs_mask_enabled(priv)) { ++ dev_dbg(dev, "Key masks not supported. Steering is disabled\n"); ++ goto disable_fs; ++ } ++ } + +- return size; ++ return; ++ ++disable_cls: ++ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_HASH; ++disable_fs: ++ priv->dpni_attrs.options &= ~(DPNI_OPT_DIST_FS | ++ DPNI_OPT_FS_MASK_SUPPORT); + } + +-void check_fs_support(struct net_device *net_dev) ++static int prep_l4_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_tcpip4_spec *l4_value, ++ struct ethtool_tcpip4_spec *l4_mask, ++ void *key, void *mask, u8 l4_proto) + { +- u8 key_size = cls_max_key_size(net_dev); +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ int offset; + +- if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && +- priv->dpni_attrs.max_dist_key_size < key_size) { +- dev_err(&net_dev->dev, +- "max_dist_key_size = %d, expected %d. 
Steering is disabled\n", +- priv->dpni_attrs.max_dist_key_size, +- key_size); +- priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; ++ if (l4_mask->tos) { ++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); ++ return -EOPNOTSUPP; + } +-} + +-/* Set RX hash options +- * flags is a combination of RXH_ bits +- */ +-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +-{ +- struct device *dev = net_dev->dev.parent; +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- struct dpkg_profile_cfg cls_cfg; +- struct dpni_rx_tc_dist_cfg dist_cfg; +- u8 *dma_mem; +- u64 enabled_flags = 0; +- int i; +- int err = 0; ++ if (l4_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = l4_value->ip4src; ++ *(u32 *)(mask + offset) = l4_mask->ip4src; ++ } + +- if (!dpaa2_eth_hash_enabled(priv)) { +- dev_err(dev, "Hashing support is not enabled\n"); +- return -EOPNOTSUPP; ++ if (l4_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = l4_value->ip4dst; ++ *(u32 *)(mask + offset) = l4_mask->ip4dst; + } + +- if (flags & ~DPAA2_RXH_SUPPORTED) { +- /* RXH_DISCARD is not supported */ +- dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); +- return -EOPNOTSUPP; ++ if (l4_mask->psrc) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u32 *)(key + offset) = l4_value->psrc; ++ *(u32 *)(mask + offset) = l4_mask->psrc; + } + +- memset(&cls_cfg, 0, sizeof(cls_cfg)); ++ if (l4_mask->pdst) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u32 *)(key + offset) = l4_value->pdst; ++ *(u32 *)(mask + offset) = l4_mask->pdst; ++ } + +- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { +- struct dpkg_extract *key = +- &cls_cfg.extracts[cls_cfg.num_extracts]; ++ /* Only apply the rule for the user-specified L4 protocol ++ * and if ethertype matches IPv4 ++ */ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); ++ *(u16 *)(key + offset) = htons(ETH_P_IP); ++ *(u16 *)(mask + offset) = 0xFFFF; + +- if (!(flags & hash_fields[i].rxnfc_field)) +- continue; ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u8 *)(key + offset) = l4_proto; ++ *(u8 *)(mask + offset) = 0xFF; + +- if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { +- dev_err(dev, "error adding key extraction rule, too many rules?\n"); +- return -E2BIG; +- } ++ /* TODO: check IP version */ + +- key->type = DPKG_EXTRACT_FROM_HDR; +- key->extract.from_hdr.prot = hash_fields[i].cls_prot; +- key->extract.from_hdr.type = DPKG_FULL_FIELD; +- key->extract.from_hdr.field = hash_fields[i].cls_field; +- cls_cfg.num_extracts++; ++ return 0; ++} ++ ++static int prep_eth_rule(struct dpaa2_eth_priv *priv, ++ struct ethhdr *eth_value, struct ethhdr *eth_mask, ++ void *key, void *mask) ++{ ++ int offset; + +- enabled_flags |= hash_fields[i].rxnfc_field; ++ if (eth_mask->h_proto) { ++ netdev_err(priv->net_dev, "Ethertype is not supported!\n"); ++ return -EOPNOTSUPP; + } + +- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); +- if (!dma_mem) +- return -ENOMEM; ++ if (!is_zero_ether_addr(eth_mask->h_source)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); ++ ether_addr_copy(key + offset, eth_value->h_source); ++ ether_addr_copy(mask + offset, eth_mask->h_source); ++ } + +- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); +- if (err) { +- dev_err(dev, "dpni_prepare_key_cfg error %d", err); +- return err; ++ if (!is_zero_ether_addr(eth_mask->h_dest)) 
{ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); ++ ether_addr_copy(key + offset, eth_value->h_dest); ++ ether_addr_copy(mask + offset, eth_mask->h_dest); + } + +- memset(&dist_cfg, 0, sizeof(dist_cfg)); ++ return 0; ++} + +- /* Prepare for setting the rx dist */ +- dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, +- DPAA2_CLASSIFIER_DMA_SIZE, +- DMA_TO_DEVICE); +- if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { +- dev_err(dev, "DMA mapping failed\n"); +- kfree(dma_mem); +- return -ENOMEM; ++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_usrip4_spec *uip_value, ++ struct ethtool_usrip4_spec *uip_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (uip_mask->tos) ++ return -EOPNOTSUPP; ++ ++ if (uip_mask->ip4src) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); ++ *(u32 *)(key + offset) = uip_value->ip4src; ++ *(u32 *)(mask + offset) = uip_mask->ip4src; + } + +- dist_cfg.dist_size = dpaa2_eth_queue_count(priv); +- if (dpaa2_eth_fs_enabled(priv)) { +- dist_cfg.dist_mode = DPNI_DIST_MODE_FS; +- dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; +- } else { +- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ if (uip_mask->ip4dst) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); ++ *(u32 *)(key + offset) = uip_value->ip4dst; ++ *(u32 *)(mask + offset) = uip_mask->ip4dst; + } + +- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); +- dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, +- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); +- kfree(dma_mem); +- if (err) { +- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); +- return err; ++ if (uip_mask->proto) { ++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); ++ *(u32 *)(key + offset) = uip_value->proto; ++ *(u32 *)(mask + offset) = uip_mask->proto; ++ } ++ if (uip_mask->l4_4_bytes) { ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); ++ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; ++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; ++ ++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); ++ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; ++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; + } + +- priv->rx_hash_fields = enabled_flags; ++ /* Ethertype must be IP */ ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); ++ *(u16 *)(key + offset) = htons(ETH_P_IP); ++ *(u16 *)(mask + offset) = 0xFFFF; ++ ++ return 0; ++} ++ ++static int prep_ext_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_flow_ext *ext_value, ++ struct ethtool_flow_ext *ext_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (ext_mask->vlan_etype) ++ return -EOPNOTSUPP; ++ ++ if (ext_mask->vlan_tci) { ++ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); ++ *(u16 *)(key + offset) = ext_value->vlan_tci; ++ *(u16 *)(mask + offset) = ext_mask->vlan_tci; ++ } ++ ++ return 0; ++} ++ ++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, ++ struct ethtool_flow_ext *ext_value, ++ struct ethtool_flow_ext *ext_mask, ++ void *key, void *mask) ++{ ++ int offset; ++ ++ if (!is_zero_ether_addr(ext_mask->h_dest)) { ++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); ++ ether_addr_copy(key + offset, ext_value->h_dest); ++ ether_addr_copy(mask + offset, ext_mask->h_dest); ++ } + + return 0; + } +@@ -501,140 +499,56 @@ static int prep_cls_rule(struct net_device *net_dev, + struct ethtool_rx_flow_spec *fs, + void *key) + { +- struct 
ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; +- struct ethhdr *eth_h, *eth_m; +- struct ethtool_flow_ext *ext_h, *ext_m; +- const u8 key_size = cls_key_size(net_dev); ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ const u8 key_size = cls_key_size(priv); + void *msk = key + key_size; ++ int err; + + memset(key, 0, key_size * 2); + +- /* This code is a major mess, it has to be cleaned up after the +- * classification mask issue is fixed and key format will be made static +- */ +- + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: +- l4ip4_h = &fs->h_u.tcp_ip4_spec; +- l4ip4_m = &fs->m_u.tcp_ip4_spec; +- /* TODO: ethertype to match IPv4 and protocol to match TCP */ +- goto l4ip4; +- ++ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, ++ &fs->m_u.tcp_ip4_spec, key, msk, ++ IPPROTO_TCP); ++ break; + case UDP_V4_FLOW: +- l4ip4_h = &fs->h_u.udp_ip4_spec; +- l4ip4_m = &fs->m_u.udp_ip4_spec; +- goto l4ip4; +- ++ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, ++ &fs->m_u.udp_ip4_spec, key, msk, ++ IPPROTO_UDP); ++ break; + case SCTP_V4_FLOW: +- l4ip4_h = &fs->h_u.sctp_ip4_spec; +- l4ip4_m = &fs->m_u.sctp_ip4_spec; +- +-l4ip4: +- if (l4ip4_m->tos) { +- netdev_err(net_dev, +- "ToS is not supported for IPv4 L4\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { +- netdev_err(net_dev, "IP SRC not supported!\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { +- netdev_err(net_dev, "IP DST not supported!\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { +- netdev_err(net_dev, "PSRC not supported, ignored\n"); +- return -EOPNOTSUPP; +- } +- if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { +- netdev_err(net_dev, "PDST not supported, ignored\n"); +- return -EOPNOTSUPP; +- } +- +- if (cls_is_enabled(net_dev, RXH_IP_SRC)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) +- = l4ip4_h->ip4src; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) +- = l4ip4_m->ip4src; +- } +- if (cls_is_enabled(net_dev, RXH_IP_DST)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) +- = l4ip4_h->ip4dst; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) +- = l4ip4_m->ip4dst; +- } +- +- if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) +- = l4ip4_h->psrc; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) +- = l4ip4_m->psrc; +- } +- +- if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { +- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) +- = l4ip4_h->pdst; +- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) +- = l4ip4_m->pdst; +- } ++ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, ++ &fs->m_u.sctp_ip4_spec, key, msk, ++ IPPROTO_SCTP); + break; +- + case ETHER_FLOW: +- eth_h = &fs->h_u.ether_spec; +- eth_m = &fs->m_u.ether_spec; +- +- if (eth_m->h_proto) { +- netdev_err(net_dev, "Ethertype is not supported!\n"); +- return -EOPNOTSUPP; +- } +- +- if (!is_zero_ether_addr(eth_m->h_source)) { +- netdev_err(net_dev, "ETH SRC is not supported!\n"); +- return -EOPNOTSUPP; +- } +- +- if (cls_is_enabled(net_dev, RXH_L2DA)) { +- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), +- eth_h->h_dest); +- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), +- eth_m->h_dest); +- } else { +- if (!is_zero_ether_addr(eth_m->h_dest)) { +- netdev_err(net_dev, +- "ETH DST is not supported!\n"); +- return -EOPNOTSUPP; +- } +- } ++ err = prep_eth_rule(priv, &fs->h_u.ether_spec, ++ &fs->m_u.ether_spec, key, msk); ++ break; ++ 
case IP_USER_FLOW: ++ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, ++ &fs->m_u.usr_ip4_spec, key, msk); + break; +- + default: +- /* TODO: IP user flow, AH, ESP */ ++ /* TODO: AH, ESP */ + return -EOPNOTSUPP; + } ++ if (err) ++ return err; + + if (fs->flow_type & FLOW_EXT) { +- /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ +- return -EOPNOTSUPP; ++ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); ++ if (err) ++ return err; + } + + if (fs->flow_type & FLOW_MAC_EXT) { +- ext_h = &fs->h_ext; +- ext_m = &fs->m_ext; +- +- if (cls_is_enabled(net_dev, RXH_L2DA)) { +- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), +- ext_h->h_dest); +- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), +- ext_m->h_dest); +- } else { +- if (!is_zero_ether_addr(ext_m->h_dest)) { +- netdev_err(net_dev, +- "ETH DST is not supported!\n"); +- return -EOPNOTSUPP; +- } +- } ++ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); ++ if (err) ++ return err; + } ++ + return 0; + } + +@@ -643,6 +557,7 @@ static int do_cls(struct net_device *net_dev, + bool add) + { + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct device *dev = net_dev->dev.parent; + const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; + struct dpni_rule_cfg rule_cfg; + void *dma_mem; +@@ -660,7 +575,7 @@ static int do_cls(struct net_device *net_dev, + return -EINVAL; + + memset(&rule_cfg, 0, sizeof(rule_cfg)); +- rule_cfg.key_size = cls_key_size(net_dev); ++ rule_cfg.key_size = cls_key_size(priv); + + /* allocate twice the key size, for the actual key and for mask */ + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); +@@ -671,27 +586,12 @@ static int do_cls(struct net_device *net_dev, + if (err) + goto err_free_mem; + +- rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, ++ rule_cfg.key_iova = dma_map_single(dev, dma_mem, + rule_cfg.key_size * 2, + DMA_TO_DEVICE); + + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; + +- if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { +- int i; +- u8 *mask = dma_mem + rule_cfg.key_size; +- +- /* check that nothing is masked out, otherwise it won't work */ +- for (i = 0; i < rule_cfg.key_size; i++) { +- if (mask[i] == 0xff) +- continue; +- netdev_err(net_dev, "dev does not support masking!\n"); +- err = -EOPNOTSUPP; +- goto err_free_mem; +- } +- rule_cfg.mask_iova = 0; +- } +- + /* No way to control rule order in firmware */ + if (add) + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, +@@ -700,10 +600,10 @@ static int do_cls(struct net_device *net_dev, + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, + &rule_cfg); + +- dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, ++ dma_unmap_single(dev, rule_cfg.key_iova, + rule_cfg.key_size * 2, DMA_TO_DEVICE); + if (err) { +- netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); ++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); + goto err_free_mem; + } + +@@ -746,40 +646,12 @@ static int del_cls(struct net_device *net_dev, int location) + return 0; + } + +-static void clear_cls(struct net_device *net_dev) +-{ +- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); +- int i, err; +- +- for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { +- if (!priv->cls_rule[i].in_use) +- continue; +- +- err = del_cls(net_dev, i); +- if (err) +- netdev_warn(net_dev, +- "err trying to delete classification entry %d\n", +- i); +- } +-} +- + static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc) + { + int err 
= 0; + + switch (rxnfc->cmd) { +- case ETHTOOL_SRXFH: +- /* first off clear ALL classification rules, chaging key +- * composition will break them anyway +- */ +- clear_cls(net_dev); +- /* we purposely ignore cmd->flow_type for now, because the +- * classifier only supports a single set of fields for all +- * protocols +- */ +- err = dpaa2_eth_set_hash(net_dev, rxnfc->data); +- break; + case ETHTOOL_SRXCLSRLINS: + err = add_cls(net_dev, &rxnfc->fs); + break; +@@ -804,11 +676,10 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: +- /* we purposely ignore cmd->flow_type for now, because the +- * classifier only supports a single set of fields for all +- * protocols ++ /* we purposely ignore cmd->flow_type, because the hashing key ++ * is the same (and fixed) for all protocols + */ +- rxnfc->data = priv->rx_hash_fields; ++ rxnfc->data = priv->rx_flow_hash; + break; + + case ETHTOOL_GRXRINGS: +diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c +index 366ad4c..fe16b8b 100644 +--- a/drivers/staging/fsl-dpaa2/mac/mac.c ++++ b/drivers/staging/fsl-dpaa2/mac/mac.c +@@ -120,7 +120,7 @@ static void dpaa2_mac_link_changed(struct net_device *netdev) + phy_print_status(phydev); + } + +- /* We must call into the MC firmware at all times, because we don't know ++ /* We must interrogate MC at all times, because we don't know + * when and whether a potential DPNI may have read the link state. + */ + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, +@@ -532,7 +532,7 @@ static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) + goto err_close; + } + +- dev_info_once(dev, "Using DPMAC API %d.%d\n", ++ dev_warn(dev, "Using DPMAC API %d.%d\n", + priv->attr.version.major, priv->attr.version.minor); + + /* Look up the DPMAC node in the device-tree. 
*/ +diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c +index f8d8cbe..5b6fa1c 100644 +--- a/drivers/staging/fsl-mc/bus/dprc-driver.c ++++ b/drivers/staging/fsl-mc/bus/dprc-driver.c +@@ -1078,7 +1078,7 @@ int __init dprc_driver_init(void) + return fsl_mc_driver_register(&dprc_driver); + } + +-void __exit dprc_driver_exit(void) ++void dprc_driver_exit(void) + { + fsl_mc_driver_unregister(&dprc_driver); + } +diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h +index 1246ca8..58ed441 100644 +--- a/drivers/staging/fsl-mc/include/mc-private.h ++++ b/drivers/staging/fsl-mc/include/mc-private.h +@@ -143,7 +143,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, + + int __init dprc_driver_init(void); + +-void __exit dprc_driver_exit(void); ++void dprc_driver_exit(void); + + int __init fsl_mc_allocator_driver_init(void); + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index f951b75..600a137 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1685,8 +1685,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + cpu_to_le32(EP_STATE_DISABLED)) || + le32_to_cpu(ctrl_ctx->drop_flags) & + xhci_get_endpoint_flag(&ep->desc)) { +- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", +- __func__, ep); ++ /* Do not warn when called after a usb_device_reset */ ++ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) ++ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", ++ __func__, ep); + return 0; + } + +diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h +index 84d971f..f13b12e 100644 +--- a/include/linux/fsl/guts.h ++++ b/include/linux/fsl/guts.h +@@ -29,83 +29,86 @@ + * #ifdefs. 
+ */ + struct ccsr_guts { +- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ +- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ +- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ +- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ +- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ +- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ ++ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ ++ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ ++ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ ++ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ ++ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ ++ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; +- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ ++ u32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u8 res024[0x30 - 0x24]; +- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ ++ u32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; +- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ ++ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u8 res044[0x50 - 0x44]; +- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ ++ u32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u8 res054[0x60 - 0x54]; +- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ +- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ +- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ ++ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ ++ u32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ ++ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; +- __be32 devdisr; /* 0x.0070 - Device Disable Control */ ++ u32 devdisr; /* 0x.0070 - Device Disable Control */ + #define CCSR_GUTS_DEVDISR_TB1 0x00001000 + #define CCSR_GUTS_DEVDISR_TB0 0x00004000 +- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ ++ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; +- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ +- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ +- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ +- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ +- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ +- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ +- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ +- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ +- __be32 autorstsr; /* 0x.009c - Automatic reset status register */ +- __be32 pvr; /* 0x.00a0 - Processor Version Register */ +- __be32 svr; /* 0x.00a4 - System Version Register */ ++ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ ++ u32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ ++ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ ++ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ ++ u32 pmcdr; /* 0x.008c - 4Power management clock disable register */ ++ u32 mcpsumr; /* 0x.0090 - Machine Check Summary 
Register */ ++ u32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ ++ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ ++ u32 autorstsr; /* 0x.009c - Automatic reset status register */ ++ u32 pvr; /* 0x.00a0 - Processor Version Register */ ++ u32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; +- __be32 rstcr; /* 0x.00b0 - Reset Control Register */ ++ u32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; +- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register ++ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; +- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers ++ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; +- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ +- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ ++ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ ++ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; +- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ ++ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; +- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ ++ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; +- __be32 ircr; /* 0x.0900 - Infrared Control Register */ ++ u32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; +- __be32 dmacr; /* 0x.0908 - DMA Control Register */ ++ u32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; +- __be32 elbccr; /* 0x.0914 - eLBC Control Register */ ++ u32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; +- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ +- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ +- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ ++ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ ++ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ ++ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; +- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ ++ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; +- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ ++ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; +- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ +- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ ++ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ ++ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u8 rese28[0xf04 - 0xe28]; +- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ +- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ ++ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ ++ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u8 resf0c[0xf2c - 0xf0c]; +- __be32 itcr; /* 0x.0f2c - Internal transaction control register */ ++ u32 itcr; /* 0x.0f2c - Internal transaction control register */ + u8 resf30[0xf40 - 0xf30]; +- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ +- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ ++ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ ++ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ + } __attribute__ 
((packed)); + ++#ifdef CONFIG_FSL_GUTS ++extern u32 guts_get_svr(void); ++#endif + + /* Alternate function signal multiplex control */ + #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) +diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h +new file mode 100644 +index 0000000..8d13836 +--- /dev/null ++++ b/include/linux/fsl/svr.h +@@ -0,0 +1,95 @@ ++/* ++ * MPC85xx cpu type detection ++ * ++ * Copyright 2011-2012 Freescale Semiconductor, Inc. ++ * ++ * This is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++#ifndef FSL_SVR_H ++#define FSL_SVR_H ++ ++#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ ++#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ ++#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ ++ ++/* Some parts define SVR[0:23] as the SOC version */ ++#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ ++ ++#define SVR_8533 0x803400 ++#define SVR_8535 0x803701 ++#define SVR_8536 0x803700 ++#define SVR_8540 0x803000 ++#define SVR_8541 0x807200 ++#define SVR_8543 0x803200 ++#define SVR_8544 0x803401 ++#define SVR_8545 0x803102 ++#define SVR_8547 0x803101 ++#define SVR_8548 0x803100 ++#define SVR_8555 0x807100 ++#define SVR_8560 0x807000 ++#define SVR_8567 0x807501 ++#define SVR_8568 0x807500 ++#define SVR_8569 0x808000 ++#define SVR_8572 0x80E000 ++#define SVR_P1010 0x80F100 ++#define SVR_P1011 0x80E500 ++#define SVR_P1012 0x80E501 ++#define SVR_P1013 0x80E700 ++#define SVR_P1014 0x80F101 ++#define SVR_P1017 0x80F700 ++#define SVR_P1020 0x80E400 ++#define SVR_P1021 0x80E401 ++#define SVR_P1022 0x80E600 ++#define SVR_P1023 0x80F600 ++#define SVR_P1024 0x80E402 ++#define SVR_P1025 0x80E403 ++#define SVR_P2010 0x80E300 ++#define SVR_P2020 0x80E200 ++#define SVR_P2040 0x821000 ++#define SVR_P2041 0x821001 ++#define SVR_P3041 0x821103 ++#define SVR_P4040 0x820100 ++#define SVR_P4080 0x820000 ++#define SVR_P5010 0x822100 ++#define SVR_P5020 0x822000 ++#define SVR_P5021 0X820500 ++#define SVR_P5040 0x820400 ++#define SVR_T4240 0x824000 ++#define SVR_T4120 0x824001 ++#define SVR_T4160 0x824100 ++#define SVR_T4080 0x824102 ++#define SVR_C291 0x850000 ++#define SVR_C292 0x850020 ++#define SVR_C293 0x850030 ++#define SVR_B4860 0X868000 ++#define SVR_G4860 0x868001 ++#define SVR_G4060 0x868003 ++#define SVR_B4440 0x868100 ++#define SVR_G4440 0x868101 ++#define SVR_B4420 0x868102 ++#define SVR_B4220 0x868103 ++#define SVR_T1040 0x852000 ++#define SVR_T1041 0x852001 ++#define SVR_T1042 0x852002 ++#define SVR_T1020 0x852100 ++#define SVR_T1021 0x852101 ++#define SVR_T1022 0x852102 ++#define SVR_T2080 0x853000 ++#define SVR_T2081 0x853100 ++ ++#define SVR_8610 0x80A000 ++#define SVR_8641 0x809000 ++#define SVR_8641D 0x809001 ++ ++#define SVR_9130 0x860001 ++#define SVR_9131 0x860000 ++#define SVR_9132 0x861000 ++#define SVR_9232 0x861400 ++ ++#define SVR_Unknown 0xFFFFFF ++ ++#endif +diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h +index 84d60cb..3f9778c 100644 +--- a/include/linux/fsl_ifc.h ++++ b/include/linux/fsl_ifc.h +@@ -29,7 +29,20 @@ + #include + #include + +-#define FSL_IFC_BANK_COUNT 4 ++/* ++ * The actual number of banks implemented depends on the IFC version ++ * - IFC version 1.0 implements 4 banks. ++ * - IFC version 1.1 onward implements 8 banks. 
++ */ ++#define FSL_IFC_BANK_COUNT 8 ++ ++#define FSL_IFC_VERSION_MASK 0x0F0F0000 ++#define FSL_IFC_VERSION_1_0_0 0x01000000 ++#define FSL_IFC_VERSION_1_1_0 0x01010000 ++#define FSL_IFC_VERSION_2_0_0 0x02000000 ++ ++#define PGOFFSET_64K (64*1024) ++#define PGOFFSET_4K (4*1024) + + /* + * CSPR - Chip Select Property Register +@@ -714,20 +727,26 @@ struct fsl_ifc_nand { + __be32 nand_evter_en; + u32 res17[0x2]; + __be32 nand_evter_intr_en; +- u32 res18[0x2]; ++ __be32 nand_vol_addr_stat; ++ u32 res18; + __be32 nand_erattr0; + __be32 nand_erattr1; + u32 res19[0x10]; + __be32 nand_fsr; +- u32 res20; +- __be32 nand_eccstat[4]; +- u32 res21[0x20]; ++ u32 res20[0x3]; ++ __be32 nand_eccstat[6]; ++ u32 res21[0x1c]; + __be32 nanndcr; + u32 res22[0x2]; + __be32 nand_autoboot_trgr; + u32 res23; + __be32 nand_mdr; +- u32 res24[0x5C]; ++ u32 res24[0x1C]; ++ __be32 nand_dll_lowcfg0; ++ __be32 nand_dll_lowcfg1; ++ u32 res25; ++ __be32 nand_dll_lowstat; ++ u32 res26[0x3c]; + }; + + /* +@@ -762,13 +781,12 @@ struct fsl_ifc_gpcm { + __be32 gpcm_erattr1; + __be32 gpcm_erattr2; + __be32 gpcm_stat; +- u32 res4[0x1F3]; + }; + + /* + * IFC Controller Registers + */ +-struct fsl_ifc_regs { ++struct fsl_ifc_global { + __be32 ifc_rev; + u32 res1[0x2]; + struct { +@@ -776,39 +794,44 @@ struct fsl_ifc_regs { + __be32 cspr; + u32 res2; + } cspr_cs[FSL_IFC_BANK_COUNT]; +- u32 res3[0x19]; ++ u32 res3[0xd]; + struct { + __be32 amask; + u32 res4[0x2]; + } amask_cs[FSL_IFC_BANK_COUNT]; +- u32 res5[0x18]; ++ u32 res5[0xc]; + struct { + __be32 csor; + __be32 csor_ext; + u32 res6; + } csor_cs[FSL_IFC_BANK_COUNT]; +- u32 res7[0x18]; ++ u32 res7[0xc]; + struct { + __be32 ftim[4]; + u32 res8[0x8]; + } ftim_cs[FSL_IFC_BANK_COUNT]; +- u32 res9[0x60]; ++ u32 res9[0x30]; + __be32 rb_stat; +- u32 res10[0x2]; ++ __be32 rb_map; ++ __be32 wb_map; + __be32 ifc_gcr; +- u32 res11[0x2]; ++ u32 res10[0x2]; + __be32 cm_evter_stat; +- u32 res12[0x2]; ++ u32 res11[0x2]; + __be32 cm_evter_en; +- u32 res13[0x2]; ++ u32 res12[0x2]; + __be32 cm_evter_intr_en; +- u32 res14[0x2]; ++ u32 res13[0x2]; + __be32 cm_erattr0; + __be32 cm_erattr1; +- u32 res15[0x2]; ++ u32 res14[0x2]; + __be32 ifc_ccr; + __be32 ifc_csr; +- u32 res16[0x2EB]; ++ __be32 ddr_ccr_low; ++}; ++ ++ ++struct fsl_ifc_runtime { + struct fsl_ifc_nand ifc_nand; + struct fsl_ifc_nor ifc_nor; + struct fsl_ifc_gpcm ifc_gpcm; +@@ -822,17 +845,70 @@ extern int fsl_ifc_find(phys_addr_t addr_base); + struct fsl_ifc_ctrl { + /* device info */ + struct device *dev; +- struct fsl_ifc_regs __iomem *regs; ++ struct fsl_ifc_global __iomem *gregs; ++ struct fsl_ifc_runtime __iomem *rregs; + int irq; + int nand_irq; + spinlock_t lock; + void *nand; ++ int version; ++ int banks; + + u32 nand_stat; + wait_queue_head_t nand_wait; ++ bool little_endian; + }; + + extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; + ++static inline u32 ifc_in32(void __iomem *addr) ++{ ++ u32 val; ++ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ val = ioread32(addr); ++ else ++ val = ioread32be(addr); ++ ++ return val; ++} ++ ++static inline u16 ifc_in16(void __iomem *addr) ++{ ++ u16 val; ++ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ val = ioread16(addr); ++ else ++ val = ioread16be(addr); ++ ++ return val; ++} ++ ++static inline u8 ifc_in8(void __iomem *addr) ++{ ++ return ioread8(addr); ++} ++ ++static inline void ifc_out32(u32 val, void __iomem *addr) ++{ ++ if (fsl_ifc_ctrl_dev->little_endian) ++ iowrite32(val, addr); ++ else ++ iowrite32be(val, addr); ++} ++ ++static inline void ifc_out16(u16 val, void __iomem *addr) ++{ ++ if 
(fsl_ifc_ctrl_dev->little_endian) ++ iowrite16(val, addr); ++ else ++ iowrite16be(val, addr); ++} ++ ++static inline void ifc_out8(u8 val, void __iomem *addr) ++{ ++ iowrite8(val, addr); ++} + + #endif /* __ASM_FSL_IFC_H */ +diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h +index 69517a2..cbbe6a2 100644 +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -356,6 +356,20 @@ static inline int disable_irq_wake(unsigned int irq) + return irq_set_irq_wake(irq, 0); + } + ++/* ++ * irq_get_irqchip_state/irq_set_irqchip_state specific flags ++ */ ++enum irqchip_irq_state { ++ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ ++ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ ++ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ ++ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */ ++}; ++ ++extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool *state); ++extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool state); + + #ifdef CONFIG_IRQ_FORCED_THREADING + extern bool force_irqthreads; +diff --git a/include/linux/iommu.h b/include/linux/iommu.h +index 04229cb..7421bdf 100644 +--- a/include/linux/iommu.h ++++ b/include/linux/iommu.h +@@ -30,6 +30,7 @@ + #define IOMMU_WRITE (1 << 1) + #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ + #define IOMMU_NOEXEC (1 << 3) ++#define IOMMU_MMIO (1 << 4) /* Device memory access */ + + struct iommu_ops; + struct iommu_group; +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 9ba173b..4931a8b 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -30,6 +30,7 @@ + struct seq_file; + struct module; + struct msi_msg; ++enum irqchip_irq_state; + + /* + * IRQ line status. +@@ -324,6 +325,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) + * irq_request_resources + * @irq_compose_msi_msg: optional to compose message content for MSI + * @irq_write_msi_msg: optional to write message content for MSI ++ * @irq_get_irqchip_state: return the internal state of an interrupt ++ * @irq_set_irqchip_state: set the internal state of a interrupt + * @flags: chip specific flags + */ + struct irq_chip { +@@ -363,6 +366,9 @@ struct irq_chip { + void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); + void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); + ++ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); ++ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); ++ + unsigned long flags; + }; + +@@ -460,6 +466,8 @@ extern void irq_chip_eoi_parent(struct irq_data *data); + extern int irq_chip_set_affinity_parent(struct irq_data *data, + const struct cpumask *dest, + bool force); ++extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); ++extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); + #endif + + /* Handling of unhandled and spurious interrupts: */ +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h +index da1aa15..36caf46 100644 +--- a/include/linux/irqchip/arm-gic-v3.h ++++ b/include/linux/irqchip/arm-gic-v3.h +@@ -270,6 +270,18 @@ + #define ICC_SRE_EL2_SRE (1 << 0) + #define ICC_SRE_EL2_ENABLE (1 << 3) + ++#define ICC_SGI1R_TARGET_LIST_SHIFT 0 ++#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) ++#define ICC_SGI1R_AFFINITY_1_SHIFT 16 ++#define ICC_SGI1R_AFFINITY_1_MASK (0xff << 
ICC_SGI1R_AFFINITY_1_SHIFT) ++#define ICC_SGI1R_SGI_ID_SHIFT 24 ++#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) ++#define ICC_SGI1R_AFFINITY_2_SHIFT 32 ++#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) ++#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 ++#define ICC_SGI1R_AFFINITY_3_SHIFT 48 ++#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) ++ + /* + * System register definitions + */ +diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h +index 13eed92..60b09ed 100644 +--- a/include/linux/irqchip/arm-gic.h ++++ b/include/linux/irqchip/arm-gic.h +@@ -106,6 +106,8 @@ static inline void gic_init(unsigned int nr, int start, + gic_init_bases(nr, start, dist, cpu, 0, NULL); + } + ++int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); ++ + void gic_send_sgi(unsigned int cpu_id, unsigned int irq); + int gic_get_cpu_id(unsigned int cpu); + void gic_migrate_target(unsigned int new_cpu_id); +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h +index ebace05..3c5ca45 100644 +--- a/include/linux/irqdomain.h ++++ b/include/linux/irqdomain.h +@@ -56,6 +56,7 @@ enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_PCI_MSI, + DOMAIN_BUS_PLATFORM_MSI, ++ DOMAIN_BUS_NEXUS, + }; + + /** +diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h +index dba793e..62d966a 100644 +--- a/include/linux/mmc/sdhci.h ++++ b/include/linux/mmc/sdhci.h +@@ -100,6 +100,10 @@ struct sdhci_host { + #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) + /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ + #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) ++/* Controller does not support 64-bit DMA */ ++#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) ++/* Controller broken with using ACMD23 */ ++#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) + + int irq; /* Device IRQ */ + void __iomem *ioaddr; /* Mapped address */ +@@ -130,6 +134,7 @@ struct sdhci_host { + #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ + #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ + #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ ++#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ + + unsigned int version; /* SDHCI spec. version */ + +@@ -155,12 +160,19 @@ struct sdhci_host { + + int sg_count; /* Mapped sg entries */ + +- u8 *adma_desc; /* ADMA descriptor table */ +- u8 *align_buffer; /* Bounce buffer */ ++ void *adma_table; /* ADMA descriptor table */ ++ void *align_buffer; /* Bounce buffer */ ++ ++ size_t adma_table_sz; /* ADMA descriptor table size */ ++ size_t align_buffer_sz; /* Bounce buffer size */ + + dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ + dma_addr_t align_addr; /* Mapped bounce buffer */ + ++ unsigned int desc_sz; /* ADMA descriptor size */ ++ unsigned int align_sz; /* ADMA alignment */ ++ unsigned int align_mask; /* ADMA alignment mask */ ++ + struct tasklet_struct finish_tasklet; /* Tasklet structures */ + + struct timer_list timer; /* Timer for timeouts */ +diff --git a/include/linux/of.h b/include/linux/of.h +index 4a6a489..25111fb 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -57,7 +57,6 @@ struct device_node { + struct device_node *child; + struct device_node *sibling; + struct device_node *next; /* next device of same type */ +- struct device_node *allnext; /* next in list of all nodes */ + struct kobject kobj; + unsigned long _flags; + void *data; +@@ -109,7 +108,7 @@ static inline void of_node_put(struct device_node *node) { } + #ifdef CONFIG_OF + + /* Pointer for first entry in chain of all nodes. */ +-extern struct device_node *of_allnodes; ++extern struct device_node *of_root; + extern struct device_node *of_chosen; + extern struct device_node *of_aliases; + extern struct device_node *of_stdout; +@@ -117,7 +116,7 @@ extern raw_spinlock_t devtree_lock; + + static inline bool of_have_populated_dt(void) + { +- return of_allnodes != NULL; ++ return of_root != NULL; + } + + static inline bool of_node_is_root(const struct device_node *node) +@@ -161,6 +160,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag + clear_bit(flag, &p->_flags); + } + ++extern struct device_node *__of_find_all_nodes(struct device_node *prev); + extern struct device_node *of_find_all_nodes(struct device_node *prev); + + /* +@@ -216,8 +216,9 @@ static inline const char *of_node_full_name(const struct device_node *np) + return np ? np->full_name : ""; + } + +-#define for_each_of_allnodes(dn) \ +- for (dn = of_allnodes; dn; dn = dn->allnext) ++#define for_each_of_allnodes_from(from, dn) \ ++ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) ++#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) + extern struct device_node *of_find_node_by_name(struct device_node *from, + const char *name); + extern struct device_node *of_find_node_by_type(struct device_node *from, +diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h +index c65a18a..7e09244 100644 +--- a/include/linux/of_pdt.h ++++ b/include/linux/of_pdt.h +@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); + /* for building the device tree */ + extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); + +-extern void (*of_pdt_build_more)(struct device_node *dp, +- struct device_node ***nextp); ++extern void (*of_pdt_build_more)(struct device_node *dp); + + #endif /* _LINUX_OF_PDT_H */ +diff --git a/include/linux/pci.h b/include/linux/pci.h +index a99f301..f28c88b 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -562,6 +562,7 @@ static inline int pcibios_err_to_errno(int err) + /* Low-level architecture-dependent routines */ + + struct pci_ops { ++ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); + int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); + int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); + }; +@@ -859,6 +860,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, + int where, u16 val); + int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, u32 val); ++ ++int 
pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val); ++int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val); ++int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val); ++int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val); ++ + struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); + + static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) +diff --git a/include/linux/phy.h b/include/linux/phy.h +index d090cfc..eda18a8 100644 +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -700,6 +700,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, + struct phy_c45_device_ids *c45_ids); + struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); + int phy_device_register(struct phy_device *phy); ++void phy_device_remove(struct phy_device *phydev); + int phy_init_hw(struct phy_device *phydev); + int phy_suspend(struct phy_device *phydev); + int phy_resume(struct phy_device *phydev); +diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h +index f2ca1b4..fe5732d 100644 +--- a/include/linux/phy_fixed.h ++++ b/include/linux/phy_fixed.h +@@ -11,7 +11,7 @@ struct fixed_phy_status { + + struct device_node; + +-#ifdef CONFIG_FIXED_PHY ++#if IS_ENABLED(CONFIG_FIXED_PHY) + extern int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status); + extern struct phy_device *fixed_phy_register(unsigned int irq, +@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); + extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); ++extern int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed); + #else + static inline int fixed_phy_add(unsigned int irq, int phy_id, + struct fixed_phy_status *status) +@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, + { + return -ENODEV; + } ++static inline int fixed_phy_update_state(struct phy_device *phydev, ++ const struct fixed_phy_status *status, ++ const struct fixed_phy_status *changed) ++{ ++ return -ENODEV; ++} + #endif /* CONFIG_FIXED_PHY */ + + #endif /* __PHY_FIXED_H */ +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index 63c16d1..55dd2fb 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -731,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + if (!handle) { + handle = handle_bad_irq; + } else { +- if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) ++ struct irq_data *irq_data = &desc->irq_data; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ /* ++ * With hierarchical domains we might run into a ++ * situation where the outermost chip is not yet set ++ * up, but the inner chips are there. Instead of ++ * bailing we install the handler, but obviously we ++ * cannot enable/startup the interrupt at this point. ++ */ ++ while (irq_data) { ++ if (irq_data->chip != &no_irq_chip) ++ break; ++ /* ++ * Bail out if the outer chip is not set up ++ * and the interrrupt supposed to be started ++ * right away. 
++ */ ++ if (WARN_ON(is_chained)) ++ goto out; ++ /* Try the parent */ ++ irq_data = irq_data->parent_data; ++ } ++#endif ++ if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) + goto out; + } + +@@ -911,6 +934,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data, + } + + /** ++ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h ++ * ++ * Conditional, as the underlying parent chip might not implement it. ++ */ ++int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) ++{ ++ data = data->parent_data; ++ ++ if (data->chip->irq_set_type) ++ return data->chip->irq_set_type(data, type); ++ ++ return -ENOSYS; ++} ++ ++/** + * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware + * @data: Pointer to interrupt specific data + * +@@ -925,6 +965,22 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data) + + return -ENOSYS; + } ++ ++/** ++ * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ * @on: Whether to set or reset the wake-up capability of this irq ++ * ++ * Conditional, as the underlying parent chip might not implement it. ++ */ ++int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) ++{ ++ data = data->parent_data; ++ if (data->chip->irq_set_wake) ++ return data->chip->irq_set_wake(data, on); ++ ++ return -ENOSYS; ++} + #endif + + /** +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 8069237..acb401f 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1758,3 +1758,94 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler, + + return retval; + } ++ ++/** ++ * irq_get_irqchip_state - returns the irqchip state of a interrupt. ++ * @irq: Interrupt line that is forwarded to a VM ++ * @which: One of IRQCHIP_STATE_* the caller wants to know about ++ * @state: a pointer to a boolean where the state is to be storeed ++ * ++ * This call snapshots the internal irqchip state of an ++ * interrupt, returning into @state the bit corresponding to ++ * stage @which ++ * ++ * This function should be called with preemption disabled if the ++ * interrupt controller has per-cpu registers. ++ */ ++int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool *state) ++{ ++ struct irq_desc *desc; ++ struct irq_data *data; ++ struct irq_chip *chip; ++ unsigned long flags; ++ int err = -EINVAL; ++ ++ desc = irq_get_desc_buslock(irq, &flags, 0); ++ if (!desc) ++ return err; ++ ++ data = irq_desc_get_irq_data(desc); ++ ++ do { ++ chip = irq_data_get_irq_chip(data); ++ if (chip->irq_get_irqchip_state) ++ break; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ data = data->parent_data; ++#else ++ data = NULL; ++#endif ++ } while (data); ++ ++ if (data) ++ err = chip->irq_get_irqchip_state(data, which, state); ++ ++ irq_put_desc_busunlock(desc, flags); ++ return err; ++} ++ ++/** ++ * irq_set_irqchip_state - set the state of a forwarded interrupt. ++ * @irq: Interrupt line that is forwarded to a VM ++ * @which: State to be restored (one of IRQCHIP_STATE_*) ++ * @val: Value corresponding to @which ++ * ++ * This call sets the internal irqchip state of an interrupt, ++ * depending on the value of @which. ++ * ++ * This function should be called with preemption disabled if the ++ * interrupt controller has per-cpu registers. 
++ */ ++int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, ++ bool val) ++{ ++ struct irq_desc *desc; ++ struct irq_data *data; ++ struct irq_chip *chip; ++ unsigned long flags; ++ int err = -EINVAL; ++ ++ desc = irq_get_desc_buslock(irq, &flags, 0); ++ if (!desc) ++ return err; ++ ++ data = irq_desc_get_irq_data(desc); ++ ++ do { ++ chip = irq_data_get_irq_chip(data); ++ if (chip->irq_set_irqchip_state) ++ break; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ data = data->parent_data; ++#else ++ data = NULL; ++#endif ++ } while (data); ++ ++ if (data) ++ err = chip->irq_set_irqchip_state(data, which, val); ++ ++ irq_put_desc_busunlock(desc, flags); ++ return err; ++} +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +index 2495ed0..54433c2 100644 +--- a/kernel/irq/msi.c ++++ b/kernel/irq/msi.c +@@ -106,8 +106,10 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq = ops->get_hwirq(info, arg); + int i, ret; + ++#if 0 + if (irq_find_mapping(domain, hwirq) > 0) + return -EEXIST; ++#endif + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); + if (ret < 0) +@@ -327,8 +329,15 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) + struct msi_desc *desc; + + for_each_msi_entry(desc, dev) { +- irq_domain_free_irqs(desc->irq, desc->nvec_used); +- desc->irq = 0; ++ /* ++ * We might have failed to allocate an MSI early ++ * enough that there is no IRQ associated to this ++ * entry. If that's the case, don't do anything. ++ */ ++ if (desc->irq) { ++ irq_domain_free_irqs(desc->irq, desc->nvec_used); ++ desc->irq = 0; ++ } + } + } + +diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c +index fa756d0..ad57f0c 100644 +--- a/sound/soc/fsl/mpc8610_hpcd.c ++++ b/sound/soc/fsl/mpc8610_hpcd.c +@@ -12,11 +12,11 @@ + + #include + #include ++#include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c +index f75c3cf..64a0bb6 100644 +--- a/sound/soc/fsl/p1022_ds.c ++++ b/sound/soc/fsl/p1022_ds.c +@@ -11,12 +11,12 @@ + */ + + #include ++#include + #include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c +index 9d89bb0..4ce4aff 100644 +--- a/sound/soc/fsl/p1022_rdk.c ++++ b/sound/soc/fsl/p1022_rdk.c +@@ -18,12 +18,12 @@ + */ + + #include ++#include + #include + #include + #include + #include + #include +-#include + + #include "fsl_dma.h" + #include "fsl_ssi.h" +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/series.arm64 b/packages/base/any/kernels/3.18.25/patches/series.arm64 index 0983a8d1..f32ec8fa 100644 --- a/packages/base/any/kernels/3.18.25/patches/series.arm64 +++ b/packages/base/any/kernels/3.18.25/patches/series.arm64 @@ -2,3 +2,5 @@ aufs.patch driver-support-intel-igb-bcm54616-phy.patch add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch add-nxp-arm64-ls2088ardb-device-tree.patch +add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch +backport-some-kernel-patches-based-on-3.18.25.patch
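Editor's note, appended after the patch series: the hunks against include/linux/interrupt.h and kernel/irq/manage.c above introduce the irq_get_irqchip_state()/irq_set_irqchip_state() pair together with the irqchip_irq_state enum. As a rough, hedged sketch only (this is not part of the patch; the driver context, IRQ number and error handling below are hypothetical), a consumer built on this kernel could snapshot an interrupt's pending bit like so:

/* Hypothetical consumer of the irqchip-state API added by this patch set. */
#include <linux/interrupt.h>

static bool example_irq_is_pending(unsigned int irq)
{
	bool pending = false;
	int err;

	/*
	 * Snapshot the PENDING bit of the interrupt's irqchip state.
	 * irq_get_irqchip_state() walks down parent_data under
	 * CONFIG_IRQ_DOMAIN_HIERARCHY until a chip implements the callback.
	 */
	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	if (err)
		return false;	/* no chip in the hierarchy supports it */

	return pending;
}

As the kernel-doc in the patch notes, callers should disable preemption around these calls if the interrupt controller uses per-CPU registers.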